crypto: qat - Convert to new AEAD interface
This patch converts qat to the new AEAD interface.  IV generation has been
removed since it's equivalent to a software implementation.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Tadeusz Struk <tadeusz.struk@intel.com>
commit e19ab1211d
parent c1359495c8

1 changed file with 127 additions and 208 deletions
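The conversion touches only the QAT algorithm glue (drivers/crypto/qat/qat_common/qat_algs.c): the AEAD callbacks move out of crypto_alg.cra_u.aead into a new qat_aeads[] array of struct aead_alg registered with crypto_register_aeads(), and the salt/givencrypt path is dropped because, with the new interface, IV generation is supplied by a generic software template stacked on top of the driver's AEAD rather than by the driver itself. A minimal consumer-side sketch of that template usage follows. It is illustrative only, not code from this commit: the choice of the "echainiv" IV-generator template and the qat_example_alloc_geniv_aead() helper name are assumptions, while crypto_alloc_aead(), crypto_aead_setauthsize() and crypto_free_aead() are standard crypto API calls.

/*
 * Illustrative sketch only (not from this patch): a consumer that needs
 * generated IVs wraps the driver's "authenc(...)" AEAD in a software IV
 * generator template instead of relying on a driver givencrypt hook.
 */
#include <crypto/aead.h>
#include <crypto/sha.h>
#include <linux/err.h>

static struct crypto_aead *qat_example_alloc_geniv_aead(void)
{
	struct crypto_aead *tfm;

	/*
	 * "echainiv" (assumed here) is a generic software IV generator;
	 * the inner authenc() resolves to the hardware implementation,
	 * e.g. qat_aes_cbc_hmac_sha1, when it has the highest priority.
	 */
	tfm = crypto_alloc_aead("echainiv(authenc(hmac(sha1),cbc(aes)))",
				0, 0);
	if (IS_ERR(tfm))
		return tfm;

	/* Keys and authsize are set exactly as with the old interface. */
	if (crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE)) {
		crypto_free_aead(tfm);
		return ERR_PTR(-EINVAL);
	}

	return tfm;
}

That the same behaviour is available from a generic template is the sense in which the commit message calls the removed IV generation "equivalent to a software implementation", and it is why qat_alg_aead_genivenc() and the per-context salt can simply be deleted in the diff below.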
@@ -53,7 +53,6 @@
 #include <crypto/hash.h>
 #include <crypto/algapi.h>
 #include <crypto/authenc.h>
-#include <crypto/rng.h>
 #include <linux/dma-mapping.h>
 #include "adf_accel_devices.h"
 #include "adf_transport.h"
@@ -113,9 +112,6 @@ struct qat_alg_aead_ctx {
 	struct crypto_shash *hash_tfm;
 	enum icp_qat_hw_auth_algo qat_hash_alg;
 	struct qat_crypto_instance *inst;
-	struct crypto_tfm *tfm;
-	uint8_t salt[AES_BLOCK_SIZE];
-	spinlock_t lock; /* protects qat_alg_aead_ctx struct */
 };

 struct qat_alg_ablkcipher_ctx {
@@ -273,11 +269,11 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
 				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
 }

-static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
 					  int alg,
 					  struct crypto_authenc_keys *keys)
 {
-	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
 	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
 	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
 	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
@@ -353,11 +349,11 @@ static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
 	return 0;
 }

-static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
 					  int alg,
 					  struct crypto_authenc_keys *keys)
 {
-	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
 	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
 	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
 	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
@@ -510,30 +506,27 @@ static int qat_alg_validate_key(int key_len, int *alg)
 	return 0;
 }

-static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
+static int qat_alg_aead_init_sessions(struct crypto_aead *tfm,
 				      const uint8_t *key, unsigned int keylen)
 {
 	struct crypto_authenc_keys keys;
 	int alg;

-	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
-		return -EFAULT;
-
 	if (crypto_authenc_extractkeys(&keys, key, keylen))
 		goto bad_key;

 	if (qat_alg_validate_key(keys.enckeylen, &alg))
 		goto bad_key;

-	if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
+	if (qat_alg_aead_init_enc_session(tfm, alg, &keys))
 		goto error;

-	if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
+	if (qat_alg_aead_init_dec_session(tfm, alg, &keys))
 		goto error;

 	return 0;
 bad_key:
-	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 	return -EINVAL;
 error:
 	return -EFAULT;
@@ -562,7 +555,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
 	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct device *dev;

-	spin_lock(&ctx->lock);
 	if (ctx->enc_cd) {
 		/* rekeying */
 		dev = &GET_DEV(ctx->inst->accel_dev);
@@ -576,7 +568,6 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
 		struct qat_crypto_instance *inst =
 				qat_crypto_get_instance_node(node);
 		if (!inst) {
-			spin_unlock(&ctx->lock);
 			return -EINVAL;
 		}

@@ -586,19 +577,16 @@ static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
 						  &ctx->enc_cd_paddr,
 						  GFP_ATOMIC);
 		if (!ctx->enc_cd) {
-			spin_unlock(&ctx->lock);
 			return -ENOMEM;
 		}
 		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
 						  &ctx->dec_cd_paddr,
 						  GFP_ATOMIC);
 		if (!ctx->dec_cd) {
-			spin_unlock(&ctx->lock);
 			goto out_free_enc;
 		}
 	}
-	spin_unlock(&ctx->lock);
-	if (qat_alg_aead_init_sessions(ctx, key, keylen))
+	if (qat_alg_aead_init_sessions(tfm, key, keylen))
 		goto out_free_all;

 	return 0;
@@ -649,22 +637,20 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 }

 static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
-			       struct scatterlist *assoc, int assoclen,
 			       struct scatterlist *sgl,
-			       struct scatterlist *sglout, uint8_t *iv,
-			       uint8_t ivlen,
+			       struct scatterlist *sglout,
 			       struct qat_crypto_request *qat_req)
 {
 	struct device *dev = &GET_DEV(inst->accel_dev);
-	int i, bufs = 0, sg_nctr = 0;
-	int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+	int i, sg_nctr = 0;
+	int n = sg_nents(sgl);
 	struct qat_alg_buf_list *bufl;
 	struct qat_alg_buf_list *buflout = NULL;
 	dma_addr_t blp;
 	dma_addr_t bloutp = 0;
 	struct scatterlist *sg;
 	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
-			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+			((1 + n) * sizeof(struct qat_alg_buf));

 	if (unlikely(!n))
 		return -EINVAL;
@@ -678,35 +664,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	if (unlikely(dma_mapping_error(dev, blp)))
 		goto err;

-	for_each_sg(assoc, sg, assoc_n, i) {
-		if (!sg->length)
-			continue;
-
-		if (!(assoclen > 0))
-			break;
-
-		bufl->bufers[bufs].addr =
-			dma_map_single(dev, sg_virt(sg),
-				       min_t(int, assoclen, sg->length),
-				       DMA_BIDIRECTIONAL);
-		bufl->bufers[bufs].len = min_t(int, assoclen, sg->length);
-		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
-			goto err;
-		bufs++;
-		assoclen -= sg->length;
-	}
-
-	if (ivlen) {
-		bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
-							 DMA_BIDIRECTIONAL);
-		bufl->bufers[bufs].len = ivlen;
-		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
-			goto err;
-		bufs++;
-	}
-
 	for_each_sg(sgl, sg, n, i) {
-		int y = sg_nctr + bufs;
+		int y = sg_nctr;

 		if (!sg->length)
 			continue;
@@ -719,7 +678,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 			goto err;
 		sg_nctr++;
 	}
-	bufl->num_bufs = sg_nctr + bufs;
+	bufl->num_bufs = sg_nctr;
 	qat_req->buf.bl = bufl;
 	qat_req->buf.blp = blp;
 	qat_req->buf.sz = sz;
@@ -729,7 +688,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,

 		n = sg_nents(sglout);
 		sz_out = sizeof(struct qat_alg_buf_list) +
-			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+			((1 + n) * sizeof(struct qat_alg_buf));
 		sg_nctr = 0;
 		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
 				       dev_to_node(&GET_DEV(inst->accel_dev)));
@@ -739,14 +698,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		if (unlikely(dma_mapping_error(dev, bloutp)))
 			goto err;
 		bufers = buflout->bufers;
-		/* For out of place operation dma map only data and
-		 * reuse assoc mapping and iv */
-		for (i = 0; i < bufs; i++) {
-			bufers[i].len = bufl->bufers[i].len;
-			bufers[i].addr = bufl->bufers[i].addr;
-		}
 		for_each_sg(sglout, sg, n, i) {
-			int y = sg_nctr + bufs;
+			int y = sg_nctr;

 			if (!sg->length)
 				continue;
@@ -759,7 +712,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 			bufers[y].len = sg->length;
 			sg_nctr++;
 		}
-		buflout->num_bufs = sg_nctr + bufs;
+		buflout->num_bufs = sg_nctr;
 		buflout->num_mapped_bufs = sg_nctr;
 		qat_req->buf.blout = buflout;
 		qat_req->buf.bloutp = bloutp;
@@ -773,7 +726,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 err:
 	dev_err(dev, "Failed to map buf for dma\n");
 	sg_nctr = 0;
-	for (i = 0; i < n + bufs; i++)
+	for (i = 0; i < n; i++)
 		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
 			dma_unmap_single(dev, bufl->bufers[i].addr,
 					 bufl->bufers[i].len,
@@ -784,7 +737,7 @@ err:
 	kfree(bufl);
 	if (sgl != sglout && buflout) {
 		n = sg_nents(sglout);
-		for (i = bufs; i < n + bufs; i++)
+		for (i = 0; i < n; i++)
 			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
 				dma_unmap_single(dev, buflout->bufers[i].addr,
 						 buflout->bufers[i].len,
@@ -847,9 +800,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	int digst_size = crypto_aead_crt(aead_tfm)->authsize;
 	int ret, ctr = 0;

-	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
-				  areq->src, areq->dst, areq->iv,
-				  AES_BLOCK_SIZE, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
 	if (unlikely(ret))
 		return ret;

@@ -863,12 +814,11 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	cipher_param->cipher_length = areq->cryptlen - digst_size;
-	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+	cipher_param->cipher_offset = areq->assoclen;
 	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
 	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
 	auth_param->auth_off = 0;
-	auth_param->auth_len = areq->assoclen +
-			       cipher_param->cipher_length + AES_BLOCK_SIZE;
+	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
 	do {
 		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
 	} while (ret == -EAGAIN && ctr++ < 10);
@@ -880,8 +830,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
 	return -EINPROGRESS;
 }

-static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
-				     int enc_iv)
+static int qat_alg_aead_enc(struct aead_request *areq)
 {
 	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
 	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
@@ -890,11 +839,10 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_auth_req_params *auth_param;
 	struct icp_qat_fw_la_bulk_req *msg;
+	uint8_t *iv = areq->iv;
 	int ret, ctr = 0;

-	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
-				  areq->src, areq->dst, iv, AES_BLOCK_SIZE,
-				  qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
 	if (unlikely(ret))
 		return ret;

@@ -909,16 +857,12 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
 	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
 	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

-	if (enc_iv) {
-		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
-		cipher_param->cipher_offset = areq->assoclen;
-	} else {
-		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
-		cipher_param->cipher_length = areq->cryptlen;
-		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
-	}
+	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+	cipher_param->cipher_length = areq->cryptlen;
+	cipher_param->cipher_offset = areq->assoclen;
+
 	auth_param->auth_off = 0;
-	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
+	auth_param->auth_len = areq->assoclen + areq->cryptlen;

 	do {
 		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
@@ -931,25 +875,6 @@ static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
 	return -EINPROGRESS;
 }

-static int qat_alg_aead_enc(struct aead_request *areq)
-{
-	return qat_alg_aead_enc_internal(areq, areq->iv, 0);
-}
-
-static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
-{
-	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
-	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
-	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
-	__be64 seq;
-
-	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
-	seq = cpu_to_be64(req->seq);
-	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
-	       &seq, sizeof(uint64_t));
-	return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
-}
-
 static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
 				     const uint8_t *key,
 				     unsigned int keylen)
@@ -1021,8 +946,7 @@ static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
 	struct icp_qat_fw_la_bulk_req *msg;
 	int ret, ctr = 0;

-	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
-				  NULL, 0, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
 	if (unlikely(ret))
 		return ret;

@@ -1059,8 +983,7 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
 	struct icp_qat_fw_la_bulk_req *msg;
 	int ret, ctr = 0;

-	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
-				  NULL, 0, qat_req);
+	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
 	if (unlikely(ret))
 		return ret;

@@ -1087,47 +1010,43 @@ static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
 	return -EINPROGRESS;
 }

-static int qat_alg_aead_init(struct crypto_tfm *tfm,
+static int qat_alg_aead_init(struct crypto_aead *tfm,
 			     enum icp_qat_hw_auth_algo hash,
 			     const char *hash_name)
 {
-	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

 	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
 	if (IS_ERR(ctx->hash_tfm))
-		return -EFAULT;
-	spin_lock_init(&ctx->lock);
+		return PTR_ERR(ctx->hash_tfm);
 	ctx->qat_hash_alg = hash;
-	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
-				sizeof(struct aead_request) +
-				sizeof(struct qat_crypto_request));
-	ctx->tfm = tfm;
+	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
+				     sizeof(struct qat_crypto_request));
 	return 0;
 }

-static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
 {
 	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
 }

-static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
 {
 	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
 }

-static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
 {
 	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
 }

-static void qat_alg_aead_exit(struct crypto_tfm *tfm)
+static void qat_alg_aead_exit(struct crypto_aead *tfm)
 {
-	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
 	struct qat_crypto_instance *inst = ctx->inst;
 	struct device *dev;

-	if (!IS_ERR(ctx->hash_tfm))
-		crypto_free_shash(ctx->hash_tfm);
+	crypto_free_shash(ctx->hash_tfm);

 	if (!inst)
 		return;
@@ -1184,73 +1103,61 @@ static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
 	qat_crypto_put_instance(inst);
 }

+
+static struct aead_alg qat_aeads[] = { {
+	.base = {
+		.cra_name = "authenc(hmac(sha1),cbc(aes))",
+		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
+		.cra_priority = 4001,
+		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_AEAD_NEW,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+		.cra_module = THIS_MODULE,
+	},
+	.init = qat_alg_aead_sha1_init,
+	.exit = qat_alg_aead_exit,
+	.setkey = qat_alg_aead_setkey,
+	.decrypt = qat_alg_aead_dec,
+	.encrypt = qat_alg_aead_enc,
+	.ivsize = AES_BLOCK_SIZE,
+	.maxauthsize = SHA1_DIGEST_SIZE,
+}, {
+	.base = {
+		.cra_name = "authenc(hmac(sha256),cbc(aes))",
+		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
+		.cra_priority = 4001,
+		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_AEAD_NEW,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+		.cra_module = THIS_MODULE,
+	},
+	.init = qat_alg_aead_sha256_init,
+	.exit = qat_alg_aead_exit,
+	.setkey = qat_alg_aead_setkey,
+	.decrypt = qat_alg_aead_dec,
+	.encrypt = qat_alg_aead_enc,
+	.ivsize = AES_BLOCK_SIZE,
+	.maxauthsize = SHA256_DIGEST_SIZE,
+}, {
+	.base = {
+		.cra_name = "authenc(hmac(sha512),cbc(aes))",
+		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
+		.cra_priority = 4001,
+		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_AEAD_NEW,
+		.cra_blocksize = AES_BLOCK_SIZE,
+		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
+		.cra_module = THIS_MODULE,
+	},
+	.init = qat_alg_aead_sha512_init,
+	.exit = qat_alg_aead_exit,
+	.setkey = qat_alg_aead_setkey,
+	.decrypt = qat_alg_aead_dec,
+	.encrypt = qat_alg_aead_enc,
+	.ivsize = AES_BLOCK_SIZE,
+	.maxauthsize = SHA512_DIGEST_SIZE,
+} };
+
 static struct crypto_alg qat_algs[] = { {
-	.cra_name = "authenc(hmac(sha1),cbc(aes))",
-	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
-	.cra_priority = 4001,
-	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_aead_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = qat_alg_aead_sha1_init,
-	.cra_exit = qat_alg_aead_exit,
-	.cra_u = {
-		.aead = {
-			.setkey = qat_alg_aead_setkey,
-			.decrypt = qat_alg_aead_dec,
-			.encrypt = qat_alg_aead_enc,
-			.givencrypt = qat_alg_aead_genivenc,
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA1_DIGEST_SIZE,
-		},
-	},
-}, {
-	.cra_name = "authenc(hmac(sha256),cbc(aes))",
-	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
-	.cra_priority = 4001,
-	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_aead_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = qat_alg_aead_sha256_init,
-	.cra_exit = qat_alg_aead_exit,
-	.cra_u = {
-		.aead = {
-			.setkey = qat_alg_aead_setkey,
-			.decrypt = qat_alg_aead_dec,
-			.encrypt = qat_alg_aead_enc,
-			.givencrypt = qat_alg_aead_genivenc,
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA256_DIGEST_SIZE,
-		},
-	},
-}, {
-	.cra_name = "authenc(hmac(sha512),cbc(aes))",
-	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
-	.cra_priority = 4001,
-	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_aead_type,
-	.cra_module = THIS_MODULE,
-	.cra_init = qat_alg_aead_sha512_init,
-	.cra_exit = qat_alg_aead_exit,
-	.cra_u = {
-		.aead = {
-			.setkey = qat_alg_aead_setkey,
-			.decrypt = qat_alg_aead_dec,
-			.encrypt = qat_alg_aead_enc,
-			.givencrypt = qat_alg_aead_genivenc,
-			.ivsize = AES_BLOCK_SIZE,
-			.maxauthsize = SHA512_DIGEST_SIZE,
-		},
-	},
-}, {
 	.cra_name = "cbc(aes)",
 	.cra_driver_name = "qat_aes_cbc",
 	.cra_priority = 4001,
@@ -1276,42 +1183,54 @@ static struct crypto_alg qat_algs[] = { {

 int qat_algs_register(void)
 {
-	int ret = 0;
+	int ret = 0, i;

 	mutex_lock(&algs_lock);
-	if (++active_devs == 1) {
-		int i;
+	if (++active_devs != 1)
+		goto unlock;

-		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
-			qat_algs[i].cra_flags =
-				(qat_algs[i].cra_type == &crypto_aead_type) ?
-				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
-				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+		qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

-		ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
-	}
+	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	if (ret)
+		goto unlock;
+
+	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
+		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_AEAD_NEW;
+
+	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+	if (ret)
+		goto unreg_algs;
+
+unlock:
 	mutex_unlock(&algs_lock);
 	return ret;
+
+unreg_algs:
+	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	goto unlock;
 }

 int qat_algs_unregister(void)
 {
-	int ret = 0;
-
 	mutex_lock(&algs_lock);
-	if (--active_devs == 0)
-		ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+	if (--active_devs != 0)
+		goto unlock;
+
+	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
+	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+
+unlock:
 	mutex_unlock(&algs_lock);
-	return ret;
+	return 0;
 }

 int qat_algs_init(void)
 {
-	crypto_get_default_rng();
 	return 0;
 }

 void qat_algs_exit(void)
 {
-	crypto_put_default_rng();
 }