ctx->cdata.keylen = keys.enckeylen;
ret = aead_set_sh_desc(aead);
-@@ -258,55 +284,139 @@ badkey:
+@@ -258,6 +284,468 @@ badkey:
return -EINVAL;
}
--static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-- const u8 *key, unsigned int keylen)
+static int tls_set_sh_desc(struct crypto_aead *tls)
- {
-- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
-- const char *alg_name = crypto_tfm_alg_name(tfm);
++{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ unsigned int ivsize = crypto_aead_ivsize(tls);
+ unsigned int blocksize = crypto_aead_blocksize(tls);
+static int tls_setkey(struct crypto_aead *tls, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
- struct device *jrdev = ctx->jrdev;
-- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-- u32 ctx1_iv_off = 0;
-- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-- OP_ALG_AAI_CTR_MOD128);
-- const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
++ struct device *jrdev = ctx->jrdev;
+ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+ struct crypto_authenc_keys keys;
- int ret = 0;
-
-- memcpy(ctx->key, key, keylen);
++ int ret = 0;
++
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
- #ifdef DEBUG
++#ifdef DEBUG
+ dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
- print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
- #endif
-- /*
-- * AES-CTR needs to load IV in CONTEXT1 reg
-- * at an offset of 128bits (16bytes)
-- * CONTEXT1[255:128] = IV
-- */
-- if (ctr_mode)
-- ctx1_iv_off = 16;
-
- /*
-- * RFC3686 specific:
-- * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
-- * | *key = {KEY, NONCE}
++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
++#endif
++
++ /*
+ * If DKP is supported, use it in the shared descriptor to generate
+ * the split key.
- */
-- if (is_rfc3686) {
-- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
-- keylen -= CTR_RFC3686_NONCE_SIZE;
++ */
+ if (ctrlpriv->era >= 6) {
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
+ keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma,
+ ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ goto skip_split_key;
- }
-
-- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-- ctx->cdata.keylen = keylen;
-- ctx->cdata.key_virt = ctx->key;
-- ctx->cdata.key_inline = true;
++ }
++
+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
+ if (ret)
+ goto badkey;
-
-- /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
-- cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
-- is_rfc3686, ctx1_iv_off);
-- cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
-- is_rfc3686, ctx1_iv_off);
-- cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
-- ivsize, is_rfc3686, ctx1_iv_off);
++
+ /* postpend encryption key to auth split key */
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+
+skip_split_key:
+ ctx->cdata.keylen = keys.enckeylen;
+
+ ret = tls_set_sh_desc(tls);
+ if (ret)
+ goto badkey;
-
- /* Now update the driver contexts with the new shared descriptor */
- if (ctx->drv_ctx[ENCRYPT]) {
-@@ -327,42 +437,84 @@ static int ablkcipher_setkey(struct cryp
- }
- }
-
-- if (ctx->drv_ctx[GIVENCRYPT]) {
-- ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
-- ctx->sh_desc_givenc);
-- if (ret) {
-- dev_err(jrdev, "driver givenc context update failed\n");
-- goto badkey;
-- }
-- }
--
- return ret;
- badkey:
-- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
++ goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
++ goto badkey;
++ }
++ }
++
++ return ret;
++badkey:
+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
--static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-- const u8 *key, unsigned int keylen)
++ return -EINVAL;
++}
++
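The key material laid out above — the HMAC split key, padded to its algorithm-specific length, with the AES key appended behind it — can be modelled in plain C. A minimal userspace sketch; SPLIT_KEY_PAD_LEN and the struct below are illustrative assumptions, not the driver's types:

#include <stdio.h>
#include <string.h>

/* Model of the key buffer built by tls_setkey: the HMAC split key,
 * padded to the algorithm's pad length, followed by the AES key.
 * The real pad length comes from split_key_len()/split_key_pad_len(). */
#define SPLIT_KEY_PAD_LEN 64	/* assumed pad for SHA-1; illustrative */

struct model_key {
	unsigned char buf[SPLIT_KEY_PAD_LEN + 32];
	unsigned int authkeylen_pad;
	unsigned int enckeylen;
};

static void model_build_key(struct model_key *k,
			    const unsigned char *split_key, unsigned int slen,
			    const unsigned char *enckey, unsigned int elen)
{
	memset(k->buf, 0, sizeof(k->buf));
	memcpy(k->buf, split_key, slen);		/* split key first */
	k->authkeylen_pad = SPLIT_KEY_PAD_LEN;
	memcpy(k->buf + k->authkeylen_pad, enckey, elen); /* AES key appended */
	k->enckeylen = elen;
}

int main(void)
{
	struct model_key k;
	unsigned char split[40] = { 0 }, aes[16] = { 0 };

	model_build_key(&k, split, sizeof(split), aes, sizeof(aes));
	printf("enc key starts at offset %u\n", k.authkeylen_pad);
	return 0;
}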
+static int gcm_set_sh_desc(struct crypto_aead *aead)
- {
-- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-- struct device *jrdev = ctx->jrdev;
-- int ret = 0;
++{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
-
-- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-- dev_err(jrdev, "key size mismatch\n");
-- goto badkey;
++
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
- }
-
++ }
++
+ cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ int ret;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
- memcpy(ctx->key, key, keylen);
-- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir);
- ctx->cdata.keylen = keylen;
-- ctx->cdata.key_virt = ctx->key;
-- ctx->cdata.key_inline = true;
-
-- /* xts ablkcipher encrypt, decrypt shared descriptors */
-- cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
-- cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
++ ctx->cdata.keylen = keylen;
++
+ ret = gcm_set_sh_desc(aead);
+ if (ret)
+ return ret;
-
- /* Now update the driver contexts with the new shared descriptor */
- if (ctx->drv_ctx[ENCRYPT]) {
-@@ -370,7 +522,7 @@ static int xts_ablkcipher_setkey(struct
- ctx->sh_desc_enc);
- if (ret) {
- dev_err(jrdev, "driver enc context update failed\n");
-- goto badkey;
++
++ /* Now update the driver contexts with the new shared descriptor */
++ if (ctx->drv_ctx[ENCRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
++ ctx->sh_desc_enc);
++ if (ret) {
++ dev_err(jrdev, "driver enc context update failed\n");
+ return ret;
- }
- }
-
-@@ -379,151 +531,829 @@ static int xts_ablkcipher_setkey(struct
- ctx->sh_desc_dec);
- if (ret) {
- dev_err(jrdev, "driver dec context update failed\n");
-- goto badkey;
++ }
++ }
++
++ if (ctx->drv_ctx[DECRYPT]) {
++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
++ ctx->sh_desc_dec);
++ if (ret) {
++ dev_err(jrdev, "driver dec context update failed\n");
+ return ret;
- }
- }
-
-- return ret;
--badkey:
-- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-- return -EINVAL;
++ }
++ }
++
+ return 0;
- }
-
--/*
-- * aead_edesc - s/w-extended aead descriptor
-- * @src_nents: number of segments in input scatterlist
-- * @dst_nents: number of segments in output scatterlist
-- * @iv_dma: dma address of iv for checking continuity and link table
-- * @qm_sg_bytes: length of dma mapped h/w link table
-- * @qm_sg_dma: bus physical mapped address of h/w link table
-- * @assoclen: associated data length, in CAAM endianness
-- * @assoclen_dma: bus physical mapped address of req->assoclen
-- * @drv_req: driver-specific request structure
-- * @sgt: the h/w link table, followed by IV
-- */
--struct aead_edesc {
-- int src_nents;
-- int dst_nents;
-- dma_addr_t iv_dma;
-- int qm_sg_bytes;
-- dma_addr_t qm_sg_dma;
-- unsigned int assoclen;
-- dma_addr_t assoclen_dma;
-- struct caam_drv_req drv_req;
-- struct qm_sg_entry sgt[0];
--};
++}
++
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
-
--/*
-- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
-- * @src_nents: number of segments in input scatterlist
-- * @dst_nents: number of segments in output scatterlist
-- * @iv_dma: dma address of iv for checking continuity and link table
-- * @qm_sg_bytes: length of dma mapped h/w link table
-- * @qm_sg_dma: bus physical mapped address of h/w link table
-- * @drv_req: driver-specific request structure
-- * @sgt: the h/w link table, followed by IV
-- */
--struct ablkcipher_edesc {
-- int src_nents;
-- int dst_nents;
-- dma_addr_t iv_dma;
-- int qm_sg_bytes;
-- dma_addr_t qm_sg_dma;
-- struct caam_drv_req drv_req;
-- struct qm_sg_entry sgt[0];
--};
++
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
-
--static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
-- enum optype type)
--{
- /*
-- * This function is called on the fast path with values of 'type'
-- * known at compile time. Invalid arguments are not expected and
-- * thus no checks are made.
++
++ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
- */
-- struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
-- u32 *desc;
++ */
+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
-
-- if (unlikely(!drv_ctx)) {
-- spin_lock(&ctx->lock);
++
+ cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ ctx->authsize, true);
-
-- /* Read again to check if some other core init drv_ctx */
-- drv_ctx = ctx->drv_ctx[type];
-- if (!drv_ctx) {
-- int cpu;
++
+ /*
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
-
-- if (type == ENCRYPT)
-- desc = ctx->sh_desc_enc;
-- else if (type == DECRYPT)
-- desc = ctx->sh_desc_dec;
-- else /* (type == GIVENCRYPT) */
-- desc = ctx->sh_desc_givenc;
++
+ cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ ctx->authsize, true);
+
+ return 0;
+}
+
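Every *_set_sh_desc function above repeats the same decision: the job descriptor plus shared descriptor must fit the 64-word descriptor buffer, so the key is only inlined when enough bytes remain. A standalone model of that check, with placeholder values standing in for the driver's DESC_* constants:

#include <stdbool.h>
#include <stdio.h>

#define DESC_BYTES_MAX	(64 * 4)	/* 64-word descriptor buffer, 4B words */
#define JOB_IO_LEN	40		/* placeholder for DESC_JOB_IO_LEN */

/* Returns true when a key of keylen bytes may be inlined into a shared
 * descriptor whose body takes desc_body_len bytes. */
static bool key_fits_inline(int desc_body_len, int keylen)
{
	int rem_bytes = DESC_BYTES_MAX - JOB_IO_LEN - keylen;

	return rem_bytes >= desc_body_len;
}

int main(void)
{
	printf("16B key, 120B desc: %s\n",
	       key_fits_inline(120, 16) ? "inline" : "by reference");
	printf("32B key, 220B desc: %s\n",
	       key_fits_inline(220, 32) ? "inline" : "by reference");
	return 0;
}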
-+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-+ const u8 *key, unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
-+ const char *alg_name = crypto_tfm_alg_name(tfm);
-+ struct device *jrdev = ctx->jrdev;
-+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-+ u32 ctx1_iv_off = 0;
-+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
-+ OP_ALG_AAI_CTR_MOD128);
-+ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
-+ int ret = 0;
-+
-+ memcpy(ctx->key, key, keylen);
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-+#endif
-+ /*
-+ * AES-CTR needs to load IV in CONTEXT1 reg
-+ * at an offset of 128bits (16bytes)
-+ * CONTEXT1[255:128] = IV
-+ */
-+ if (ctr_mode)
-+ ctx1_iv_off = 16;
-+
-+ /*
-+ * RFC3686 specific:
-+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
-+ * | *key = {KEY, NONCE}
-+ */
-+ if (is_rfc3686) {
-+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
-+ keylen -= CTR_RFC3686_NONCE_SIZE;
-+ }
-+
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-+ ctx->cdata.keylen = keylen;
-+ ctx->cdata.key_virt = ctx->key;
-+ ctx->cdata.key_inline = true;
-+
-+ /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
-+ cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
-+ is_rfc3686, ctx1_iv_off);
-+ cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
-+ is_rfc3686, ctx1_iv_off);
-+ cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
-+ ivsize, is_rfc3686, ctx1_iv_off);
-+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
-+ if (ret) {
-+ dev_err(jrdev, "driver enc context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[DECRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
-+ ctx->sh_desc_dec);
-+ if (ret) {
-+ dev_err(jrdev, "driver dec context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[GIVENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
-+ ctx->sh_desc_givenc);
-+ if (ret) {
-+ dev_err(jrdev, "driver givenc context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ return ret;
-+badkey:
-+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+}
-+
-+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-+ const u8 *key, unsigned int keylen)
-+{
-+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-+ struct device *jrdev = ctx->jrdev;
-+ int ret = 0;
-+
-+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
-+ dev_err(jrdev, "key size mismatch\n");
-+ goto badkey;
-+ }
-+
-+ memcpy(ctx->key, key, keylen);
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-+ ctx->cdata.keylen = keylen;
-+ ctx->cdata.key_virt = ctx->key;
-+ ctx->cdata.key_inline = true;
-+
-+ /* xts ablkcipher encrypt, decrypt shared descriptors */
-+ cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
-+ cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
-+
-+ /* Now update the driver contexts with the new shared descriptor */
-+ if (ctx->drv_ctx[ENCRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
-+ ctx->sh_desc_enc);
-+ if (ret) {
-+ dev_err(jrdev, "driver enc context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ if (ctx->drv_ctx[DECRYPT]) {
-+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
-+ ctx->sh_desc_dec);
-+ if (ret) {
-+ dev_err(jrdev, "driver dec context update failed\n");
-+ goto badkey;
-+ }
-+ }
-+
-+ return ret;
-+badkey:
-+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-+ return -EINVAL;
-+}
-+
-+/*
-+ * aead_edesc - s/w-extended aead descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @assoclen: associated data length, in CAAM endianness
-+ * @assoclen_dma: bus physical mapped address of req->assoclen
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table, followed by IV
-+ */
-+struct aead_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ unsigned int assoclen;
-+ dma_addr_t assoclen_dma;
-+ struct caam_drv_req drv_req;
-+ struct qm_sg_entry sgt[0];
-+};
-+
+ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+ {
+@@ -414,6 +902,29 @@ struct aead_edesc {
+ };
+
+ /*
++ * tls_edesc - s/w-extended tls descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
++ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @assoclen: associated data length, in CAAM endianness
-+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @drv_req: driver-specific request structure
+ * @sgt: the h/w link table, followed by IV
+ */
-+struct aead_edesc {
++struct tls_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
-+ unsigned int assoclen;
-+ dma_addr_t assoclen_dma;
++ struct scatterlist tmp[2];
++ struct scatterlist *dst;
+ struct caam_drv_req drv_req;
+ struct qm_sg_entry sgt[0];
+};
+
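tls_edesc appends the h/w link table (sgt[]) and the IV to a single qi_cache allocation, which is why the IV pointer is later computed as (u8 *)(sg_table + qm_sg_ents). A userspace model of that trailer layout; the type and field names are illustrative, not the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Model of the tls_edesc trailer: the h/w link table is a flexible
 * array and the IV is carved out right behind it, so one allocation
 * covers both. */
struct qm_sg_entry_model { unsigned long long addr; unsigned int len_off; };

struct tls_edesc_model {
	int qm_sg_bytes;
	struct qm_sg_entry_model sgt[];
};

int main(void)
{
	int qm_sg_ents = 4, ivsize = 16;
	struct tls_edesc_model *e;
	unsigned char *iv;

	e = malloc(sizeof(*e) +
		   qm_sg_ents * sizeof(struct qm_sg_entry_model) + ivsize);
	if (!e)
		return 1;
	e->qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry_model);
	iv = (unsigned char *)(e->sgt + qm_sg_ents); /* IV after S/G table */
	memset(iv, 0, ivsize);
	printf("IV lives %zu bytes into the allocation\n",
	       (size_t)(iv - (unsigned char *)e));
	free(e);
	return 0;
}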
+/*
-+ * tls_edesc - s/w-extended tls descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table, followed by IV
-+ */
-+struct tls_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ struct scatterlist tmp[2];
-+ struct scatterlist *dst;
-+ struct caam_drv_req drv_req;
-+ struct qm_sg_entry sgt[0];
-+};
-+
-+/*
-+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
-+ * @src_nents: number of segments in input scatterlist
-+ * @dst_nents: number of segments in output scatterlist
-+ * @iv_dma: dma address of iv for checking continuity and link table
-+ * @qm_sg_bytes: length of dma mapped h/w link table
-+ * @qm_sg_dma: bus physical mapped address of h/w link table
-+ * @drv_req: driver-specific request structure
-+ * @sgt: the h/w link table, followed by IV
-+ */
-+struct ablkcipher_edesc {
-+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
-+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+ struct caam_drv_req drv_req;
-+ struct qm_sg_entry sgt[0];
-+};
-+
-+static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
-+ enum optype type)
-+{
-+ /*
-+ * This function is called on the fast path with values of 'type'
-+ * known at compile time. Invalid arguments are not expected and
-+ * thus no checks are made.
-+ */
-+ struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
-+ u32 *desc;
-+
-+ if (unlikely(!drv_ctx)) {
-+ spin_lock(&ctx->lock);
-+
-+ /* Read again to check if some other core init drv_ctx */
-+ drv_ctx = ctx->drv_ctx[type];
-+ if (!drv_ctx) {
-+ int cpu;
-+
-+ if (type == ENCRYPT)
-+ desc = ctx->sh_desc_enc;
-+ else if (type == DECRYPT)
-+ desc = ctx->sh_desc_dec;
-+ else /* (type == GIVENCRYPT) */
-+ desc = ctx->sh_desc_givenc;
-+
-+ cpu = smp_processor_id();
-+ drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
-+ if (likely(!IS_ERR_OR_NULL(drv_ctx)))
-+ drv_ctx->op_type = type;
-+
-+ ctx->drv_ctx[type] = drv_ctx;
-+ }
-+
-+ spin_unlock(&ctx->lock);
-+ }
-+
-+ return drv_ctx;
-+}
-+
-+static void caam_unmap(struct device *dev, struct scatterlist *src,
-+ struct scatterlist *dst, int src_nents,
-+ int dst_nents, dma_addr_t iv_dma, int ivsize,
-+ enum optype op_type, dma_addr_t qm_sg_dma,
-+ int qm_sg_bytes)
-+{
-+ if (dst != src) {
-+ if (src_nents)
-+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
-+ } else {
-+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
-+ }
-+
-+ if (iv_dma)
-+ dma_unmap_single(dev, iv_dma, ivsize,
-+ op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
-+ DMA_TO_DEVICE);
-+ if (qm_sg_bytes)
-+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
-+}
-+
-+static void aead_unmap(struct device *dev,
-+ struct aead_edesc *edesc,
-+ struct aead_request *req)
-+{
-+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
-+ int ivsize = crypto_aead_ivsize(aead);
-+
-+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-+ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
-+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
-+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
-+}
-+
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+@@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+ }
+
+static void tls_unmap(struct device *dev,
+ struct tls_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
+ edesc->dst_nents, edesc->iv_dma, ivsize,
+ edesc->drv_req.drv_ctx->op_type, edesc->qm_sg_dma,
+ edesc->qm_sg_bytes);
+}
+
-+static void ablkcipher_unmap(struct device *dev,
-+ struct ablkcipher_edesc *edesc,
-+ struct ablkcipher_request *req)
+ static void ablkcipher_unmap(struct device *dev,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req)
+@@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re
+ qidev = caam_ctx->qidev;
+
+ if (unlikely(status)) {
++ u32 ssrc = status & JRSTA_SSRC_MASK;
++ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
++
+ caam_jr_strstatus(qidev, status);
+- ecode = -EIO;
++ /*
++ * verify hw auth check passed else return -EBADMSG
++ */
++ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
++ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
++ ecode = -EBADMSG;
++ else
++ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
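The status decode above distinguishes an ICV mismatch (an authentication failure, -EBADMSG) from other CCB errors (-EIO). A userspace model, with the JRSTA_* field values copied in for illustration — the real masks live in the driver's regs.h:

#include <stdio.h>

/* Model of the status decode in aead_done(); the constants mirror the
 * CAAM job-ring status layout (JRSTA_*). */
#define SSRC_MASK	0xf0000000
#define SSRC_CCB_ERROR	0x20000000
#define ERRID_MASK	0x000000ff
#define ERRID_ICVCHK	0x0000000a

static int status_to_errno(unsigned int status)
{
	if (!status)
		return 0;
	/* ICV mismatch is an authentication failure, not an I/O error */
	if ((status & SSRC_MASK) == SSRC_CCB_ERROR &&
	    (status & ERRID_MASK) == ERRID_ICVCHK)
		return -74;	/* -EBADMSG */
	return -5;		/* -EIO */
}

int main(void)
{
	printf("%d\n", status_to_errno(0x2000000a)); /* ICV check failed */
	printf("%d\n", status_to_errno(0x40000001)); /* some other error */
	return 0;
}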
+@@ -647,9 +1181,24 @@ static struct aead_edesc *aead_edesc_all
+ /*
+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+ * Input is not contiguous.
++ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
++ * the end of the table by allocating more S/G entries. Logic:
++ * if (src != dst && output S/G)
++ * pad output S/G, if needed
++ * else if (src == dst && S/G)
++ * overlapping S/Gs; pad one of them
++ * else if (input S/G) ...
++ * pad input S/G, if needed
+ */
+- qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
+- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
++ qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
++ if (mapped_dst_nents > 1)
++ qm_sg_ents += ALIGN(mapped_dst_nents, 4);
++ else if ((req->src == req->dst) && (mapped_src_nents > 1))
++ qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
++ 1 + !!ivsize + ALIGN(mapped_src_nents, 4));
++ else
++ qm_sg_ents = ALIGN(qm_sg_ents, 4);
++
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
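The padding rule from the comment can be exercised in isolation: ALIGN(x, 4) rounds the S/G entry count up to a multiple of four so the engine's 4-entry burst reads never run past the end of the table. A compact model of the sizing logic above (simplified: one assoclen entry, optional IV, then source and destination):

#include <stdio.h>

#define ALIGN4(x) (((x) + 3) & ~3)	/* ALIGN(x, 4) */

static int qm_sg_entries(int src, int dst, int has_iv, int in_place)
{
	int ents = 1 + !!has_iv + src;

	if (dst > 1)
		ents += ALIGN4(dst);		/* pad separate output S/G */
	else if (in_place && src > 1)
		ents = (ALIGN4(ents) > 1 + !!has_iv + ALIGN4(src)) ?
		       ALIGN4(ents) : 1 + !!has_iv + ALIGN4(src);
	else
		ents = ALIGN4(ents);		/* pad the input S/G */

	return ents;
}

int main(void)
{
	printf("src=3 dst=5 iv: %d entries\n", qm_sg_entries(3, 5, 1, 0));
	printf("src=dst=6  iv: %d entries\n", qm_sg_entries(6, 1, 1, 1));
	return 0;
}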
+@@ -785,6 +1334,260 @@ static int aead_decrypt(struct aead_requ
+ return aead_crypt(req, false);
+ }
+
++static int ipsec_gcm_encrypt(struct aead_request *req)
+{
-+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ if (req->assoclen < 8)
++ return -EINVAL;
+
-+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-+ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
-+ edesc->qm_sg_dma, edesc->qm_sg_bytes);
++ return aead_crypt(req, true);
++}
++
++static int ipsec_gcm_decrypt(struct aead_request *req)
++{
++ if (req->assoclen < 8)
++ return -EINVAL;
++
++ return aead_crypt(req, false);
+}
+
-+static void aead_done(struct caam_drv_req *drv_req, u32 status)
++static void tls_done(struct caam_drv_req *drv_req, u32 status)
+{
+ struct device *qidev;
-+ struct aead_edesc *edesc;
++ struct tls_edesc *edesc;
+ struct aead_request *aead_req = drv_req->app_ctx;
+ struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
+ struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ qidev = caam_ctx->qidev;
+
+ if (unlikely(status)) {
-+ u32 ssrc = status & JRSTA_SSRC_MASK;
-+ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;
-+
+ caam_jr_strstatus(qidev, status);
-+ /*
-+ * verify hw auth check passed else return -EBADMSG
-+ */
-+ if (ssrc == JRSTA_SSRC_CCB_ERROR &&
-+ err_id == JRSTA_CCBERR_ERRID_ICVCHK)
-+ ecode = -EBADMSG;
-+ else
-+ ecode = -EIO;
++ ecode = -EIO;
+ }
+
+ edesc = container_of(drv_req, typeof(*edesc), drv_req);
-+ aead_unmap(qidev, edesc, aead_req);
++ tls_unmap(qidev, edesc, aead_req);
+
+ aead_request_complete(aead_req, ecode);
+ qi_cache_free(edesc);
+}
+
+/*
-+ * allocate and map the aead extended descriptor
++ * allocate and map the tls extended descriptor
+ */
-+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-+ bool encrypt)
++static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
++ unsigned int blocksize = crypto_aead_blocksize(aead);
++ unsigned int padsize, authsize;
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *qidev = ctx->qidev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-+ GFP_KERNEL : GFP_ATOMIC;
++ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-+ struct aead_edesc *edesc;
++ struct tls_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
-+ unsigned int authsize = ctx->authsize;
-+ int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
++ u8 *iv;
++ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct qm_sg_entry *sg_table, *fd_sgt;
+ struct caam_drv_ctx *drv_ctx;
+ enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
++ struct scatterlist *dst;
++
++ if (encrypt) {
++ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
++ blocksize);
++ authsize = ctx->authsize + padsize;
++ } else {
++ authsize = ctx->authsize;
++ }
+
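The encrypt branch above grows the output by the ICV plus TLS 1.0-style CBC padding, padsize = blocksize - ((cryptlen + authsize) % blocksize), so the pad always adds 1..blocksize bytes. A userspace check of that arithmetic:

#include <stdio.h>

/* Model of the encrypt-side length math in tls_edesc_alloc(). */
static unsigned int tls_out_growth(unsigned int cryptlen,
				   unsigned int authsize,
				   unsigned int blocksize)
{
	unsigned int padsize = blocksize - ((cryptlen + authsize) % blocksize);

	return authsize + padsize;	/* what the driver calls authsize */
}

int main(void)
{
	/* 100B payload, SHA-1 HMAC (20B), AES block (16B) -> 28B growth */
	printf("growth: %u\n", tls_out_growth(100, 20, 16));
	/* an already block-aligned input still gains a full pad block */
	printf("growth: %u\n", tls_out_growth(108, 20, 16));
	return 0;
}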
+ drv_ctx = get_drv_ctx(ctx, op_type);
+ if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-+ return (struct aead_edesc *)drv_ctx;
++ return (struct tls_edesc *)drv_ctx;
+
-+ /* allocate space for base edesc and hw desc commands, link tables */
++ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_alloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(qidev, "could not allocate extended descriptor\n");
+ if (likely(req->src == req->dst)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
-+ (encrypt ? authsize : 0));
++ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(qidev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
++ dst = req->dst;
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen);
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
-+ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-+ req->cryptlen +
-+ (encrypt ? authsize :
-+ (-authsize)));
++ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
++ dst_nents = sg_nents_for_len(dst, req->cryptlen +
++ (encrypt ? authsize : 0));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-+ req->assoclen + req->cryptlen +
-+ (encrypt ? authsize : (-authsize)));
++ req->cryptlen +
++ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+ mapped_src_nents = 0;
+ }
+
-+ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
++ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(qidev, "unable to map destination\n");
+ }
+ }
+
-+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
-+ ivsize = crypto_aead_ivsize(aead);
-+
+ /*
-+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
++ * Create S/G table: IV, src, dst.
+ * Input is not contiguous.
-+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
-+ * the end of the table by allocating more S/G entries. Logic:
-+ * if (src != dst && output S/G)
-+ * pad output S/G, if needed
-+ * else if (src == dst && S/G)
-+ * overlapping S/Gs; pad one of them
-+ * else if (input S/G) ...
-+ * pad input S/G, if needed
+ */
-+ qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
-+ if (mapped_dst_nents > 1)
-+ qm_sg_ents += ALIGN(mapped_dst_nents, 4);
-+ else if ((req->src == req->dst) && (mapped_src_nents > 1))
-+ qm_sg_ents = max(ALIGN(qm_sg_ents, 4),
-+ 1 + !!ivsize + ALIGN(mapped_src_nents, 4));
-+ else
-+ qm_sg_ents = ALIGN(qm_sg_ents, 4);
-+
++ qm_sg_ents = 1 + mapped_src_nents +
++ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
-+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
-+ CAAM_QI_MEMCACHE_SIZE)) {
-+ dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
-+ qm_sg_ents, ivsize);
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-+ 0, 0, 0, 0);
++
++ ivsize = crypto_aead_ivsize(aead);
++ iv = (u8 *)(sg_table + qm_sg_ents);
++ /* Make sure IV is located in a DMAable area */
++ memcpy(iv, req->iv, ivsize);
++ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
++ if (dma_mapping_error(qidev, iv_dma)) {
++ dev_err(qidev, "unable to map IV\n");
++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
++ 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
-+ if (ivsize) {
-+ u8 *iv = (u8 *)(sg_table + qm_sg_ents);
-+
-+ /* Make sure IV is located in a DMAable area */
-+ memcpy(iv, req->iv, ivsize);
-+
-+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, iv_dma)) {
-+ dev_err(qidev, "unable to map IV\n");
-+ caam_unmap(qidev, req->src, req->dst, src_nents,
-+ dst_nents, 0, 0, 0, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
-+ }
-+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
++ edesc->dst = dst;
+ edesc->iv_dma = iv_dma;
+ edesc->drv_req.app_ctx = req;
-+ edesc->drv_req.cbk = aead_done;
++ edesc->drv_req.cbk = tls_done;
+ edesc->drv_req.drv_ctx = drv_ctx;
+
-+ edesc->assoclen = cpu_to_caam32(req->assoclen);
-+ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
-+ dev_err(qidev, "unable to map assoclen\n");
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, op_type, 0, 0);
-+ qi_cache_free(edesc);
-+ return ERR_PTR(-ENOMEM);
-+ }
++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
++ qm_sg_index = 1;
+
-+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
-+ qm_sg_index++;
-+ if (ivsize) {
-+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
-+ qm_sg_index++;
-+ }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
-+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
++ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(qidev, qm_sg_dma)) {
+ dev_err(qidev, "unable to map S/G table\n");
-+ dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
-+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-+ iv_dma, ivsize, op_type, 0, 0);
++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
++ ivsize, op_type, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
-+ out_len = req->assoclen + req->cryptlen +
-+ (encrypt ? ctx->authsize : (-ctx->authsize));
-+ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
++ out_len = req->cryptlen + (encrypt ? authsize : 0);
++ in_len = ivsize + req->assoclen + req->cryptlen;
+
+ fd_sgt = &edesc->drv_req.fd_sgt[0];
++
+ dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
+
-+ if (req->dst == req->src) {
-+ if (mapped_src_nents == 1)
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
-+ out_len, 0);
-+ else
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-+ (1 + !!ivsize) * sizeof(*sg_table),
-+ out_len, 0);
-+ } else if (mapped_dst_nents == 1) {
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
-+ 0);
-+ } else {
++ if (req->dst == req->src)
++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
++ (sg_nents_for_len(req->src, req->assoclen) +
++ 1) * sizeof(*sg_table), out_len, 0);
++ else if (mapped_dst_nents == 1)
++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
++ else
+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
+ qm_sg_index, out_len, 0);
-+ }
+
+ return edesc;
+}
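The frame-descriptor lengths built at the end of tls_edesc_alloc reduce to a simple rule: input covers IV + AAD + payload, output covers the payload plus (on encrypt) the padded ICV — the AAD is never written back. A small userspace model; 13 is the usual TLS record AAD length, used here only as an example value:

#include <stdio.h>

struct tls_lens { unsigned int in_len, out_len; };

/* Mirrors the in_len/out_len computation in tls_edesc_alloc().
 * authsize is already pad-adjusted on the encrypt side. */
static struct tls_lens tls_fd_lens(unsigned int ivsize, unsigned int assoclen,
				   unsigned int cryptlen, unsigned int authsize,
				   int encrypt)
{
	struct tls_lens l;

	l.in_len = ivsize + assoclen + cryptlen;
	l.out_len = cryptlen + (encrypt ? authsize : 0);
	return l;
}

int main(void)
{
	struct tls_lens l = tls_fd_lens(16, 13, 100, 28, 1);

	printf("in=%u out=%u\n", l.in_len, l.out_len);	/* in=129 out=128 */
	return 0;
}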
-
-- cpu = smp_processor_id();
-- drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
-- if (likely(!IS_ERR_OR_NULL(drv_ctx)))
-- drv_ctx->op_type = type;
-+static inline int aead_crypt(struct aead_request *req, bool encrypt)
++
++static int tls_crypt(struct aead_request *req, bool encrypt)
+{
-+ struct aead_edesc *edesc;
++ struct tls_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ret;
-
-- ctx->drv_ctx[type] = drv_ctx;
-- }
++
+ if (unlikely(caam_congested))
+ return -EAGAIN;
-
-- spin_unlock(&ctx->lock);
-+ /* allocate extended descriptor */
-+ edesc = aead_edesc_alloc(req, encrypt);
++
++ edesc = tls_edesc_alloc(req, encrypt);
+ if (IS_ERR_OR_NULL(edesc))
+ return PTR_ERR(edesc);
+
-+ /* Create and submit job descriptor */
+ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
-+ aead_unmap(ctx->qidev, edesc, req);
++ tls_unmap(ctx->qidev, edesc, req);
+ qi_cache_free(edesc);
- }
-
-- return drv_ctx;
-+ return ret;
- }
-
--static void caam_unmap(struct device *dev, struct scatterlist *src,
-- struct scatterlist *dst, int src_nents,
-- int dst_nents, dma_addr_t iv_dma, int ivsize,
-- enum optype op_type, dma_addr_t qm_sg_dma,
-- int qm_sg_bytes)
-+static int aead_encrypt(struct aead_request *req)
- {
-- if (dst != src) {
-- if (src_nents)
-- dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
-- } else {
-- dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
-- }
-+ return aead_crypt(req, true);
-+}
-
-- if (iv_dma)
-- dma_unmap_single(dev, iv_dma, ivsize,
-- op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
-- DMA_TO_DEVICE);
-- if (qm_sg_bytes)
-- dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
-+static int aead_decrypt(struct aead_request *req)
-+{
-+ return aead_crypt(req, false);
- }
-
--static void aead_unmap(struct device *dev,
-- struct aead_edesc *edesc,
-- struct aead_request *req)
-+static int ipsec_gcm_encrypt(struct aead_request *req)
- {
-- struct crypto_aead *aead = crypto_aead_reqtfm(req);
-- int ivsize = crypto_aead_ivsize(aead);
-+ if (req->assoclen < 8)
-+ return -EINVAL;
-
-- caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
-- edesc->qm_sg_dma, edesc->qm_sg_bytes);
-- dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
-+ return aead_crypt(req, true);
- }
-
--static void ablkcipher_unmap(struct device *dev,
-- struct ablkcipher_edesc *edesc,
-- struct ablkcipher_request *req)
-+static int ipsec_gcm_decrypt(struct aead_request *req)
- {
-- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
-+ if (req->assoclen < 8)
-+ return -EINVAL;
-
-- caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
-- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
-- edesc->qm_sg_dma, edesc->qm_sg_bytes);
-+ return aead_crypt(req, false);
- }
-
--static void aead_done(struct caam_drv_req *drv_req, u32 status)
-+static void tls_done(struct caam_drv_req *drv_req, u32 status)
- {
- struct device *qidev;
-- struct aead_edesc *edesc;
-+ struct tls_edesc *edesc;
- struct aead_request *aead_req = drv_req->app_ctx;
- struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
- struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
-@@ -537,41 +1367,51 @@ static void aead_done(struct caam_drv_re
- }
-
- edesc = container_of(drv_req, typeof(*edesc), drv_req);
-- aead_unmap(qidev, edesc, aead_req);
-+ tls_unmap(qidev, edesc, aead_req);
-
- aead_request_complete(aead_req, ecode);
- qi_cache_free(edesc);
- }
-
- /*
-- * allocate and map the aead extended descriptor
-+ * allocate and map the tls extended descriptor
- */
--static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
-- bool encrypt)
-+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
- {
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
-+ unsigned int blocksize = crypto_aead_blocksize(aead);
-+ unsigned int padsize, authsize;
- struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
- typeof(*alg), aead);
- struct device *qidev = ctx->qidev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-- GFP_KERNEL : GFP_ATOMIC;
-+ GFP_KERNEL : GFP_ATOMIC;
- int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
-- struct aead_edesc *edesc;
-+ struct tls_edesc *edesc;
- dma_addr_t qm_sg_dma, iv_dma = 0;
- int ivsize = 0;
-- unsigned int authsize = ctx->authsize;
-- int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
-+ u8 *iv;
-+ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
- int in_len, out_len;
- struct qm_sg_entry *sg_table, *fd_sgt;
- struct caam_drv_ctx *drv_ctx;
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
-+ struct scatterlist *dst;
-+
-+ if (encrypt) {
-+ padsize = blocksize - ((req->cryptlen + ctx->authsize) %
-+ blocksize);
-+ authsize = ctx->authsize + padsize;
-+ } else {
-+ authsize = ctx->authsize;
+ }
-
- drv_ctx = get_drv_ctx(ctx, op_type);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
-- return (struct aead_edesc *)drv_ctx;
-+ return (struct tls_edesc *)drv_ctx;
-
-- /* allocate space for base edesc and hw desc commands, link tables */
-+ /* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_alloc(GFP_DMA | flags);
- if (unlikely(!edesc)) {
- dev_err(qidev, "could not allocate extended descriptor\n");
-@@ -581,7 +1421,7 @@ static struct aead_edesc *aead_edesc_all
- if (likely(req->src == req->dst)) {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen +
-- (encrypt ? authsize : 0));
-+ (encrypt ? authsize : 0));
- if (unlikely(src_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->assoclen + req->cryptlen +
-@@ -597,6 +1437,7 @@ static struct aead_edesc *aead_edesc_all
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
-+ dst = req->dst;
- } else {
- src_nents = sg_nents_for_len(req->src, req->assoclen +
- req->cryptlen);
-@@ -607,14 +1448,13 @@ static struct aead_edesc *aead_edesc_all
- return ERR_PTR(src_nents);
- }
-
-- dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-- req->cryptlen +
-- (encrypt ? authsize :
-- (-authsize)));
-+ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
-+ dst_nents = sg_nents_for_len(dst, req->cryptlen +
-+ (encrypt ? authsize : 0));
- if (unlikely(dst_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-- req->assoclen + req->cryptlen +
-- (encrypt ? authsize : (-authsize)));
-+ req->cryptlen +
-+ (encrypt ? authsize : 0));
- qi_cache_free(edesc);
- return ERR_PTR(dst_nents);
- }
-@@ -631,7 +1471,7 @@ static struct aead_edesc *aead_edesc_all
- mapped_src_nents = 0;
- }
-
-- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
-+ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(qidev, "unable to map destination\n");
-@@ -641,80 +1481,51 @@ static struct aead_edesc *aead_edesc_all
- }
- }
-
-- if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
-- ivsize = crypto_aead_ivsize(aead);
--
- /*
-- * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
-+ * Create S/G table: IV, src, dst.
- * Input is not contiguous.
- */
-- qm_sg_ents = 1 + !!ivsize + mapped_src_nents +
-+ qm_sg_ents = 1 + mapped_src_nents +
- (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
- sg_table = &edesc->sgt[0];
- qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
-- if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
-- CAAM_QI_MEMCACHE_SIZE)) {
-- dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
-- qm_sg_ents, ivsize);
-- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
-- 0, 0, 0, 0);
+
-+ ivsize = crypto_aead_ivsize(aead);
-+ iv = (u8 *)(sg_table + qm_sg_ents);
-+ /* Make sure IV is located in a DMAable area */
-+ memcpy(iv, req->iv, ivsize);
-+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
-+ if (dma_mapping_error(qidev, iv_dma)) {
-+ dev_err(qidev, "unable to map IV\n");
-+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0,
-+ 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
-- if (ivsize) {
-- u8 *iv = (u8 *)(sg_table + qm_sg_ents);
--
-- /* Make sure IV is located in a DMAable area */
-- memcpy(iv, req->iv, ivsize);
--
-- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
-- if (dma_mapping_error(qidev, iv_dma)) {
-- dev_err(qidev, "unable to map IV\n");
-- caam_unmap(qidev, req->src, req->dst, src_nents,
-- dst_nents, 0, 0, 0, 0, 0);
-- qi_cache_free(edesc);
-- return ERR_PTR(-ENOMEM);
-- }
-- }
--
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
-+ edesc->dst = dst;
- edesc->iv_dma = iv_dma;
- edesc->drv_req.app_ctx = req;
-- edesc->drv_req.cbk = aead_done;
-+ edesc->drv_req.cbk = tls_done;
- edesc->drv_req.drv_ctx = drv_ctx;
-
-- edesc->assoclen = cpu_to_caam32(req->assoclen);
-- edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
-- DMA_TO_DEVICE);
-- if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
-- dev_err(qidev, "unable to map assoclen\n");
-- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-- iv_dma, ivsize, op_type, 0, 0);
-- qi_cache_free(edesc);
-- return ERR_PTR(-ENOMEM);
-- }
-+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-+ qm_sg_index = 1;
-
-- dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
-- qm_sg_index++;
-- if (ivsize) {
-- dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
-- qm_sg_index++;
-- }
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
- qm_sg_index += mapped_src_nents;
-
- if (mapped_dst_nents > 1)
-- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-+ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
- qm_sg_index, 0);
-
- qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(qidev, qm_sg_dma)) {
- dev_err(qidev, "unable to map S/G table\n");
-- dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
-- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
-- iv_dma, ivsize, op_type, 0, 0);
-+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
-+ ivsize, op_type, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
-@@ -722,35 +1533,29 @@ static struct aead_edesc *aead_edesc_all
- edesc->qm_sg_dma = qm_sg_dma;
- edesc->qm_sg_bytes = qm_sg_bytes;
-
-- out_len = req->assoclen + req->cryptlen +
-- (encrypt ? ctx->authsize : (-ctx->authsize));
-- in_len = 4 + ivsize + req->assoclen + req->cryptlen;
-+ out_len = req->cryptlen + (encrypt ? authsize : 0);
-+ in_len = ivsize + req->assoclen + req->cryptlen;
-
- fd_sgt = &edesc->drv_req.fd_sgt[0];
++ return ret;
++}
+
- dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
-
-- if (req->dst == req->src) {
-- if (mapped_src_nents == 1)
-- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
-- out_len, 0);
-- else
-- dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-- (1 + !!ivsize) * sizeof(*sg_table),
-- out_len, 0);
-- } else if (mapped_dst_nents == 1) {
-- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
-- 0);
-- } else {
-+ if (req->dst == req->src)
-+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
-+ (sg_nents_for_len(req->src, req->assoclen) +
-+ 1) * sizeof(*sg_table), out_len, 0);
-+ else if (mapped_dst_nents == 1)
-+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
-+ else
- dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
- qm_sg_index, out_len, 0);
-- }
-
- return edesc;
- }
-
--static inline int aead_crypt(struct aead_request *req, bool encrypt)
-+static int tls_crypt(struct aead_request *req, bool encrypt)
- {
-- struct aead_edesc *edesc;
-+ struct tls_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
- struct caam_ctx *ctx = crypto_aead_ctx(aead);
- int ret;
-@@ -758,31 +1563,29 @@ static inline int aead_crypt(struct aead
- if (unlikely(caam_congested))
- return -EAGAIN;
-
-- /* allocate extended descriptor */
-- edesc = aead_edesc_alloc(req, encrypt);
-+ edesc = tls_edesc_alloc(req, encrypt);
- if (IS_ERR_OR_NULL(edesc))
- return PTR_ERR(edesc);
-
-- /* Create and submit job descriptor */
- ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
-- aead_unmap(ctx->qidev, edesc, req);
-+ tls_unmap(ctx->qidev, edesc, req);
- qi_cache_free(edesc);
- }
-
- return ret;
- }
-
--static int aead_encrypt(struct aead_request *req)
+static int tls_encrypt(struct aead_request *req)
- {
-- return aead_crypt(req, true);
++{
+ return tls_crypt(req, true);
- }
-
--static int aead_decrypt(struct aead_request *req)
++}
++
+static int tls_decrypt(struct aead_request *req)
- {
-- return aead_crypt(req, false);
++{
+ return tls_crypt(req, false);
- }
-
++}
++
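Assuming the algorithm is registered under a tls10(hmac(sha1),cbc(aes)) template name, as in NXP's out-of-tree series, these entry points are driven through the regular AEAD API. A hypothetical kernel-side caller — setkey/setauthsize omitted, and the async -EINPROGRESS completion path is not handled:

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/types.h>

/* Hypothetical caller: one in-place TLS 1.0 record encryption.
 * -EAGAIN from a congested CAAM queue is not retried here. */
static int tls_encrypt_once(struct scatterlist *sg, unsigned int assoclen,
			    unsigned int cryptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	int ret;

	tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_aead(tfm);
		return -ENOMEM;
	}

	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, sg, sg, cryptlen, iv);
	ret = crypto_aead_encrypt(req);	/* may return -EINPROGRESS */

	aead_request_free(req);
	crypto_free_aead(tfm);
	return ret;
}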
static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+ {
+ struct ablkcipher_edesc *edesc;
@@ -900,7 +1703,24 @@ static struct ablkcipher_edesc *ablkciph
qm_sg_ents = 1 + mapped_src_nents;
dst_sg_idx = qm_sg_ents;