crypto: caam - fix DMA API leaks for multiple setkey() calls
authorHoria Geantă <horia.geanta@nxp.com>
Fri, 10 Feb 2017 12:07:22 +0000 (14:07 +0200)
committerHerbert Xu <herbert@gondor.apana.org.au>
Wed, 15 Feb 2017 05:23:41 +0000 (13:23 +0800)
setkey() callback may be invoked multiple times for the same tfm.
In this case, DMA API leaks are caused by shared descriptors
(and key for caamalg) being mapped several times and unmapped only once.
Fix this by performing the mapping / unmapping only in the crypto
algorithm's cra_init() / cra_exit() callbacks, and by calling
dma_sync_single_for_device() in the setkey() tfm callback.

Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamhash.c

index 71d09e896d4831bbcfc8a1067fd5e76eb967afd3..9bc80eb069345c743748dc2c6fb5956d2cc592cb 100644 (file)
@@ -134,15 +134,15 @@ struct caam_aead_alg {
  * per-session context
  */
 struct caam_ctx {
-       struct device *jrdev;
        u32 sh_desc_enc[DESC_MAX_USED_LEN];
        u32 sh_desc_dec[DESC_MAX_USED_LEN];
        u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+       u8 key[CAAM_MAX_KEY_SIZE];
        dma_addr_t sh_desc_enc_dma;
        dma_addr_t sh_desc_dec_dma;
        dma_addr_t sh_desc_givenc_dma;
-       u8 key[CAAM_MAX_KEY_SIZE];
        dma_addr_t key_dma;
+       struct device *jrdev;
        struct alginfo adata;
        struct alginfo cdata;
        unsigned int authsize;
@@ -171,13 +171,8 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
        /* aead_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;
        cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
-       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        /*
         * Job Descriptor and Shared Descriptors
@@ -194,13 +189,8 @@ static int aead_null_set_sh_desc(struct crypto_aead *aead)
        /* aead_decrypt shared descriptor */
        desc = ctx->sh_desc_dec;
        cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
-       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        return 0;
 }
@@ -278,13 +268,8 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
        desc = ctx->sh_desc_enc;
        cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ctx->authsize,
                               is_rfc3686, nonce, ctx1_iv_off);
-       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
 skip_enc:
        /*
@@ -315,13 +300,8 @@ skip_enc:
        cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
                               ctx->authsize, alg->caam.geniv, is_rfc3686,
                               nonce, ctx1_iv_off);
-       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        if (!alg->caam.geniv)
                goto skip_givenc;
@@ -354,13 +334,8 @@ skip_enc:
        cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
                                  ctx->authsize, is_rfc3686, nonce,
                                  ctx1_iv_off);
-       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
 skip_givenc:
        return 0;
@@ -403,13 +378,8 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
 
        desc = ctx->sh_desc_enc;
        cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ctx->authsize);
-       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        /*
         * Job Descriptor and Shared Descriptors
@@ -425,13 +395,8 @@ static int gcm_set_sh_desc(struct crypto_aead *aead)
 
        desc = ctx->sh_desc_dec;
        cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ctx->authsize);
-       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        return 0;
 }
@@ -472,13 +437,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 
        desc = ctx->sh_desc_enc;
        cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ctx->authsize);
-       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        /*
         * Job Descriptor and Shared Descriptors
@@ -494,13 +454,8 @@ static int rfc4106_set_sh_desc(struct crypto_aead *aead)
 
        desc = ctx->sh_desc_dec;
        cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ctx->authsize);
-       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        return 0;
 }
@@ -542,13 +497,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 
        desc = ctx->sh_desc_enc;
        cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ctx->authsize);
-       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        /*
         * Job Descriptor and Shared Descriptors
@@ -564,13 +514,8 @@ static int rfc4543_set_sh_desc(struct crypto_aead *aead)
 
        desc = ctx->sh_desc_dec;
        cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ctx->authsize);
-       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        return 0;
 }
@@ -614,28 +559,15 @@ static int aead_setkey(struct crypto_aead *aead,
 
        /* postpend encryption key to auth split key */
        memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
-
-       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->adata.keylen_pad +
-                                     keys.enckeylen, DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->key_dma)) {
-               dev_err(jrdev, "unable to map key i/o memory\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
+                                  keys.enckeylen, DMA_TO_DEVICE);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
                       ctx->adata.keylen_pad + keys.enckeylen, 1);
 #endif
-
        ctx->cdata.keylen = keys.enckeylen;
-
-       ret = aead_set_sh_desc(aead);
-       if (ret) {
-               dma_unmap_single(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
-                                keys.enckeylen, DMA_TO_DEVICE);
-       }
-
-       return ret;
+       return aead_set_sh_desc(aead);
 badkey:
        crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
        return -EINVAL;
@@ -646,7 +578,6 @@ static int gcm_setkey(struct crypto_aead *aead,
 {
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
-       int ret = 0;
 
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
@@ -654,21 +585,10 @@ static int gcm_setkey(struct crypto_aead *aead,
 #endif
 
        memcpy(ctx->key, key, keylen);
-       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
-                                     DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->key_dma)) {
-               dev_err(jrdev, "unable to map key i/o memory\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
        ctx->cdata.keylen = keylen;
 
-       ret = gcm_set_sh_desc(aead);
-       if (ret) {
-               dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
-                                DMA_TO_DEVICE);
-       }
-
-       return ret;
+       return gcm_set_sh_desc(aead);
 }
 
 static int rfc4106_setkey(struct crypto_aead *aead,
@@ -676,7 +596,6 @@ static int rfc4106_setkey(struct crypto_aead *aead,
 {
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
-       int ret = 0;
 
        if (keylen < 4)
                return -EINVAL;
@@ -693,21 +612,9 @@ static int rfc4106_setkey(struct crypto_aead *aead,
         * in the nonce. Update the AES key length.
         */
        ctx->cdata.keylen = keylen - 4;
-
-       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
-                                     DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->key_dma)) {
-               dev_err(jrdev, "unable to map key i/o memory\n");
-               return -ENOMEM;
-       }
-
-       ret = rfc4106_set_sh_desc(aead);
-       if (ret) {
-               dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
-                                DMA_TO_DEVICE);
-       }
-
-       return ret;
+       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+                                  DMA_TO_DEVICE);
+       return rfc4106_set_sh_desc(aead);
 }
 
 static int rfc4543_setkey(struct crypto_aead *aead,
@@ -715,7 +622,6 @@ static int rfc4543_setkey(struct crypto_aead *aead,
 {
        struct caam_ctx *ctx = crypto_aead_ctx(aead);
        struct device *jrdev = ctx->jrdev;
-       int ret = 0;
 
        if (keylen < 4)
                return -EINVAL;
@@ -732,21 +638,9 @@ static int rfc4543_setkey(struct crypto_aead *aead,
         * in the nonce. Update the AES key length.
         */
        ctx->cdata.keylen = keylen - 4;
-
-       ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->cdata.keylen,
-                                     DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->key_dma)) {
-               dev_err(jrdev, "unable to map key i/o memory\n");
-               return -ENOMEM;
-       }
-
-       ret = rfc4543_set_sh_desc(aead);
-       if (ret) {
-               dma_unmap_single(jrdev, ctx->key_dma, ctx->cdata.keylen,
-                                DMA_TO_DEVICE);
-       }
-
-       return ret;
+       dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen,
+                                  DMA_TO_DEVICE);
+       return rfc4543_set_sh_desc(aead);
 }
 
 static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
@@ -787,12 +681,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
                keylen -= CTR_RFC3686_NONCE_SIZE;
        }
 
-       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
-                                     DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->key_dma)) {
-               dev_err(jrdev, "unable to map key i/o memory\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
        ctx->cdata.keylen = keylen;
        ctx->cdata.key_virt = ctx->key;
        ctx->cdata.key_inline = true;
@@ -801,37 +690,22 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
        desc = ctx->sh_desc_enc;
        cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
                                     ctx1_iv_off);
-       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        /* ablkcipher_decrypt shared descriptor */
        desc = ctx->sh_desc_dec;
        cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
                                     ctx1_iv_off);
-       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
-                                             desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        /* ablkcipher_givencrypt shared descriptor */
        desc = ctx->sh_desc_givenc;
        cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
                                        ctx1_iv_off);
-       ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
-                                                desc_bytes(desc),
-                                                DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        return 0;
 }
@@ -851,11 +725,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
        }
 
        memcpy(ctx->key, key, keylen);
-       ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen, DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->key_dma)) {
-               dev_err(jrdev, "unable to map key i/o memory\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
        ctx->cdata.keylen = keylen;
        ctx->cdata.key_virt = ctx->key;
        ctx->cdata.key_inline = true;
@@ -863,24 +733,14 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
        /* xts_ablkcipher_encrypt shared descriptor */
        desc = ctx->sh_desc_enc;
        cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
-       ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        /* xts_ablkcipher_decrypt shared descriptor */
        desc = ctx->sh_desc_dec;
        cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
-       ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
-               dma_unmap_single(jrdev, ctx->sh_desc_enc_dma,
-                                desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 
        return 0;
 }
@@ -3391,12 +3251,31 @@ struct caam_crypto_alg {
 
 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
 {
+       dma_addr_t dma_addr;
+
        ctx->jrdev = caam_jr_alloc();
        if (IS_ERR(ctx->jrdev)) {
                pr_err("Job Ring Device allocation for transform failed\n");
                return PTR_ERR(ctx->jrdev);
        }
 
+       dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
+                                       offsetof(struct caam_ctx,
+                                                sh_desc_enc_dma),
+                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+       if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+               dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
+               caam_jr_free(ctx->jrdev);
+               return -ENOMEM;
+       }
+
+       ctx->sh_desc_enc_dma = dma_addr;
+       ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
+                                                  sh_desc_dec);
+       ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
+                                                     sh_desc_givenc);
+       ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
+
        /* copy descriptor header template value */
        ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
        ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
@@ -3426,25 +3305,9 @@ static int caam_aead_init(struct crypto_aead *tfm)
 
 static void caam_exit_common(struct caam_ctx *ctx)
 {
-       if (ctx->sh_desc_enc_dma &&
-           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
-               dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
-                                desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
-       if (ctx->sh_desc_dec_dma &&
-           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
-               dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
-                                desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
-       if (ctx->sh_desc_givenc_dma &&
-           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
-               dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
-                                desc_bytes(ctx->sh_desc_givenc),
-                                DMA_TO_DEVICE);
-       if (ctx->key_dma &&
-           !dma_mapping_error(ctx->jrdev, ctx->key_dma))
-               dma_unmap_single(ctx->jrdev, ctx->key_dma,
-                                ctx->cdata.keylen + ctx->adata.keylen_pad,
-                                DMA_TO_DEVICE);
-
+       dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
+                              offsetof(struct caam_ctx, sh_desc_enc_dma),
+                              DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
        caam_jr_free(ctx->jrdev);
 }
 
index 117bbd8c08d41c9cf76d471efd6f406ee80874ea..2ad83a8dc0feb5c25d0cd3260f234bfd9d6da31b 100644 (file)
@@ -276,12 +276,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
        /* ahash_update shared descriptor */
        desc = ctx->sh_desc_update;
        ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
-       ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-                                                DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash update shdesc@"__stringify(__LINE__)": ",
@@ -291,13 +287,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
        /* ahash_update_first shared descriptor */
        desc = ctx->sh_desc_update_first;
        ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
-       ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc,
-                                                      desc_bytes(desc),
-                                                      DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash update first shdesc@"__stringify(__LINE__)": ",
@@ -307,12 +298,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
        /* ahash_final shared descriptor */
        desc = ctx->sh_desc_fin;
        ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
-       ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-                                             DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "ahash final shdesc@"__stringify(__LINE__)": ",
                       DUMP_PREFIX_ADDRESS, 16, 4, desc,
@@ -322,13 +309,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
        /* ahash_digest shared descriptor */
        desc = ctx->sh_desc_digest;
        ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
-       ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc,
-                                                desc_bytes(desc),
-                                                DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) {
-               dev_err(jrdev, "unable to map shared descriptor\n");
-               return -ENOMEM;
-       }
+       dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+                                  desc_bytes(desc), DMA_TO_DEVICE);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR,
                       "ahash digest shdesc@"__stringify(__LINE__)": ",
@@ -1716,6 +1698,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
                                         HASH_MSG_LEN + SHA256_DIGEST_SIZE,
                                         HASH_MSG_LEN + 64,
                                         HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+       dma_addr_t dma_addr;
 
        /*
         * Get a Job ring from Job Ring driver to ensure in-order
@@ -1726,6 +1709,26 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
                pr_err("Job Ring Device allocation for transform failed\n");
                return PTR_ERR(ctx->jrdev);
        }
+
+       dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
+                                       offsetof(struct caam_hash_ctx,
+                                                sh_desc_update_dma),
+                                       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+       if (dma_mapping_error(ctx->jrdev, dma_addr)) {
+               dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+               caam_jr_free(ctx->jrdev);
+               return -ENOMEM;
+       }
+
+       ctx->sh_desc_update_dma = dma_addr;
+       ctx->sh_desc_update_first_dma = dma_addr +
+                                       offsetof(struct caam_hash_ctx,
+                                                sh_desc_update_first);
+       ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx,
+                                                  sh_desc_fin);
+       ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
+                                                     sh_desc_digest);
+
        /* copy descriptor header template value */
        ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
 
@@ -1742,26 +1745,10 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
 {
        struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       if (ctx->sh_desc_update_dma &&
-           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma))
-               dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma,
-                                desc_bytes(ctx->sh_desc_update),
-                                DMA_TO_DEVICE);
-       if (ctx->sh_desc_update_first_dma &&
-           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma))
-               dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma,
-                                desc_bytes(ctx->sh_desc_update_first),
-                                DMA_TO_DEVICE);
-       if (ctx->sh_desc_fin_dma &&
-           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma))
-               dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma,
-                                desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE);
-       if (ctx->sh_desc_digest_dma &&
-           !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma))
-               dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma,
-                                desc_bytes(ctx->sh_desc_digest),
-                                DMA_TO_DEVICE);
-
+       dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
+                              offsetof(struct caam_hash_ctx,
+                                       sh_desc_update_dma),
+                              DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
        caam_jr_free(ctx->jrdev);
 }