crypto: ccree - don't map AEAD key and IV on stack
authorGilad Ben-Yossef <gilad@benyossef.com>
Thu, 18 Apr 2019 13:39:05 +0000 (16:39 +0300)
committerHerbert Xu <herbert@gondor.apana.org.au>
Thu, 25 Apr 2019 07:38:16 +0000 (15:38 +0800)
The AEAD authenc key and IVs might be passed to us on the stack. Copy them to
a slab buffer before mapping to guarantee proper DMA mapping.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: stable@vger.kernel.org # v4.19+
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/ccree/cc_aead.c
drivers/crypto/ccree/cc_buffer_mgr.c
drivers/crypto/ccree/cc_driver.h

index a49814d297147ff4f1b09d79fafec561da484ea5..7aa4cbe19a86f523ca390d0943b75a8c27e36876 100644 (file)
@@ -424,7 +424,7 @@ static int validate_keys_sizes(struct cc_aead_ctx *ctx)
 /* This function prepers the user key so it can pass to the hmac processing
  * (copy to intenral buffer or hash in case of key longer than block
  */
-static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
+static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
                                 unsigned int keylen)
 {
        dma_addr_t key_dma_addr = 0;
@@ -437,6 +437,7 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
        unsigned int hashmode;
        unsigned int idx = 0;
        int rc = 0;
+       u8 *key = NULL;
        struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
        dma_addr_t padded_authkey_dma_addr =
                ctx->auth_state.hmac.padded_authkey_dma_addr;
@@ -455,11 +456,17 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
        }
 
        if (keylen != 0) {
+
+               key = kmemdup(authkey, keylen, GFP_KERNEL);
+               if (!key)
+                       return -ENOMEM;
+
                key_dma_addr = dma_map_single(dev, (void *)key, keylen,
                                              DMA_TO_DEVICE);
                if (dma_mapping_error(dev, key_dma_addr)) {
                        dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
                                key, keylen);
+                       kzfree(key);
                        return -ENOMEM;
                }
                if (keylen > blocksize) {
@@ -542,6 +549,8 @@ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
        if (key_dma_addr)
                dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
 
+       kzfree(key);
+
        return rc;
 }
 
index 09dceec7d82868c5bb080449a0c982ad660df330..c81ad33f91159c59b11ca9c33a482be60ea818e0 100644 (file)
@@ -557,6 +557,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
        if (areq_ctx->gen_ctx.iv_dma_addr) {
                dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
                                 hw_iv_size, DMA_BIDIRECTIONAL);
+               kzfree(areq_ctx->gen_ctx.iv);
        }
 
        /* Release pool */
@@ -607,19 +608,27 @@ static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        unsigned int hw_iv_size = areq_ctx->hw_iv_size;
        struct device *dev = drvdata_to_dev(drvdata);
+       gfp_t flags = cc_gfp_flags(&req->base);
        int rc = 0;
 
        if (!req->iv) {
                areq_ctx->gen_ctx.iv_dma_addr = 0;
+               areq_ctx->gen_ctx.iv = NULL;
                goto chain_iv_exit;
        }
 
-       areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, req->iv,
-                                                      hw_iv_size,
-                                                      DMA_BIDIRECTIONAL);
+       areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
+       if (!areq_ctx->gen_ctx.iv)
+               return -ENOMEM;
+
+       areq_ctx->gen_ctx.iv_dma_addr =
+               dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
+                              DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
                dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
                        hw_iv_size, req->iv);
+               kzfree(areq_ctx->gen_ctx.iv);
+               areq_ctx->gen_ctx.iv = NULL;
                rc = -ENOMEM;
                goto chain_iv_exit;
        }
index 695ccbd52ae4b051c088684e3cb5fa1a4567b013..b76181335c08afa01ba4b15ba00eb17421eb771b 100644 (file)
@@ -199,6 +199,7 @@ struct cc_alg_template {
 
 struct async_gen_req_ctx {
        dma_addr_t iv_dma_addr;
+       u8 *iv;
        enum drv_crypto_direction op_type;
 };