-From 9c9579d76ccd6e738ab98c9b4c73c168912cdb8a Mon Sep 17 00:00:00 2001
+From a3310d64d7cb1ba0f9279e77d21f13a75fa66ab5 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
-Date: Wed, 27 Sep 2017 15:02:01 +0800
-Subject: [PATCH] crypto: support layerscape
+Date: Wed, 17 Jan 2018 15:29:23 +0800
+Subject: [PATCH 16/30] crypto: support layerscape
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
-This is a integrated patch for layerscape sec support.
+This is an integrated patch for layerscape sec support.
Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
Signed-off-by: Fabio Estevam <festevam@gmail.com>
crypto/crypto_user.c | 19 +
crypto/scompress.c | 356 ++
crypto/tcrypt.c | 17 +-
- crypto/testmgr.c | 1701 ++++----
- crypto/testmgr.h | 1125 +++---
+ crypto/testmgr.c | 1708 +++---
+ crypto/testmgr.h | 1125 ++--
crypto/tls.c | 607 +++
- drivers/crypto/caam/Kconfig | 72 +-
- drivers/crypto/caam/Makefile | 15 +-
- drivers/crypto/caam/caamalg.c | 2125 +++-------
- drivers/crypto/caam/caamalg_desc.c | 1913 +++++++++
+ drivers/crypto/caam/Kconfig | 77 +-
+ drivers/crypto/caam/Makefile | 16 +-
+ drivers/crypto/caam/caamalg.c | 2171 ++------
+ drivers/crypto/caam/caamalg_desc.c | 1961 +++++++
drivers/crypto/caam/caamalg_desc.h | 127 +
- drivers/crypto/caam/caamalg_qi.c | 2877 +++++++++++++
- drivers/crypto/caam/caamalg_qi2.c | 4428 +++++++++++++++++++++
- drivers/crypto/caam/caamalg_qi2.h | 265 ++
- drivers/crypto/caam/caamhash.c | 521 +--
- drivers/crypto/caam/caampkc.c | 471 ++-
+ drivers/crypto/caam/caamalg_qi.c | 2929 ++++++++++
+ drivers/crypto/caam/caamalg_qi2.c | 5920 +++++++++++++++++++++
+ drivers/crypto/caam/caamalg_qi2.h | 281 +
+ drivers/crypto/caam/caamhash.c | 550 +-
+ drivers/crypto/caam/caamhash_desc.c | 108 +
+ drivers/crypto/caam/caamhash_desc.h | 49 +
+ drivers/crypto/caam/caampkc.c | 471 +-
drivers/crypto/caam/caampkc.h | 58 +
drivers/crypto/caam/caamrng.c | 16 +-
drivers/crypto/caam/compat.h | 1 +
- drivers/crypto/caam/ctrl.c | 356 +-
+ drivers/crypto/caam/ctrl.c | 358 +-
drivers/crypto/caam/ctrl.h | 2 +
- drivers/crypto/caam/desc.h | 55 +-
- drivers/crypto/caam/desc_constr.h | 139 +-
- drivers/crypto/caam/dpseci.c | 859 ++++
+ drivers/crypto/caam/desc.h | 84 +-
+ drivers/crypto/caam/desc_constr.h | 180 +-
+ drivers/crypto/caam/dpseci.c | 859 +++
drivers/crypto/caam/dpseci.h | 395 ++
- drivers/crypto/caam/dpseci_cmd.h | 261 ++
+ drivers/crypto/caam/dpseci_cmd.h | 261 +
drivers/crypto/caam/error.c | 127 +-
drivers/crypto/caam/error.h | 10 +-
drivers/crypto/caam/intern.h | 31 +-
- drivers/crypto/caam/jr.c | 97 +-
+ drivers/crypto/caam/jr.c | 72 +-
drivers/crypto/caam/jr.h | 2 +
drivers/crypto/caam/key_gen.c | 32 +-
drivers/crypto/caam/key_gen.h | 36 +-
drivers/crypto/caam/pdb.h | 62 +
drivers/crypto/caam/pkc_desc.c | 36 +
- drivers/crypto/caam/qi.c | 797 ++++
+ drivers/crypto/caam/qi.c | 797 +++
drivers/crypto/caam/qi.h | 204 +
drivers/crypto/caam/regs.h | 63 +-
drivers/crypto/caam/sg_sw_qm.h | 126 +
drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +-
drivers/staging/wilc1000/linux_wlan.c | 2 +-
drivers/staging/wilc1000/wilc_wfi_cfgoperations.c | 2 +-
- include/crypto/acompress.h | 269 ++
+ include/crypto/acompress.h | 269 +
include/crypto/internal/acompress.h | 81 +
include/crypto/internal/scompress.h | 136 +
include/linux/crypto.h | 3 +
include/uapi/linux/cryptouser.h | 5 +
scripts/spelling.txt | 3 +
sound/soc/amd/acp-pcm-dma.c | 2 +-
- 55 files changed, 17310 insertions(+), 3955 deletions(-)
+ 57 files changed, 19177 insertions(+), 3988 deletions(-)
create mode 100644 crypto/acompress.c
create mode 100644 crypto/scompress.c
create mode 100644 crypto/tls.c
create mode 100644 drivers/crypto/caam/caamalg_qi.c
create mode 100644 drivers/crypto/caam/caamalg_qi2.c
create mode 100644 drivers/crypto/caam/caamalg_qi2.h
+ create mode 100644 drivers/crypto/caam/caamhash_desc.c
+ create mode 100644 drivers/crypto/caam/caamhash_desc.h
create mode 100644 drivers/crypto/caam/dpseci.c
create mode 100644 drivers/crypto/caam/dpseci.h
create mode 100644 drivers/crypto/caam/dpseci_cmd.h
const bool diff_dst, const int align_offset)
{
const char *algo =
-@@ -1330,7 +1571,8 @@ out_nobuf:
+@@ -1079,12 +1320,16 @@ static int __test_skcipher(struct crypto
+ const char *e, *d;
+ struct tcrypt_result result;
+ void *data;
+- char iv[MAX_IVLEN];
++ char *iv;
+ char *xbuf[XBUFSIZE];
+ char *xoutbuf[XBUFSIZE];
+ int ret = -ENOMEM;
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+
++ iv = kmalloc(MAX_IVLEN, GFP_KERNEL);
++ if (!iv)
++ return ret;
++
+ if (testmgr_alloc_buf(xbuf))
+ goto out_nobuf;
+
+@@ -1325,12 +1570,14 @@ out:
+ testmgr_free_buf(xoutbuf);
+ out_nooutbuf:
+ testmgr_free_buf(xbuf);
++ kfree(iv);
+ out_nobuf:
+ return ret;
}
static int test_skcipher(struct crypto_skcipher *tfm, int enc,
{
unsigned int alignmask;
int ret;
-@@ -1362,8 +1604,10 @@ static int test_skcipher(struct crypto_s
+@@ -1362,8 +1609,10 @@ static int test_skcipher(struct crypto_s
return 0;
}
{
const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
unsigned int i;
-@@ -1442,7 +1686,154 @@ out:
+@@ -1442,7 +1691,154 @@ out:
return ret;
}
unsigned int tcount)
{
const char *algo = crypto_tfm_alg_driver_name(crypto_rng_tfm(tfm));
-@@ -1509,7 +1900,7 @@ static int alg_test_aead(const struct al
+@@ -1509,7 +1905,7 @@ static int alg_test_aead(const struct al
struct crypto_aead *tfm;
int err = 0;
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: aead: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
-@@ -1538,7 +1929,7 @@ static int alg_test_cipher(const struct
+@@ -1538,7 +1934,7 @@ static int alg_test_cipher(const struct
struct crypto_cipher *tfm;
int err = 0;
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: cipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
-@@ -1567,7 +1958,7 @@ static int alg_test_skcipher(const struc
+@@ -1567,7 +1963,7 @@ static int alg_test_skcipher(const struc
struct crypto_skcipher *tfm;
int err = 0;
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: skcipher: Failed to load transform for "
"%s: %ld\n", driver, PTR_ERR(tfm));
-@@ -1593,22 +1984,38 @@ out:
+@@ -1593,22 +1989,38 @@ out:
static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
u32 type, u32 mask)
{
return err;
}
-@@ -1618,7 +2025,7 @@ static int alg_test_hash(const struct al
+@@ -1618,7 +2030,7 @@ static int alg_test_hash(const struct al
struct crypto_ahash *tfm;
int err;
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: hash: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
-@@ -1646,7 +2053,7 @@ static int alg_test_crc32c(const struct
+@@ -1646,7 +2058,7 @@ static int alg_test_crc32c(const struct
if (err)
goto out;
if (IS_ERR(tfm)) {
printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(tfm));
-@@ -1688,7 +2095,7 @@ static int alg_test_cprng(const struct a
+@@ -1688,7 +2100,7 @@ static int alg_test_cprng(const struct a
struct crypto_rng *rng;
int err;
if (IS_ERR(rng)) {
printk(KERN_ERR "alg: cprng: Failed to load transform for %s: "
"%ld\n", driver, PTR_ERR(rng));
-@@ -1703,7 +2110,7 @@ static int alg_test_cprng(const struct a
+@@ -1703,7 +2115,7 @@ static int alg_test_cprng(const struct a
}
const char *driver, u32 type, u32 mask)
{
int ret = -EAGAIN;
-@@ -1715,7 +2122,7 @@ static int drbg_cavs_test(struct drbg_te
+@@ -1715,7 +2127,7 @@ static int drbg_cavs_test(struct drbg_te
if (!buf)
return -ENOMEM;
if (IS_ERR(drng)) {
printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
"%s\n", driver);
-@@ -1777,7 +2184,7 @@ static int alg_test_drbg(const struct al
+@@ -1777,7 +2189,7 @@ static int alg_test_drbg(const struct al
int err = 0;
int pr = 0;
int i = 0;
unsigned int tcount = desc->suite.drbg.count;
if (0 == memcmp(driver, "drbg_pr_", 8))
-@@ -1796,7 +2203,7 @@ static int alg_test_drbg(const struct al
+@@ -1796,7 +2208,7 @@ static int alg_test_drbg(const struct al
}
const char *alg)
{
struct kpp_request *req;
-@@ -1888,7 +2295,7 @@ free_req:
+@@ -1888,7 +2300,7 @@ free_req:
}
static int test_kpp(struct crypto_kpp *tfm, const char *alg,
{
int ret, i;
-@@ -1909,7 +2316,7 @@ static int alg_test_kpp(const struct alg
+@@ -1909,7 +2321,7 @@ static int alg_test_kpp(const struct alg
struct crypto_kpp *tfm;
int err = 0;
if (IS_ERR(tfm)) {
pr_err("alg: kpp: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
-@@ -1924,7 +2331,7 @@ static int alg_test_kpp(const struct alg
+@@ -1924,7 +2336,7 @@ static int alg_test_kpp(const struct alg
}
static int test_akcipher_one(struct crypto_akcipher *tfm,
{
char *xbuf[XBUFSIZE];
struct akcipher_request *req;
-@@ -2044,7 +2451,8 @@ free_xbuf:
+@@ -2044,7 +2456,8 @@ free_xbuf:
}
static int test_akcipher(struct crypto_akcipher *tfm, const char *alg,
{
const char *algo =
crypto_tfm_alg_driver_name(crypto_akcipher_tfm(tfm));
-@@ -2068,7 +2476,7 @@ static int alg_test_akcipher(const struc
+@@ -2068,7 +2481,7 @@ static int alg_test_akcipher(const struc
struct crypto_akcipher *tfm;
int err = 0;
if (IS_ERR(tfm)) {
pr_err("alg: akcipher: Failed to load tfm for %s: %ld\n",
driver, PTR_ERR(tfm));
-@@ -2088,112 +2496,23 @@ static int alg_test_null(const struct al
+@@ -2088,112 +2501,23 @@ static int alg_test_null(const struct al
return 0;
}
}
}
}, {
-@@ -2201,12 +2520,7 @@ static const struct alg_test_desc alg_te
+@@ -2201,12 +2525,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2214,12 +2528,7 @@ static const struct alg_test_desc alg_te
+@@ -2214,12 +2533,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2228,12 +2537,7 @@ static const struct alg_test_desc alg_te
+@@ -2228,12 +2542,7 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -2245,18 +2549,8 @@ static const struct alg_test_desc alg_te
+@@ -2245,18 +2554,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2268,12 +2562,7 @@ static const struct alg_test_desc alg_te
+@@ -2268,12 +2567,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2282,12 +2571,7 @@ static const struct alg_test_desc alg_te
+@@ -2282,12 +2576,7 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -2296,12 +2580,7 @@ static const struct alg_test_desc alg_te
+@@ -2296,12 +2585,7 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -2309,12 +2588,7 @@ static const struct alg_test_desc alg_te
+@@ -2309,12 +2593,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2323,12 +2597,7 @@ static const struct alg_test_desc alg_te
+@@ -2323,12 +2602,7 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -2344,12 +2613,7 @@ static const struct alg_test_desc alg_te
+@@ -2344,12 +2618,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2358,12 +2622,7 @@ static const struct alg_test_desc alg_te
+@@ -2358,12 +2627,7 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -2380,12 +2639,7 @@ static const struct alg_test_desc alg_te
+@@ -2380,12 +2644,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2393,12 +2647,7 @@ static const struct alg_test_desc alg_te
+@@ -2393,12 +2652,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -2407,12 +2656,7 @@ static const struct alg_test_desc alg_te
+@@ -2407,12 +2661,7 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -2429,14 +2673,8 @@ static const struct alg_test_desc alg_te
+@@ -2429,14 +2678,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -2444,14 +2682,8 @@ static const struct alg_test_desc alg_te
+@@ -2444,14 +2687,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2459,14 +2691,8 @@ static const struct alg_test_desc alg_te
+@@ -2459,14 +2696,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2474,14 +2700,8 @@ static const struct alg_test_desc alg_te
+@@ -2474,14 +2705,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2489,14 +2709,8 @@ static const struct alg_test_desc alg_te
+@@ -2489,14 +2714,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2504,14 +2718,8 @@ static const struct alg_test_desc alg_te
+@@ -2504,14 +2723,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2519,14 +2727,8 @@ static const struct alg_test_desc alg_te
+@@ -2519,14 +2732,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2535,14 +2737,8 @@ static const struct alg_test_desc alg_te
+@@ -2535,14 +2742,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -2550,14 +2746,8 @@ static const struct alg_test_desc alg_te
+@@ -2550,14 +2751,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2565,30 +2755,25 @@ static const struct alg_test_desc alg_te
+@@ -2565,30 +2760,25 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2596,14 +2781,8 @@ static const struct alg_test_desc alg_te
+@@ -2596,14 +2786,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2611,20 +2790,14 @@ static const struct alg_test_desc alg_te
+@@ -2611,20 +2795,14 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.test = alg_test_hash,
.suite = {
}
}, {
.alg = "compress_null",
-@@ -2633,94 +2806,30 @@ static const struct alg_test_desc alg_te
+@@ -2633,94 +2811,30 @@ static const struct alg_test_desc alg_te
.alg = "crc32",
.test = alg_test_hash,
.suite = {
}
}
}, {
-@@ -2728,14 +2837,8 @@ static const struct alg_test_desc alg_te
+@@ -2728,14 +2842,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2743,14 +2846,8 @@ static const struct alg_test_desc alg_te
+@@ -2743,14 +2851,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2758,14 +2855,8 @@ static const struct alg_test_desc alg_te
+@@ -2758,14 +2860,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2773,14 +2864,8 @@ static const struct alg_test_desc alg_te
+@@ -2773,14 +2869,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2788,29 +2873,18 @@ static const struct alg_test_desc alg_te
+@@ -2788,29 +2878,18 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2818,14 +2892,8 @@ static const struct alg_test_desc alg_te
+@@ -2818,14 +2897,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2833,14 +2901,8 @@ static const struct alg_test_desc alg_te
+@@ -2833,14 +2906,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2848,14 +2910,8 @@ static const struct alg_test_desc alg_te
+@@ -2848,14 +2915,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -2864,14 +2920,8 @@ static const struct alg_test_desc alg_te
+@@ -2864,14 +2925,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.comp = {
}
}
}, {
-@@ -2879,10 +2929,7 @@ static const struct alg_test_desc alg_te
+@@ -2879,10 +2934,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
}
}, {
.alg = "digest_null",
-@@ -2892,30 +2939,21 @@ static const struct alg_test_desc alg_te
+@@ -2892,30 +2944,21 @@ static const struct alg_test_desc alg_te
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
}
}, {
/*
-@@ -2930,11 +2968,7 @@ static const struct alg_test_desc alg_te
+@@ -2930,11 +2973,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
}
}, {
/* covered by drbg_nopr_hmac_sha256 test */
-@@ -2954,10 +2988,7 @@ static const struct alg_test_desc alg_te
+@@ -2954,10 +2993,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
}
}, {
/* covered by drbg_nopr_sha256 test */
-@@ -2973,10 +3004,7 @@ static const struct alg_test_desc alg_te
+@@ -2973,10 +3009,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
}
}, {
/* covered by drbg_pr_ctr_aes128 test */
-@@ -2996,10 +3024,7 @@ static const struct alg_test_desc alg_te
+@@ -2996,10 +3029,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
}
}, {
/* covered by drbg_pr_hmac_sha256 test */
-@@ -3019,10 +3044,7 @@ static const struct alg_test_desc alg_te
+@@ -3019,10 +3049,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_drbg,
.fips_allowed = 1,
.suite = {
}
}, {
/* covered by drbg_pr_sha256 test */
-@@ -3034,23 +3056,13 @@ static const struct alg_test_desc alg_te
+@@ -3034,23 +3061,13 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.test = alg_test_null,
}, {
}
}
}, {
-@@ -3058,14 +3070,8 @@ static const struct alg_test_desc alg_te
+@@ -3058,14 +3075,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3073,14 +3079,8 @@ static const struct alg_test_desc alg_te
+@@ -3073,14 +3084,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3088,14 +3088,8 @@ static const struct alg_test_desc alg_te
+@@ -3088,14 +3093,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3103,14 +3097,8 @@ static const struct alg_test_desc alg_te
+@@ -3103,14 +3102,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3118,14 +3106,8 @@ static const struct alg_test_desc alg_te
+@@ -3118,14 +3111,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3133,14 +3115,8 @@ static const struct alg_test_desc alg_te
+@@ -3133,14 +3120,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3151,14 +3127,8 @@ static const struct alg_test_desc alg_te
+@@ -3151,14 +3132,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3167,14 +3137,8 @@ static const struct alg_test_desc alg_te
+@@ -3167,14 +3142,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -3197,14 +3161,8 @@ static const struct alg_test_desc alg_te
+@@ -3197,14 +3166,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3212,14 +3170,8 @@ static const struct alg_test_desc alg_te
+@@ -3212,14 +3175,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3227,14 +3179,8 @@ static const struct alg_test_desc alg_te
+@@ -3227,14 +3184,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3242,14 +3188,8 @@ static const struct alg_test_desc alg_te
+@@ -3242,14 +3193,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3257,14 +3197,8 @@ static const struct alg_test_desc alg_te
+@@ -3257,14 +3202,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3272,14 +3206,8 @@ static const struct alg_test_desc alg_te
+@@ -3272,14 +3211,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3287,14 +3215,8 @@ static const struct alg_test_desc alg_te
+@@ -3287,14 +3220,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3302,14 +3224,8 @@ static const struct alg_test_desc alg_te
+@@ -3302,14 +3229,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3317,10 +3233,7 @@ static const struct alg_test_desc alg_te
+@@ -3317,10 +3238,7 @@ static const struct alg_test_desc alg_te
.test = alg_test_kpp,
.fips_allowed = 1,
.suite = {
}
}, {
.alg = "gcm(aes)",
-@@ -3328,14 +3241,8 @@ static const struct alg_test_desc alg_te
+@@ -3328,14 +3246,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -3343,136 +3250,94 @@ static const struct alg_test_desc alg_te
+@@ -3343,136 +3255,94 @@ static const struct alg_test_desc alg_te
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
}
}, {
.alg = "jitterentropy_rng",
-@@ -3484,14 +3349,8 @@ static const struct alg_test_desc alg_te
+@@ -3484,14 +3354,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -3499,14 +3358,8 @@ static const struct alg_test_desc alg_te
+@@ -3499,14 +3363,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3514,14 +3367,8 @@ static const struct alg_test_desc alg_te
+@@ -3514,14 +3372,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3529,14 +3376,8 @@ static const struct alg_test_desc alg_te
+@@ -3529,14 +3381,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3544,14 +3385,8 @@ static const struct alg_test_desc alg_te
+@@ -3544,14 +3390,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3559,14 +3394,8 @@ static const struct alg_test_desc alg_te
+@@ -3559,14 +3399,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -3575,14 +3404,8 @@ static const struct alg_test_desc alg_te
+@@ -3575,14 +3409,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.comp = {
}
}
}, {
-@@ -3591,14 +3414,8 @@ static const struct alg_test_desc alg_te
+@@ -3591,14 +3419,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.comp = {
}
}
}, {
-@@ -3607,42 +3424,27 @@ static const struct alg_test_desc alg_te
+@@ -3607,42 +3429,27 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.comp = {
}
}, {
.alg = "ofb(aes)",
-@@ -3650,14 +3452,8 @@ static const struct alg_test_desc alg_te
+@@ -3650,14 +3457,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -3665,24 +3461,15 @@ static const struct alg_test_desc alg_te
+@@ -3665,24 +3466,15 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}, {
.alg = "rfc3686(ctr(aes))",
-@@ -3690,14 +3477,8 @@ static const struct alg_test_desc alg_te
+@@ -3690,14 +3482,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -3706,14 +3487,8 @@ static const struct alg_test_desc alg_te
+@@ -3706,14 +3492,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -3722,14 +3497,8 @@ static const struct alg_test_desc alg_te
+@@ -3722,14 +3502,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.aead = {
}
}
}, {
-@@ -3737,14 +3506,8 @@ static const struct alg_test_desc alg_te
+@@ -3737,14 +3511,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -3752,14 +3515,8 @@ static const struct alg_test_desc alg_te
+@@ -3752,14 +3520,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -3767,71 +3524,47 @@ static const struct alg_test_desc alg_te
+@@ -3767,71 +3529,47 @@ static const struct alg_test_desc alg_te
.test = alg_test_aead,
.suite = {
.aead = {
}
}
}, {
-@@ -3839,162 +3572,120 @@ static const struct alg_test_desc alg_te
+@@ -3839,162 +3577,120 @@ static const struct alg_test_desc alg_te
.test = alg_test_hash,
.fips_allowed = 1,
.suite = {
}
}, {
.alg = "xts(aes)",
-@@ -4002,14 +3693,8 @@ static const struct alg_test_desc alg_te
+@@ -4002,14 +3698,8 @@ static const struct alg_test_desc alg_te
.fips_allowed = 1,
.suite = {
.cipher = {
}
}
}, {
-@@ -4017,14 +3702,8 @@ static const struct alg_test_desc alg_te
+@@ -4017,14 +3707,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -4032,14 +3711,8 @@ static const struct alg_test_desc alg_te
+@@ -4032,14 +3716,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -4047,14 +3720,8 @@ static const struct alg_test_desc alg_te
+@@ -4047,14 +3725,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
}
}
}, {
-@@ -4062,14 +3729,8 @@ static const struct alg_test_desc alg_te
+@@ -4062,14 +3734,8 @@ static const struct alg_test_desc alg_te
.test = alg_test_skcipher,
.suite = {
.cipher = {
default y
select CRYPTO_RNG
select HW_RANDOM
-@@ -124,13 +149,26 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
+@@ -124,13 +149,31 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AUTHENC
+ select CRYPTO_AEAD
++ select CRYPTO_HASH
+ ---help---
+ CAAM driver for QorIQ Data Path Acceleration Architecture 2.
+ It handles DPSECI DPAA2 objects that sit on the Management Complex
+ def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
+ CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
+ CRYPTO_DEV_FSL_DPAA2_CAAM)
++
++config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
++ def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
++ CRYPTO_DEV_FSL_DPAA2_CAAM)
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
-@@ -5,13 +5,26 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
+@@ -5,13 +5,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG
ccflags-y := -DDEBUG
endif
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
++obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
bool rfc3686;
bool geniv;
};
-@@ -163,302 +96,67 @@ struct caam_aead_alg {
+@@ -163,302 +96,70 @@ struct caam_aead_alg {
bool registered;
};
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline = false;
- u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN -
+ ctx->adata.keylen_pad;
- DUMP_PREFIX_ADDRESS, 16, 4, desc,
- desc_bytes(desc), 1);
-#endif
-+ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize);
++ cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize,
++ ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
- desc_bytes(desc), 1);
-#endif
+ desc = ctx->sh_desc_dec;
-+ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize);
++ cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize,
++ ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
return 0;
}
-@@ -470,11 +168,11 @@ static int aead_set_sh_desc(struct crypt
+@@ -470,11 +171,12 @@ static int aead_set_sh_desc(struct crypt
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- bool keys_fit_inline;
- u32 geniv, moveiv;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 ctx1_iv_off = 0;
- u32 *desc;
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
-@@ -482,7 +180,7 @@ static int aead_set_sh_desc(struct crypt
+@@ -482,7 +184,7 @@ static int aead_set_sh_desc(struct crypt
return 0;
/* NULL encryption / decryption */
return aead_null_set_sh_desc(aead);
/*
-@@ -497,8 +195,14 @@ static int aead_set_sh_desc(struct crypt
+@@ -497,8 +199,14 @@ static int aead_set_sh_desc(struct crypt
* RFC3686 specific:
* CONTEXT1[255:128] = {NONCE, IV, COUNTER}
*/
if (alg->caam.geniv)
goto skip_enc;
-@@ -507,146 +211,64 @@ static int aead_set_sh_desc(struct crypt
+@@ -507,146 +215,64 @@ static int aead_set_sh_desc(struct crypt
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
- /* Class 2 operation */
- append_operation(desc, ctx->class2_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+-
+- /* Read and write assoclen bytes */
+- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ if (desc_inline_query(DESC_AEAD_ENC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
-- /* Read and write assoclen bytes */
-- append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-- append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+- /* Skip assoc data */
+- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
-- /* Skip assoc data */
-- append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+- /* read assoc before reading payload */
+- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+- FIFOLDST_VLF);
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
-- /* read assoc before reading payload */
-- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
-- FIFOLDST_VLF);
-+ ctx->adata.key_inline = !!(inl_mask & 1);
-+ ctx->cdata.key_inline = !!(inl_mask & 2);
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- /* Write ICV */
- append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
--
++ ctx->adata.key_inline = !!(inl_mask & 1);
++ ctx->cdata.key_inline = !!(inl_mask & 2);
+
- ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
- desc_bytes(desc),
- DMA_TO_DEVICE);
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, is_rfc3686, nonce, ctx1_iv_off,
-+ false);
++ false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
+ desc = ctx->sh_desc_dec;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, alg->caam.geniv, is_rfc3686,
-+ nonce, ctx1_iv_off, false);
++ nonce, ctx1_iv_off, false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
if (!alg->caam.geniv)
goto skip_givenc;
-@@ -655,107 +277,32 @@ skip_enc:
+@@ -655,107 +281,32 @@ skip_enc:
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
*/
+ desc = ctx->sh_desc_enc;
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize,
+ ctx->authsize, is_rfc3686, nonce,
-+ ctx1_iv_off, false);
++ ctx1_iv_off, false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
skip_givenc:
return 0;
-@@ -776,12 +323,12 @@ static int gcm_set_sh_desc(struct crypto
+@@ -776,12 +327,12 @@ static int gcm_set_sh_desc(struct crypto
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
return 0;
/*
-@@ -789,175 +336,35 @@ static int gcm_set_sh_desc(struct crypto
+@@ -789,175 +340,35 @@ static int gcm_set_sh_desc(struct crypto
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
return 0;
}
-@@ -976,11 +383,12 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -976,11 +387,12 @@ static int rfc4106_set_sh_desc(struct cr
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
return 0;
/*
-@@ -988,148 +396,37 @@ static int rfc4106_set_sh_desc(struct cr
+@@ -988,148 +400,37 @@ static int rfc4106_set_sh_desc(struct cr
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
return 0;
}
-@@ -1149,12 +446,12 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -1149,12 +450,12 @@ static int rfc4543_set_sh_desc(struct cr
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
return 0;
/*
-@@ -1162,151 +459,37 @@ static int rfc4543_set_sh_desc(struct cr
+@@ -1162,151 +463,37 @@ static int rfc4543_set_sh_desc(struct cr
* Job Descriptor and Shared Descriptor
* must fit into the 64-word Descriptor h/w Buffer
*/
return 0;
}
-@@ -1322,19 +505,9 @@ static int rfc4543_setauthsize(struct cr
+@@ -1322,74 +509,67 @@ static int rfc4543_setauthsize(struct cr
return 0;
}
- static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
struct crypto_authenc_keys keys;
-@@ -1343,53 +516,32 @@ static int aead_setkey(struct crypto_aea
+ int ret = 0;
+
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
goto badkey;
#endif
- ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
++ /*
++ * If DKP is supported, use it in the shared descriptor to generate
++ * the split key.
++ */
++ if (ctrlpriv->era >= 6) {
++ ctx->adata.keylen = keys.authkeylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
++
++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
++ goto badkey;
++
++ memcpy(ctx->key, keys.authkey, keys.authkeylen);
++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
++ keys.enckeylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma,
++ ctx->adata.keylen_pad +
++ keys.enckeylen, DMA_TO_DEVICE);
++ goto skip_split_key;
++ }
++
+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
- ctx->split_key_pad_len + keys.enckeylen, 1);
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
#endif
--
+
- ctx->enckeylen = keys.enckeylen;
-
- ret = aead_set_sh_desc(aead);
- }
-
- return ret;
++skip_split_key:
+ ctx->cdata.keylen = keys.enckeylen;
+ return aead_set_sh_desc(aead);
badkey:
crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
-@@ -1400,7 +552,6 @@ static int gcm_setkey(struct crypto_aead
+@@ -1400,7 +580,6 @@ static int gcm_setkey(struct crypto_aead
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
-@@ -1408,21 +559,10 @@ static int gcm_setkey(struct crypto_aead
+@@ -1408,21 +587,10 @@ static int gcm_setkey(struct crypto_aead
#endif
memcpy(ctx->key, key, keylen);
- return -ENOMEM;
- }
- ctx->enckeylen = keylen;
--
++ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
++ ctx->cdata.keylen = keylen;
+
- ret = gcm_set_sh_desc(aead);
- if (ret) {
- dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
- DMA_TO_DEVICE);
- }
-+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
-+ ctx->cdata.keylen = keylen;
-
+-
- return ret;
+ return gcm_set_sh_desc(aead);
}
static int rfc4106_setkey(struct crypto_aead *aead,
-@@ -1430,7 +570,6 @@ static int rfc4106_setkey(struct crypto_
+@@ -1430,7 +598,6 @@ static int rfc4106_setkey(struct crypto_
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
if (keylen < 4)
return -EINVAL;
-@@ -1446,22 +585,10 @@ static int rfc4106_setkey(struct crypto_
+@@ -1446,22 +613,10 @@ static int rfc4106_setkey(struct crypto_
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
}
static int rfc4543_setkey(struct crypto_aead *aead,
-@@ -1469,7 +596,6 @@ static int rfc4543_setkey(struct crypto_
+@@ -1469,7 +624,6 @@ static int rfc4543_setkey(struct crypto_
{
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
if (keylen < 4)
return -EINVAL;
-@@ -1485,43 +611,28 @@ static int rfc4543_setkey(struct crypto_
+@@ -1485,43 +639,28 @@ static int rfc4543_setkey(struct crypto_
* The last four bytes of the key material are used as the salt value
* in the nonce. Update the AES key length.
*/
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-@@ -1544,215 +655,33 @@ static int ablkcipher_setkey(struct cryp
+@@ -1544,215 +683,33 @@ static int ablkcipher_setkey(struct cryp
keylen -= CTR_RFC3686_NONCE_SIZE;
}
}
static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-@@ -1760,8 +689,7 @@ static int xts_ablkcipher_setkey(struct
+@@ -1760,8 +717,7 @@ static int xts_ablkcipher_setkey(struct
{
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(ablkcipher,
-@@ -1771,126 +699,38 @@ static int xts_ablkcipher_setkey(struct
+@@ -1771,126 +727,38 @@ static int xts_ablkcipher_setkey(struct
}
memcpy(ctx->key, key, keylen);
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
-@@ -1899,12 +739,12 @@ struct aead_edesc {
+@@ -1899,12 +767,12 @@ struct aead_edesc {
/*
* ablkcipher_edesc - s/w-extended ablkcipher descriptor
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
struct ablkcipher_edesc {
-@@ -1924,10 +764,11 @@ static void caam_unmap(struct device *de
+@@ -1924,10 +792,11 @@ static void caam_unmap(struct device *de
int sec4_sg_bytes)
{
if (dst != src) {
}
if (iv_dma)
-@@ -2021,8 +862,7 @@ static void ablkcipher_encrypt_done(stru
+@@ -2021,8 +890,7 @@ static void ablkcipher_encrypt_done(stru
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(jrdev, err);
-@@ -2031,10 +871,10 @@ static void ablkcipher_encrypt_done(stru
+@@ -2031,10 +899,10 @@ static void ablkcipher_encrypt_done(stru
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
edesc->src_nents > 1 ? 100 : ivsize, 1);
ablkcipher_unmap(jrdev, edesc, req);
-@@ -2062,8 +902,7 @@ static void ablkcipher_decrypt_done(stru
+@@ -2062,8 +930,7 @@ static void ablkcipher_decrypt_done(stru
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(jrdev, err);
-@@ -2071,10 +910,10 @@ static void ablkcipher_decrypt_done(stru
+@@ -2071,10 +938,10 @@ static void ablkcipher_decrypt_done(stru
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->info,
ivsize, 1);
ablkcipher_unmap(jrdev, edesc, req);
-@@ -2114,7 +953,7 @@ static void init_aead_job(struct aead_re
+@@ -2114,7 +981,7 @@ static void init_aead_job(struct aead_re
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (all_contig) {
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
-@@ -2129,7 +968,7 @@ static void init_aead_job(struct aead_re
+@@ -2129,7 +996,7 @@ static void init_aead_job(struct aead_re
out_options = in_options;
if (unlikely(req->src != req->dst)) {
dst_dma = sg_dma_address(req->dst);
} else {
dst_dma = edesc->sec4_sg_dma +
-@@ -2175,7 +1014,7 @@ static void init_gcm_job(struct aead_req
+@@ -2147,9 +1014,6 @@ static void init_aead_job(struct aead_re
+ append_seq_out_ptr(desc, dst_dma,
+ req->assoclen + req->cryptlen - authsize,
+ out_options);
+-
+- /* REG3 = assoclen */
+- append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+ }
+
+ static void init_gcm_job(struct aead_request *req,
+@@ -2164,6 +1028,7 @@ static void init_gcm_job(struct aead_req
+ unsigned int last;
+
+ init_aead_job(req, edesc, all_contig, encrypt);
++ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
+
+ /* BUG This should not be specific to generic GCM. */
+ last = 0;
+@@ -2175,7 +1040,7 @@ static void init_gcm_job(struct aead_req
FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | 12 | last);
/* Append Salt */
if (!generic_gcm)
/* Append IV */
append_data(desc, req->iv, ivsize);
/* End of blank commands */
-@@ -2190,7 +1029,7 @@ static void init_authenc_job(struct aead
+@@ -2190,7 +1055,8 @@ static void init_authenc_job(struct aead
struct caam_aead_alg, aead);
unsigned int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
const bool is_rfc3686 = alg->caam.rfc3686;
u32 *desc = edesc->hw_desc;
-@@ -2236,16 +1075,15 @@ static void init_ablkcipher_job(u32 *sh_
+@@ -2213,6 +1079,15 @@ static void init_authenc_job(struct aead
+
+ init_aead_job(req, edesc, all_contig, encrypt);
+
++ /*
++ * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports
++ * having DPOVRD as destination.
++ */
++ if (ctrlpriv->era < 3)
++ append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen);
++ else
++ append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen);
++
+ if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv))
+ append_load_as_imm(desc, req->iv, ivsize,
+ LDST_CLASS_1_CCB |
+@@ -2236,16 +1111,15 @@ static void init_ablkcipher_job(u32 *sh_
int len, sec4_sg_index = 0;
#ifdef DEBUG
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-@@ -2261,7 +1099,7 @@ static void init_ablkcipher_job(u32 *sh_
+@@ -2261,7 +1135,7 @@ static void init_ablkcipher_job(u32 *sh_
append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
if (likely(req->src == req->dst)) {
dst_dma = sg_dma_address(req->src);
} else {
dst_dma = edesc->sec4_sg_dma +
-@@ -2269,7 +1107,7 @@ static void init_ablkcipher_job(u32 *sh_
+@@ -2269,7 +1143,7 @@ static void init_ablkcipher_job(u32 *sh_
out_options = LDST_SGF;
}
} else {
dst_dma = sg_dma_address(req->dst);
} else {
dst_dma = edesc->sec4_sg_dma +
-@@ -2296,20 +1134,18 @@ static void init_ablkcipher_giv_job(u32
+@@ -2296,20 +1170,18 @@ static void init_ablkcipher_giv_job(u32
int len, sec4_sg_index = 0;
#ifdef DEBUG
src_dma = sg_dma_address(req->src);
in_options = 0;
} else {
-@@ -2340,87 +1176,100 @@ static struct aead_edesc *aead_edesc_all
+@@ -2340,87 +1212,100 @@ static struct aead_edesc *aead_edesc_all
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
edesc->sec4_sg + sec4_sg_index, 0);
}
-@@ -2573,13 +1422,9 @@ static int aead_decrypt(struct aead_requ
+@@ -2573,13 +1458,9 @@ static int aead_decrypt(struct aead_requ
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
-@@ -2619,51 +1464,80 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2619,51 +1500,80 @@ static struct ablkcipher_edesc *ablkciph
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
return ERR_PTR(-ENOMEM);
}
-@@ -2673,23 +1547,24 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2673,23 +1583,24 @@ static struct ablkcipher_edesc *ablkciph
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
return ERR_PTR(-ENOMEM);
}
-@@ -2701,7 +1576,7 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2701,7 +1612,7 @@ static struct ablkcipher_edesc *ablkciph
sec4_sg_bytes, 1);
#endif
return edesc;
}
-@@ -2792,30 +1667,54 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2792,30 +1703,54 @@ static struct ablkcipher_edesc *ablkciph
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
+ bool out_contig;
int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- int sec4_sg_index;
--
-- src_nents = sg_count(req->src, req->nbytes);
+ int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+- src_nents = sg_count(req->src, req->nbytes);
+-
- if (unlikely(req->dst != req->src))
- dst_nents = sg_count(req->dst, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
}
/*
-@@ -2825,21 +1724,29 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2825,21 +1760,29 @@ static struct ablkcipher_edesc *ablkciph
iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
return ERR_PTR(-ENOMEM);
}
-@@ -2849,24 +1756,24 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2849,24 +1792,24 @@ static struct ablkcipher_edesc *ablkciph
edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
desc_bytes;
return ERR_PTR(-ENOMEM);
}
edesc->iv_dma = iv_dma;
-@@ -2878,7 +1785,7 @@ static struct ablkcipher_edesc *ablkciph
+@@ -2878,7 +1821,7 @@ static struct ablkcipher_edesc *ablkciph
sec4_sg_bytes, 1);
#endif
return edesc;
}
-@@ -2889,7 +1796,7 @@ static int ablkcipher_givencrypt(struct
+@@ -2889,7 +1832,7 @@ static int ablkcipher_givencrypt(struct
struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
-@@ -2933,7 +1840,6 @@ struct caam_alg_template {
+@@ -2933,7 +1876,6 @@ struct caam_alg_template {
} template_u;
u32 class1_alg_type;
u32 class2_alg_type;
};
static struct caam_alg_template driver_algs[] = {
-@@ -3118,7 +2024,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3118,7 +2060,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3140,7 +2045,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3140,7 +2081,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3162,7 +2066,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3162,7 +2102,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3184,7 +2087,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3184,7 +2123,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3206,7 +2108,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3206,7 +2144,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3228,7 +2129,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3228,7 +2165,6 @@ static struct caam_aead_alg driver_aeads
.caam = {
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3250,7 +2150,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3250,7 +2186,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3273,7 +2172,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3273,7 +2208,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3296,7 +2194,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3296,7 +2230,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3319,7 +2216,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3319,7 +2252,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3342,7 +2238,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3342,7 +2274,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3365,7 +2260,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3365,7 +2296,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3388,7 +2282,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3388,7 +2318,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3411,7 +2304,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3411,7 +2340,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3434,7 +2326,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3434,7 +2362,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3457,7 +2348,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3457,7 +2384,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3480,7 +2370,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3480,7 +2406,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3503,7 +2392,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3503,7 +2428,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3526,7 +2414,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3526,7 +2450,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
}
},
{
-@@ -3549,7 +2436,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3549,7 +2472,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
}
},
-@@ -3573,7 +2459,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3573,7 +2495,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3597,7 +2482,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3597,7 +2518,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3621,7 +2505,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3621,7 +2541,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3645,7 +2528,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3645,7 +2564,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3669,7 +2551,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3669,7 +2587,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3693,7 +2574,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3693,7 +2610,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3717,7 +2597,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3717,7 +2633,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3741,7 +2620,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3741,7 +2656,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3765,7 +2643,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3765,7 +2679,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3789,7 +2666,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3789,7 +2702,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3812,7 +2688,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3812,7 +2724,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3835,7 +2710,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3835,7 +2746,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3858,7 +2732,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3858,7 +2768,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3881,7 +2754,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3881,7 +2790,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3904,7 +2776,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3904,7 +2812,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3927,7 +2798,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3927,7 +2834,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3950,7 +2820,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3950,7 +2856,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -3973,7 +2842,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3973,7 +2878,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -3996,7 +2864,6 @@ static struct caam_aead_alg driver_aeads
+@@ -3996,7 +2900,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -4019,7 +2886,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4019,7 +2922,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -4042,7 +2908,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4042,7 +2944,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
},
},
{
-@@ -4065,7 +2930,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4065,7 +2966,6 @@ static struct caam_aead_alg driver_aeads
.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.geniv = true,
},
},
-@@ -4090,7 +2954,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4090,7 +2990,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4115,7 +2978,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4115,7 +3014,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_MD5 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4141,7 +3003,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4141,7 +3039,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4166,7 +3027,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4166,7 +3063,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4192,7 +3052,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4192,7 +3088,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4217,7 +3076,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4217,7 +3112,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4243,7 +3101,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4243,7 +3137,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4268,7 +3125,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4268,7 +3161,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4294,7 +3150,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4294,7 +3186,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4319,7 +3174,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4319,7 +3210,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4345,7 +3199,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4345,7 +3235,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
},
},
-@@ -4370,7 +3223,6 @@ static struct caam_aead_alg driver_aeads
+@@ -4370,7 +3259,6 @@ static struct caam_aead_alg driver_aeads
OP_ALG_AAI_CTR_MOD128,
.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
OP_ALG_AAI_HMAC_PRECOMP,
.rfc3686 = true,
.geniv = true,
},
-@@ -4385,16 +3237,34 @@ struct caam_crypto_alg {
+@@ -4385,16 +3273,34 @@ struct caam_crypto_alg {
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam)
{
return 0;
}
-@@ -4421,25 +3291,9 @@ static int caam_aead_init(struct crypto_
+@@ -4421,25 +3327,9 @@ static int caam_aead_init(struct crypto_
static void caam_exit_common(struct caam_ctx *ctx)
{
caam_jr_free(ctx->jrdev);
}
-@@ -4515,7 +3369,6 @@ static struct caam_crypto_alg *caam_alg_
+@@ -4515,7 +3405,6 @@ static struct caam_crypto_alg *caam_alg_
t_alg->caam.class1_alg_type = template->class1_alg_type;
t_alg->caam.class2_alg_type = template->class2_alg_type;
}
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_desc.c
-@@ -0,0 +1,1913 @@
+@@ -0,0 +1,1961 @@
+/*
+ * Shared descriptors for aead, ablkcipher algorithms
+ *
+ * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor
+ * (non-protocol) with no (null) encryption.
+ * @desc: pointer to buffer used for descriptor construction
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values - one of
++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ * with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
-+ unsigned int icvsize)
++ unsigned int icvsize, int era)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd;
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
-+ if (adata->key_inline)
-+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
-+ KEY_ENC);
-+ else
-+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ if (era < 6) {
++ if (adata->key_inline)
++ append_key_as_imm(desc, adata->key_virt,
++ adata->keylen_pad, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT |
++ KEY_ENC);
++ else
++ append_key(desc, adata->key_dma, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ } else {
++ append_proto_dkp(desc, adata);
++ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* assoclen + cryptlen = seqinlen */
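The key-loading block above is the pattern this patch repeats across all of the authenc shared descriptors: on SEC Eras before 6 the host-computed MDHA split key is loaded into the class 2 key register (inline when it fits, by DMA address otherwise), while Era 6+ parts let the Derived Key Protocol (DKP) expand the raw HMAC key inside the descriptor itself. A minimal sketch of that selection, factored into a hypothetical helper (the name append_auth_key is illustrative only; append_key_as_imm(), append_key() and append_proto_dkp() are the real primitives used by the patch):

/*
 * Illustrative helper only: load the class 2 (authentication) key for a
 * given SEC Era, using the same descriptor-construction primitives as the
 * surrounding code.
 */
static void append_auth_key(u32 * const desc, struct alginfo *adata, int era)
{
	if (era < 6) {
		/* Host already generated the MDHA split key (gen_split_key()) */
		if (adata->key_inline)
			append_key_as_imm(desc, adata->key_virt,
					  adata->keylen_pad, adata->keylen,
					  CLASS_2 | KEY_DEST_MDHA_SPLIT |
					  KEY_ENC);
		else
			append_key(desc, adata->key_dma, adata->keylen,
				   CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
	} else {
		/* Era >= 6: DKP derives the split key from the raw HMAC key */
		append_proto_dkp(desc, adata);
	}
}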
+ * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor
+ * (non-protocol) with no (null) decryption.
+ * @desc: pointer to buffer used for descriptor construction
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values - one of
++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ * with OP_ALG_AAI_HMAC_PRECOMP.
+ * @icvsize: integrity check value (ICV) size (truncated or full)
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
-+ unsigned int icvsize)
++ unsigned int icvsize, int era)
+{
+ u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd;
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
-+ if (adata->key_inline)
-+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-+ adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ else
-+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ if (era < 6) {
++ if (adata->key_inline)
++ append_key_as_imm(desc, adata->key_virt,
++ adata->keylen_pad, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT |
++ KEY_ENC);
++ else
++ append_key(desc, adata->key_dma, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ } else {
++ append_proto_dkp(desc, adata);
++ }
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 operation */
+static void init_sh_desc_key_aead(u32 * const desc,
+ struct alginfo * const cdata,
+ struct alginfo * const adata,
-+ const bool is_rfc3686, u32 *nonce)
++ const bool is_rfc3686, u32 *nonce, int era)
+{
+ u32 *key_jump_cmd;
+ unsigned int enckeylen = cdata->keylen;
+ if (is_rfc3686)
+ enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
-+ if (adata->key_inline)
-+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-+ adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
-+ else
-+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ if (era < 6) {
++ if (adata->key_inline)
++ append_key_as_imm(desc, adata->key_virt,
++ adata->keylen_pad, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT |
++ KEY_ENC);
++ else
++ append_key(desc, adata->key_dma, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ } else {
++ append_proto_dkp(desc, adata);
++ }
+
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, enckeylen,
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values - one of
++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ * with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
-+ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi)
++ u32 *nonce, const u32 ctx1_iv_off, const bool is_qi,
++ int era)
+{
+ /* Note: Context registers are saved. */
-+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ }
+
+ /* Read and write assoclen bytes */
-+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ if (is_qi || era < 3) {
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ } else {
++ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++ }
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
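The associated-data length is now consumed from two different places depending on how the descriptor is driven: the caam/qi backend and pre-Era-3 hardware keep reading it from the MATH3 (REG3) register, while Era 3+ job-ring descriptors take it from DPOVRD. How assoclen reaches REG3 or DPOVRD is assumed to be the job descriptor's responsibility elsewhere in this patch; the constructors here only select the source, as in this condensed sketch:

/* Sketch of the assoclen source selection used by the encap/decap/givencap
 * constructors in this file.
 */
if (is_qi || era < 3) {
	/* legacy path: assoclen arrives in MATH register 3 */
	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
} else {
	/* Era >= 3 job ring: assoclen is taken from DPOVRD */
	append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
	append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
}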
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values - one of
++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ * with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
-+ const u32 ctx1_iv_off, const bool is_qi)
++ const u32 ctx1_iv_off, const bool is_qi, int era)
+{
+ /* Note: Context registers are saved. */
-+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+
+ /* Class 2 operation */
+ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
+ }
+
+ /* Read and write assoclen bytes */
-+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ if (geniv)
-+ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize);
-+ else
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ if (is_qi || era < 3) {
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ if (geniv)
++ append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM,
++ ivsize);
++ else
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3,
++ CAAM_CMD_SZ);
++ } else {
++ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++ if (geniv)
++ append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM,
++ ivsize);
++ else
++ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD,
++ CAAM_CMD_SZ);
++ }
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
+ * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128.
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1,
-+ * SHA224, SHA256, SHA384, SHA512} ANDed with OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values - one of
++ * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed
++ * with OP_ALG_AAI_HMAC_PRECOMP.
+ * @ivsize: initialization vector size
+ * @icvsize: integrity check value (ICV) size (truncated or full)
+ * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
+ * @nonce: pointer to rfc3686 nonce
+ * @ctx1_iv_off: IV offset in CONTEXT1 register
+ * @is_qi: true when called from caam/qi
-+ *
-+ * Note: Requires an MDHA split key.
++ * @era: SEC Era
+ */
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off,
-+ const bool is_qi)
++ const bool is_qi, int era)
+{
+ u32 geniv, moveiv;
+
+ /* Note: Context registers are saved. */
-+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce);
++ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+
+ if (is_qi) {
+ u32 *wait_load_cmd;
+ OP_ALG_ENCRYPT);
+
+ /* Read and write assoclen bytes */
-+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
-+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ if (is_qi || era < 3) {
++ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
++ } else {
++ append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++ append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ);
++ }
+
+ /* Skip assoc data */
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
+ * with OP_ALG_AAI_CBC
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values OP_ALG_ALGSEL_SHA1 ANDed with
-+ * OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
++ * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @assoclen: associated data length
+ * @ivsize: initialization vector size
+ * @authsize: authentication data size
+ * @blocksize: block cipher size
++ * @era: SEC Era
+ */
+void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int assoclen,
+ unsigned int ivsize, unsigned int authsize,
-+ unsigned int blocksize)
++ unsigned int blocksize, int era)
+{
+ u32 *key_jump_cmd, *zero_payload_jump_cmd;
+ u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
-+ if (adata->key_inline)
-+ append_key_as_imm(desc, adata->key_virt, adata->keylen_pad,
-+ adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT |
-+ KEY_ENC);
-+ else
-+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ if (era < 6) {
++ if (adata->key_inline)
++ append_key_as_imm(desc, adata->key_virt,
++ adata->keylen_pad, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT |
++ KEY_ENC);
++ else
++ append_key(desc, adata->key_dma, adata->keylen,
++ CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ } else {
++ append_proto_dkp(desc, adata);
++ }
+
+ if (cdata->key_inline)
+ append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
+ * @cdata: pointer to block cipher transform definitions
+ * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
+ * with OP_ALG_AAI_CBC
-+ * @adata: pointer to authentication transform definitions. Note that since a
-+ * split key is to be used, the size of the split key itself is
-+ * specified. Valid algorithm values OP_ALG_ALGSEL_ SHA1 ANDed with
-+ * OP_ALG_AAI_HMAC_PRECOMP.
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
++ * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
+ * @assoclen: associated data length
+ * @ivsize: initialization vector size
+ * @authsize: authentication data size
+ * @blocksize: block cipher size
++ * @era: SEC Era
+ */
+void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int assoclen,
+ unsigned int ivsize, unsigned int authsize,
-+ unsigned int blocksize)
++ unsigned int blocksize, int era)
+{
+ u32 stidx, jumpback;
+ u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
-+ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ if (era < 6)
++ append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
++ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ else
++ append_proto_dkp(desc, adata);
+
+ append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
-+ u8 *nonce = cdata->key_virt + cdata->keylen;
++ const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+
+ /* Load nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
-+ u8 *nonce = cdata->key_virt + cdata->keylen;
++ const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+
+ /* Load Nonce into CONTEXT1 reg */
+ if (is_rfc3686) {
-+ u8 *nonce = cdata->key_virt + cdata->keylen;
++ const u8 *nonce = cdata->key_virt + cdata->keylen;
+
+ append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
+ LDST_CLASS_IND_CCB |
+ 15 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
-+ unsigned int icvsize);
++ unsigned int icvsize, int era);
+
+void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata,
-+ unsigned int icvsize);
++ unsigned int icvsize, int era);
+
+void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off,
-+ const bool is_qi);
++ const bool is_qi, int era);
+
+void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool geniv,
+ const bool is_rfc3686, u32 *nonce,
-+ const u32 ctx1_iv_off, const bool is_qi);
++ const u32 ctx1_iv_off, const bool is_qi, int era);
+
+void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int ivsize,
+ unsigned int icvsize, const bool is_rfc3686,
+ u32 *nonce, const u32 ctx1_iv_off,
-+ const bool is_qi);
++ const bool is_qi, int era);
+
+void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int assoclen,
+ unsigned int ivsize, unsigned int authsize,
-+ unsigned int blocksize);
++ unsigned int blocksize, int era);
+
+void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
+ struct alginfo *adata, unsigned int assoclen,
+ unsigned int ivsize, unsigned int authsize,
-+ unsigned int blocksize);
++ unsigned int blocksize, int era);
+
+void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, unsigned int icvsize,
+#endif /* _CAAMALG_DESC_H_ */
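With the Era parameter added to every prototype above, callers fetch the SEC Era from the controller's private data and pass it down. A minimal caller-side sketch (assuming ctx->adata and ctx->authsize were filled in by setkey, and following the caamalg/caamalg_qi usage shown later in this patch):

/* Sketch: building the HMAC "null cipher" encap shared descriptor with the
 * new era argument; ctrlpriv->era comes from the job ring's parent device.
 */
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

cnstr_shdsc_aead_null_encap(ctx->sh_desc_enc, &ctx->adata, ctx->authsize,
			    ctrlpriv->era);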
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi.c
-@@ -0,0 +1,2877 @@
+@@ -0,0 +1,2929 @@
+/*
+ * Freescale FSL CAAM support for crypto API over QI backend.
+ * Based on caamalg.c
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
-+ ctx1_iv_off, true);
++ ctx1_iv_off, true, ctrlpriv->era);
+
+skip_enc:
+ /* aead_decrypt shared descriptor */
+
+ cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, alg->caam.geniv,
-+ is_rfc3686, nonce, ctx1_iv_off, true);
++ is_rfc3686, nonce, ctx1_iv_off, true,
++ ctrlpriv->era);
+
+ if (!alg->caam.geniv)
+ goto skip_givenc;
+
+ cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
-+ ctx1_iv_off, true);
++ ctx1_iv_off, true, ctrlpriv->era);
+
+skip_givenc:
+ return 0;
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+ struct crypto_authenc_keys keys;
+ int ret = 0;
+
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
++ /*
++ * If DKP is supported, use it in the shared descriptor to generate
++ * the split key.
++ */
++ if (ctrlpriv->era >= 6) {
++ ctx->adata.keylen = keys.authkeylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
++
++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
++ goto badkey;
++
++ memcpy(ctx->key, keys.authkey, keys.authkeylen);
++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
++ keys.enckeylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma,
++ ctx->adata.keylen_pad +
++ keys.enckeylen, DMA_TO_DEVICE);
++ goto skip_split_key;
++ }
++
+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
++skip_split_key:
+ ctx->cdata.keylen = keys.enckeylen;
+
+ ret = aead_set_sh_desc(aead);
+ unsigned int assoclen = 13; /* always 13 bytes for TLS */
+ unsigned int data_len[2];
+ u32 inl_mask;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
-+ assoclen, ivsize, ctx->authsize, blocksize);
++ assoclen, ivsize, ctx->authsize, blocksize,
++ ctrlpriv->era);
+
+ /*
+ * TLS 1.0 decrypt shared descriptor
+ * Keys do not fit inline, regardless of algorithms used
+ */
++ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
-+ assoclen, ivsize, ctx->authsize, blocksize);
++ assoclen, ivsize, ctx->authsize, blocksize,
++ ctrlpriv->era);
+
+ return 0;
+}
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ struct device *jrdev = ctx->jrdev;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
+ struct crypto_authenc_keys keys;
+ int ret = 0;
+
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
++ /*
++ * If DKP is supported, use it in the shared descriptor to generate
++ * the split key.
++ */
++ if (ctrlpriv->era >= 6) {
++ ctx->adata.keylen = keys.authkeylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
++
++ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
++ goto badkey;
++
++ memcpy(ctx->key, keys.authkey, keys.authkeylen);
++ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
++ keys.enckeylen);
++ dma_sync_single_for_device(jrdev, ctx->key_dma,
++ ctx->adata.keylen_pad +
++ keys.enckeylen, DMA_TO_DEVICE);
++ goto skip_split_key;
++ }
++
+ ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
+ keys.authkeylen, CAAM_MAX_KEY_SIZE -
+ keys.enckeylen);
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+#endif
+
++skip_split_key:
+ ctx->cdata.keylen = keys.enckeylen;
+
+ ret = tls_set_sh_desc(tls);
+MODULE_AUTHOR("Freescale Semiconductor");
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.c
-@@ -0,0 +1,4428 @@
+@@ -0,0 +1,5920 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+#include "sg_sw_qm2.h"
+#include "key_gen.h"
+#include "caamalg_desc.h"
++#include "caamhash_desc.h"
+#include "../../../drivers/staging/fsl-mc/include/mc.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-io.h"
+#include "../../../drivers/staging/fsl-mc/include/dpaa2-fd.h"
+ * caam_ctx - per-session context
+ * @flc: Flow Contexts array
+ * @key: virtual address of the key(s): [authentication key], encryption key
++ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @key_dma: I/O virtual address of the key
+ * @dev: dpseci device
+ * @adata: authentication algorithm details
+struct caam_ctx {
+ struct caam_flc flc[NUM_OP];
+ u8 key[CAAM_MAX_KEY_SIZE];
++ dma_addr_t flc_dma[NUM_OP];
+ dma_addr_t key_dma;
+ struct device *dev;
+ struct alginfo adata;
+ case CRYPTO_ALG_TYPE_AEAD:
+ return aead_request_ctx(container_of(areq, struct aead_request,
+ base));
++ case CRYPTO_ALG_TYPE_AHASH:
++ return ahash_request_ctx(ahash_request_cast(areq));
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct device *dev = ctx->dev;
++ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ struct caam_flc *flc;
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ if (alg->caam.geniv)
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686,
-+ nonce, ctx1_iv_off, true);
++ nonce, ctx1_iv_off, true,
++ priv->sec_attr.era);
+ else
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
-+ ctx1_iv_off, true);
++ ctx1_iv_off, true, priv->sec_attr.era);
+
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ /* aead_decrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, alg->caam.geniv,
-+ is_rfc3686, nonce, ctx1_iv_off, true);
-+
++ is_rfc3686, nonce, ctx1_iv_off, true,
++ priv->sec_attr.era);
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ return 0;
+}
+ complete(&res->completion);
+}
+
-+static int gen_split_key_sh(struct device *dev, u8 *key_out,
-+ struct alginfo * const adata, const u8 *key_in,
-+ u32 keylen)
-+{
-+ struct caam_request *req_ctx;
-+ u32 *desc;
-+ struct split_key_sh_result result;
-+ dma_addr_t dma_addr_in, dma_addr_out;
-+ struct caam_flc *flc;
-+ struct dpaa2_fl_entry *in_fle, *out_fle;
-+ int ret = -ENOMEM;
-+
-+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
-+ if (!req_ctx)
-+ return -ENOMEM;
-+
-+ in_fle = &req_ctx->fd_flt[1];
-+ out_fle = &req_ctx->fd_flt[0];
-+
-+ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
-+ if (!flc)
-+ goto err_flc;
-+
-+ dma_addr_in = dma_map_single(dev, (void *)key_in, keylen,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, dma_addr_in)) {
-+ dev_err(dev, "unable to map key input memory\n");
-+ goto err_dma_addr_in;
-+ }
-+
-+ dma_addr_out = dma_map_single(dev, key_out, adata->keylen_pad,
-+ DMA_FROM_DEVICE);
-+ if (dma_mapping_error(dev, dma_addr_out)) {
-+ dev_err(dev, "unable to map key output memory\n");
-+ goto err_dma_addr_out;
-+ }
-+
-+ desc = flc->sh_desc;
-+
-+ init_sh_desc(desc, 0);
-+ append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
-+
-+ /* Sets MDHA up into an HMAC-INIT */
-+ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) |
-+ OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT |
-+ OP_ALG_AS_INIT);
-+
-+ /*
-+ * do a FIFO_LOAD of zero, this will trigger the internal key expansion
-+ * into both pads inside MDHA
-+ */
-+ append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB |
-+ FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
-+
-+ /*
-+ * FIFO_STORE with the explicit split-key content store
-+ * (0x26 output type)
-+ */
-+ append_fifo_store(desc, dma_addr_out, adata->keylen,
-+ LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
-+
-+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ goto err_flc_dma;
-+ }
-+
-+ dpaa2_fl_set_final(in_fle, true);
-+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(in_fle, dma_addr_in);
-+ dpaa2_fl_set_len(in_fle, keylen);
-+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
-+ dpaa2_fl_set_addr(out_fle, dma_addr_out);
-+ dpaa2_fl_set_len(out_fle, adata->keylen_pad);
-+
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1);
-+ print_hex_dump(KERN_ERR, "desc@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-+#endif
-+
-+ result.err = 0;
-+ init_completion(&result.completion);
-+ result.dev = dev;
-+
-+ req_ctx->flc = flc;
-+ req_ctx->cbk = split_key_sh_done;
-+ req_ctx->ctx = &result;
-+
-+ ret = dpaa2_caam_enqueue(dev, req_ctx);
-+ if (ret == -EINPROGRESS) {
-+ /* in progress */
-+ wait_for_completion(&result.completion);
-+ ret = result.err;
-+#ifdef DEBUG
-+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, key_out,
-+ adata->keylen_pad, 1);
-+#endif
-+ }
-+
-+ dma_unmap_single(dev, flc->flc_dma, sizeof(flc->flc) + desc_bytes(desc),
-+ DMA_TO_DEVICE);
-+err_flc_dma:
-+ dma_unmap_single(dev, dma_addr_out, adata->keylen_pad, DMA_FROM_DEVICE);
-+err_dma_addr_out:
-+ dma_unmap_single(dev, dma_addr_in, keylen, DMA_TO_DEVICE);
-+err_dma_addr_in:
-+ kfree(flc);
-+err_flc:
-+ kfree(req_ctx);
-+ return ret;
-+}
-+
-+static int gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
-+ u32 authkeylen)
-+{
-+ return gen_split_key_sh(ctx->dev, ctx->key, &ctx->adata, key_in,
-+ authkeylen);
-+}
-+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
-+ int ret;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
-+ ctx->adata.keylen = split_key_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+ ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+
-+#ifdef DEBUG
-+ dev_err(dev, "split keylen %d split keylen padded %d\n",
-+ ctx->adata.keylen, ctx->adata.keylen_pad);
-+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey, keylen, 1);
-+#endif
++ ctx->adata.keylen = keys.authkeylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
-+ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
-+ if (ret)
-+ goto badkey;
-+
-+ /* postpend encryption key to auth split key */
++ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
-+
-+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
++ keys.enckeylen, DMA_BIDIRECTIONAL);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+
+ ctx->cdata.keylen = keys.enckeylen;
+
-+ ret = aead_set_sh_desc(aead);
-+ if (ret)
-+ dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
-+
-+ return ret;
++ return aead_set_sh_desc(aead);
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ unsigned int ivsize = crypto_aead_ivsize(tls);
+ unsigned int blocksize = crypto_aead_blocksize(tls);
+ struct device *dev = ctx->dev;
++ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ struct caam_flc *flc;
+ u32 *desc;
+ unsigned int assoclen = 13; /* always 13 bytes for TLS */
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_tls_encap(desc, &ctx->cdata, &ctx->adata,
-+ assoclen, ivsize, ctx->authsize, blocksize);
-+
++ assoclen, ivsize, ctx->authsize, blocksize,
++ priv->sec_attr.era);
+ flc->flc[1] = desc_len(desc);
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ /*
+ * TLS 1.0 decrypt shared descriptor
+ * Keys do not fit inline, regardless of algorithms used
+ */
++ ctx->adata.key_inline = false;
+ ctx->adata.key_dma = ctx->key_dma;
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_tls_decap(desc, &ctx->cdata, &ctx->adata, assoclen, ivsize,
-+ ctx->authsize, blocksize);
-+
++ ctx->authsize, blocksize, priv->sec_attr.era);
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ return 0;
+}
+ struct caam_ctx *ctx = crypto_aead_ctx(tls);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
-+ int ret;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
-+ ctx->adata.keylen = split_key_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+ ctx->adata.keylen_pad = split_key_pad_len(ctx->adata.algtype &
-+ OP_ALG_ALGSEL_MASK);
-+
-+#ifdef DEBUG
-+ dev_err(dev, "split keylen %d split keylen padded %d\n",
-+ ctx->adata.keylen, ctx->adata.keylen_pad);
-+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
-+ DUMP_PREFIX_ADDRESS, 16, 4, keys.authkey,
-+ keys.authkeylen + keys.enckeylen, 1);
-+#endif
++ ctx->adata.keylen = keys.authkeylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
-+ ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
-+ if (ret)
-+ goto badkey;
-+
-+ /* postpend encryption key to auth split key */
++ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
-+
-+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
++ keys.enckeylen, DMA_BIDIRECTIONAL);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+
+ ctx->cdata.keylen = keys.enckeylen;
+
-+ ret = tls_set_sh_desc(tls);
-+ if (ret)
-+ dma_unmap_single(dev, ctx->key_dma, ctx->adata.keylen_pad +
-+ keys.enckeylen, DMA_TO_DEVICE);
-+
-+ return ret;
++ return tls_set_sh_desc(tls);
+badkey:
+ crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
-+
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
-+
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ return 0;
+}
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
-+ int ret;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+#endif
+
+ memcpy(ctx->key, key, keylen);
-+ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->key_dma, keylen,
++ DMA_BIDIRECTIONAL);
+ ctx->cdata.keylen = keylen;
+
-+ ret = gcm_set_sh_desc(aead);
-+ if (ret)
-+ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
-+
-+ return ret;
++ return gcm_set_sh_desc(aead);
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
-+
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
-+
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ return 0;
+}
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
-+ int ret;
+
+ if (keylen < 4)
+ return -EINVAL;
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
-+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
-+
-+ ret = rfc4106_set_sh_desc(aead);
-+ if (ret)
-+ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
++ DMA_BIDIRECTIONAL);
+
-+ return ret;
++ return rfc4106_set_sh_desc(aead);
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
-+
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
-+
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ return 0;
+}
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
-+ int ret;
+
+ if (keylen < 4)
+ return -EINVAL;
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
-+ ctx->key_dma = dma_map_single(dev, ctx->key, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
-+
-+ ret = rfc4543_set_sh_desc(aead);
-+ if (ret)
-+ dma_unmap_single(dev, ctx->key_dma, ctx->cdata.keylen,
-+ DMA_TO_DEVICE);
++ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
++ DMA_BIDIRECTIONAL);
+
-+ return ret;
++ return rfc4543_set_sh_desc(aead);
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+
-+ memcpy(ctx->key, key, keylen);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
-+ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
+ ctx->cdata.keylen = keylen;
-+ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* ablkcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
-+
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ /* ablkcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
-+
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ /* ablkcipher_givencrypt shared descriptor */
+ flc = &ctx->flc[GIVENCRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata,
+ ivsize, is_rfc3686, ctx1_iv_off);
-+
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[GIVENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ return 0;
+}
+ return -EINVAL;
+ }
+
-+ memcpy(ctx->key, key, keylen);
-+ ctx->key_dma = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, ctx->key_dma)) {
-+ dev_err(dev, "unable to map key i/o memory\n");
-+ return -ENOMEM;
-+ }
+ ctx->cdata.keylen = keylen;
-+ ctx->cdata.key_virt = ctx->key;
++ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* xts_ablkcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
-+
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ /* xts_ablkcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
-+
+ cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
-+
+ flc->flc[1] = desc_len(desc); /* SDL */
-+ flc->flc_dma = dma_map_single(dev, flc, sizeof(flc->flc) +
-+ desc_bytes(desc), DMA_TO_DEVICE);
-+ if (dma_mapping_error(dev, flc->flc_dma)) {
-+ dev_err(dev, "unable to map shared descriptor\n");
-+ return -ENOMEM;
-+ }
++ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
++ sizeof(flc->flc) + desc_bytes(desc),
++ DMA_BIDIRECTIONAL);
+
+ return 0;
+}
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
++ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->op_type = ENCRYPT;
+ caam_req->cbk = aead_encrypt_done;
+ caam_req->ctx = &req->base;
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
++ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->op_type = DECRYPT;
+ caam_req->cbk = aead_decrypt_done;
+ caam_req->ctx = &req->base;
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
++ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->op_type = ENCRYPT;
+ caam_req->cbk = tls_encrypt_done;
+ caam_req->ctx = &req->base;
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
++ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->op_type = DECRYPT;
+ caam_req->cbk = tls_decrypt_done;
+ caam_req->ctx = &req->base;
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
++ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->op_type = ENCRYPT;
+ caam_req->cbk = ablkcipher_done;
+ caam_req->ctx = &req->base;
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[GIVENCRYPT];
++ caam_req->flc_dma = ctx->flc_dma[GIVENCRYPT];
+ caam_req->op_type = GIVENCRYPT;
+ caam_req->cbk = ablkcipher_done;
+ caam_req->ctx = &req->base;
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
++ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->op_type = DECRYPT;
+ caam_req->cbk = ablkcipher_done;
+ caam_req->ctx = &req->base;
+ struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ crypto_alg);
+ struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
++ dma_addr_t dma_addr;
++ int i;
+
+ /* copy descriptor header template value */
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG |
+
+ ctx->dev = caam_alg->caam.dev;
+
++ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
++ offsetof(struct caam_ctx, flc_dma),
++ DMA_BIDIRECTIONAL,
++ DMA_ATTR_SKIP_CPU_SYNC);
++ if (dma_mapping_error(ctx->dev, dma_addr)) {
++ dev_err(ctx->dev, "unable to map key, shared descriptors\n");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < NUM_OP; i++)
++ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
++ ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
++
+ return 0;
+}
+
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
-+ int i;
-+
-+ for (i = 0; i < NUM_OP; i++) {
-+ if (!ctx->flc[i].flc_dma)
-+ continue;
-+ dma_unmap_single(ctx->dev, ctx->flc[i].flc_dma,
-+ sizeof(ctx->flc[i].flc) +
-+ desc_bytes(ctx->flc[i].sh_desc),
-+ DMA_TO_DEVICE);
-+ }
-+
-+ if (ctx->key_dma)
-+ dma_unmap_single(ctx->dev, ctx->key_dma,
-+ ctx->cdata.keylen + ctx->adata.keylen_pad,
-+ DMA_TO_DEVICE);
++ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
++ offsetof(struct caam_ctx, flc_dma),
++ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
+}
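The exit path above undoes a single streaming mapping that now covers both the Flow Context array and the key buffer: caam_cra_init() maps everything up to the flc_dma member in one dma_map_single_attrs() call and derives the per-operation flc_dma[] entries and key_dma by offset, so setkey/set_sh_desc only need dma_sync_single_for_device() afterwards. A sketch of the layout this relies on (field order as declared in struct caam_ctx earlier in this file):

/*
 * struct caam_ctx layout relied upon by the one-shot mapping:
 *
 *   offset 0                        : flc[0] .. flc[NUM_OP - 1]
 *   NUM_OP * sizeof(flc[0])         : key[CAAM_MAX_KEY_SIZE]
 *   offsetof(struct caam_ctx, flc_dma) : end of the DMA-mapped region
 */
dma_addr_t dma_addr;
int i;

dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
				offsetof(struct caam_ctx, flc_dma),
				DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(ctx->dev, dma_addr))
	return -ENOMEM;

for (i = 0; i < NUM_OP; i++)
	ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);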
+
+static void caam_cra_exit(struct crypto_tfm *tfm)
+ alg->exit = caam_cra_exit_aead;
+}
+
-+static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
-+{
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+
-+ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
-+ napi_schedule_irqoff(&ppriv->napi);
-+}
-+
-+static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
-+{
-+ struct device *dev = priv->dev;
-+ struct dpaa2_io_notification_ctx *nctx;
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+ int err, i = 0, cpu;
++/* max hash key is max split key size */
++#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
+
-+ for_each_online_cpu(cpu) {
-+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ ppriv->priv = priv;
-+ nctx = &ppriv->nctx;
-+ nctx->is_cdan = 0;
-+ nctx->id = ppriv->rsp_fqid;
-+ nctx->desired_cpu = cpu;
-+ nctx->cb = dpaa2_caam_fqdan_cb;
++#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
++#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+
-+ /* Register notification callbacks */
-+ err = dpaa2_io_service_register(NULL, nctx);
-+ if (unlikely(err)) {
-+ dev_err(dev, "notification register failed\n");
-+ nctx->cb = NULL;
-+ goto err;
-+ }
++#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
++ CAAM_MAX_HASH_KEY_SIZE)
++#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
+
-+ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
-+ dev);
-+ if (unlikely(!ppriv->store)) {
-+ dev_err(dev, "dpaa2_io_store_create() failed\n");
-+ goto err;
-+ }
++/* caam context sizes for hashes: running digest + 8 */
++#define HASH_MSG_LEN 8
++#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
+
-+ if (++i == priv->num_pairs)
-+ break;
-+ }
++enum hash_optype {
++ UPDATE = 0,
++ UPDATE_FIRST,
++ FINALIZE,
++ DIGEST,
++ HASH_NUM_OP
++};
+
-+ return 0;
++/**
++ * caam_hash_ctx - ahash per-session context
++ * @flc: Flow Contexts array
++ * @flc_dma: I/O virtual addresses of the Flow Contexts
++ * @key: virtual address of the authentication key
++ * @dev: dpseci device
++ * @ctx_len: size of Context Register
++ * @adata: hashing algorithm details
++ */
++struct caam_hash_ctx {
++ struct caam_flc flc[HASH_NUM_OP];
++ dma_addr_t flc_dma[HASH_NUM_OP];
++ u8 key[CAAM_MAX_HASH_KEY_SIZE];
++ struct device *dev;
++ int ctx_len;
++ struct alginfo adata;
++};
+
-+err:
-+ for_each_online_cpu(cpu) {
-+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ if (!ppriv->nctx.cb)
-+ break;
-+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
-+ }
++/* ahash state */
++struct caam_hash_state {
++ struct caam_request caam_req;
++ dma_addr_t buf_dma;
++ dma_addr_t ctx_dma;
++ u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
++ int buflen_0;
++ u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
++ int buflen_1;
++ u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
++ int (*update)(struct ahash_request *req);
++ int (*final)(struct ahash_request *req);
++ int (*finup)(struct ahash_request *req);
++ int current_buf;
++};
+
-+ for_each_online_cpu(cpu) {
-+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ if (!ppriv->store)
-+ break;
-+ dpaa2_io_store_destroy(ppriv->store);
-+ }
++struct caam_export_state {
++ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
++ u8 caam_ctx[MAX_CTX_LEN];
++ int buflen;
++ int (*update)(struct ahash_request *req);
++ int (*final)(struct ahash_request *req);
++ int (*finup)(struct ahash_request *req);
++};
+
-+ return err;
++static inline void switch_buf(struct caam_hash_state *state)
++{
++ state->current_buf ^= 1;
+}
+
-+static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
++static inline u8 *current_buf(struct caam_hash_state *state)
+{
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+ int i = 0, cpu;
-+
-+ for_each_online_cpu(cpu) {
-+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
-+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
-+ dpaa2_io_store_destroy(ppriv->store);
++ return state->current_buf ? state->buf_1 : state->buf_0;
++}
+
-+ if (++i == priv->num_pairs)
-+ return;
-+ }
++static inline u8 *alt_buf(struct caam_hash_state *state)
++{
++ return state->current_buf ? state->buf_0 : state->buf_1;
+}
+
-+static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
++static inline int *current_buflen(struct caam_hash_state *state)
+{
-+ struct dpseci_rx_queue_cfg rx_queue_cfg;
-+ struct device *dev = priv->dev;
-+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
-+ struct dpaa2_caam_priv_per_cpu *ppriv;
-+ int err = 0, i = 0, cpu;
++ return state->current_buf ? &state->buflen_1 : &state->buflen_0;
++}
+
-+ /* Configure Rx queues */
-+ for_each_online_cpu(cpu) {
-+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
++static inline int *alt_buflen(struct caam_hash_state *state)
++{
++ return state->current_buf ? &state->buflen_0 : &state->buflen_1;
++}
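These helpers implement the usual caamhash double-buffering: one buffer holds the bytes carried over from the previous request and is fed into the current job, while the alternate buffer collects the new leftover; once the job has been built the roles flip. A rough sketch of how an update path is expected to use them (control flow only; the S/G and descriptor construction, and the copy of the tail into the alternate buffer, are elided; assumes the usual caamhash includes such as <crypto/scatterwalk.h>):

/*
 * Sketch (not the actual ahash_update from this patch): how the two
 * buffers rotate. 'blocksize' is the algorithm block size.
 */
static void update_buffers_sketch(struct caam_hash_state *state,
				  struct ahash_request *req, int blocksize)
{
	int *buflen = current_buflen(state);
	int *next_buflen = alt_buflen(state);
	int in_len = *buflen + req->nbytes;

	/* keep the non-block-multiple tail for the next call */
	*next_buflen = in_len & (blocksize - 1);

	if (in_len - *next_buflen) {
		/*
		 * current_buf(state) plus the leading part of req->src go
		 * into this job; the tail lands in alt_buf(state), and the
		 * buffers swap roles once the job has been built.
		 */
		switch_buf(state);
	} else {
		/* not enough data for a full block: just accumulate */
		scatterwalk_map_and_copy(current_buf(state) + *buflen,
					 req->src, 0, req->nbytes, 0);
		*buflen = in_len;
	}
}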
+
-+ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
-+ DPSECI_QUEUE_OPT_USER_CTX;
-+ rx_queue_cfg.order_preservation_en = 0;
-+ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
-+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
-+ /*
-+ * Rx priority (WQ) doesn't really matter, since we use
-+ * pull mode, i.e. volatile dequeues from specific FQs
-+ */
-+ rx_queue_cfg.dest_cfg.priority = 0;
-+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
++/* Map current buffer in state (if length > 0) and put it in link table */
++static inline int buf_map_to_qm_sg(struct device *dev,
++ struct dpaa2_sg_entry *qm_sg,
++ struct caam_hash_state *state)
++{
++ int buflen = *current_buflen(state);
+
-+ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
-+ &rx_queue_cfg);
-+ if (err) {
-+ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
-+ err);
-+ return err;
-+ }
++ if (!buflen)
++ return 0;
+
-+ if (++i == priv->num_pairs)
-+ break;
++ state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(dev, state->buf_dma)) {
++ dev_err(dev, "unable to map buf\n");
++ state->buf_dma = 0;
++ return -ENOMEM;
+ }
+
-+ return err;
++ dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
++
++ return 0;
+}
+
-+static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
++/* Map state->caam_ctx, and add it to link table */
++static inline int ctx_map_to_qm_sg(struct device *dev,
++ struct caam_hash_state *state, int ctx_len,
++ struct dpaa2_sg_entry *qm_sg, u32 flag)
+{
-+ struct device *dev = priv->dev;
-+
++ state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
++ if (dma_mapping_error(dev, state->ctx_dma)) {
++ dev_err(dev, "unable to map ctx\n");
++ state->ctx_dma = 0;
++ return -ENOMEM;
++ }
++
++ dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
++
++ return 0;
++}
++
++static int ahash_set_sh_desc(struct crypto_ahash *ahash)
++{
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ int digestsize = crypto_ahash_digestsize(ahash);
++ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
++ struct caam_flc *flc;
++ u32 *desc;
++
++ ctx->adata.key_virt = ctx->key;
++ ctx->adata.key_inline = true;
++
++ /* ahash_update shared descriptor */
++ flc = &ctx->flc[UPDATE];
++ desc = flc->sh_desc;
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
++ ctx->ctx_len, true, priv->sec_attr.era);
++ flc->flc[1] = desc_len(desc); /* SDL */
++ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
++ desc_bytes(desc), DMA_BIDIRECTIONAL);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "ahash update shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++ /* ahash_update_first shared descriptor */
++ flc = &ctx->flc[UPDATE_FIRST];
++ desc = flc->sh_desc;
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
++ ctx->ctx_len, false, priv->sec_attr.era);
++ flc->flc[1] = desc_len(desc); /* SDL */
++ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
++ desc_bytes(desc), DMA_BIDIRECTIONAL);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "ahash update first shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++ /* ahash_final shared descriptor */
++ flc = &ctx->flc[FINALIZE];
++ desc = flc->sh_desc;
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
++ ctx->ctx_len, true, priv->sec_attr.era);
++ flc->flc[1] = desc_len(desc); /* SDL */
++ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
++ desc_bytes(desc), DMA_BIDIRECTIONAL);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "ahash final shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++ /* ahash_digest shared descriptor */
++ flc = &ctx->flc[DIGEST];
++ desc = flc->sh_desc;
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
++ ctx->ctx_len, false, priv->sec_attr.era);
++ flc->flc[1] = desc_len(desc); /* SDL */
++ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
++ desc_bytes(desc), DMA_BIDIRECTIONAL);
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "ahash digest shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++ return 0;
++}
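
ahash_set_sh_desc() prepares one shared descriptor per operation type; they differ only in the algorithm state passed to cnstr_shdsc_ahash(), in whether the running context is imported, and in how many bytes the job writes back. A small table-driven sketch of that relationship, with the values read off the calls above (illustrative code, not part of the driver):

    #include <stdio.h>

    enum op { UPDATE, UPDATE_FIRST, FINALIZE, DIGEST };

    struct shdesc_cfg {
            const char *name;
            const char *alg_state;  /* OP_ALG_AS_* handed to cnstr_shdsc_ahash() */
            int import_ctx;         /* job resumes from a saved running context? */
            const char *out_len;    /* length written back by the job            */
    };

    static const struct shdesc_cfg cfg[] = {
            [UPDATE]       = { "update",       "UPDATE",    1, "ctx_len"    },
            [UPDATE_FIRST] = { "update_first", "INIT",      0, "ctx_len"    },
            [FINALIZE]     = { "final",        "FINALIZE",  1, "digestsize" },
            [DIGEST]       = { "digest",       "INITFINAL", 0, "digestsize" },
    };

    int main(void)
    {
            for (int i = 0; i < 4; i++)
                    printf("%-12s state=%-9s import_ctx=%d out=%s\n", cfg[i].name,
                           cfg[i].alg_state, cfg[i].import_ctx, cfg[i].out_len);
            return 0;
    }
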
++
++/* Digest the key when it is longer than the algorithm's block size */
++static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
++ u32 *keylen, u8 *key_out, u32 digestsize)
++{
++ struct caam_request *req_ctx;
++ u32 *desc;
++ struct split_key_sh_result result;
++ dma_addr_t src_dma, dst_dma;
++ struct caam_flc *flc;
++ dma_addr_t flc_dma;
++ int ret = -ENOMEM;
++ struct dpaa2_fl_entry *in_fle, *out_fle;
++
++ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
++ if (!req_ctx)
++ return -ENOMEM;
++
++ in_fle = &req_ctx->fd_flt[1];
++ out_fle = &req_ctx->fd_flt[0];
++
++ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
++ if (!flc)
++ goto err_flc;
++
++ src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, src_dma)) {
++ dev_err(ctx->dev, "unable to map key input memory\n");
++ goto err_src_dma;
++ }
++ dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, dst_dma)) {
++ dev_err(ctx->dev, "unable to map key output memory\n");
++ goto err_dst_dma;
++ }
++
++ desc = flc->sh_desc;
++
++ init_sh_desc(desc, 0);
++
++ /* descriptor to perform unkeyed hash on key_in */
++ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
++ OP_ALG_AS_INITFINAL);
++ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
++ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
++ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++
++ flc->flc[1] = desc_len(desc); /* SDL */
++ flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
++ desc_bytes(desc), DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, flc_dma)) {
++ dev_err(ctx->dev, "unable to map shared descriptor\n");
++ goto err_flc_dma;
++ }
++
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(in_fle, src_dma);
++ dpaa2_fl_set_len(in_fle, *keylen);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, dst_dma);
++ dpaa2_fl_set_len(out_fle, digestsize);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "key_in@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
++ print_hex_dump(KERN_ERR, "shdesc@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
++#endif
++
++ result.err = 0;
++ init_completion(&result.completion);
++ result.dev = ctx->dev;
++
++ req_ctx->flc = flc;
++ req_ctx->flc_dma = flc_dma;
++ req_ctx->cbk = split_key_sh_done;
++ req_ctx->ctx = &result;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret == -EINPROGRESS) {
++ /* in progress */
++ wait_for_completion(&result.completion);
++ ret = result.err;
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR,
++ "digested key@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, key_in, digestsize,
++ 1);
++#endif
++ }
++
++ dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
++ DMA_TO_DEVICE);
++err_flc_dma:
++ dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
++err_dst_dma:
++ dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
++err_src_dma:
++ kfree(flc);
++err_flc:
++ kfree(req_ctx);
++
++ *keylen = digestsize;
++
++ return ret;
++}
++
++static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
++ unsigned int keylen)
++{
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
++ unsigned int digestsize = crypto_ahash_digestsize(ahash);
++ int ret;
++ u8 *hashed_key = NULL;
++
++#ifdef DEBUG
++ dev_err(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
++#endif
++
++ if (keylen > blocksize) {
++ hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
++ GFP_KERNEL | GFP_DMA);
++ if (!hashed_key)
++ return -ENOMEM;
++ ret = hash_digest_key(ctx, key, &keylen, hashed_key,
++ digestsize);
++ if (ret)
++ goto bad_free_key;
++ key = hashed_key;
++ }
++
++ ctx->adata.keylen = keylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
++ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
++ goto bad_free_key;
++
++ memcpy(ctx->key, key, keylen);
++
++ kfree(hashed_key);
++ return ahash_set_sh_desc(ahash);
++bad_free_key:
++ kfree(hashed_key);
++ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ return -EINVAL;
++}
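
ahash_setkey() applies the usual HMAC rule: a key longer than the block size is first reduced to its digest (via hash_digest_key() above) and the digest is then used as the key. A stand-alone sketch of that decision, with a mock digest function standing in for the CAAM job:

    #include <stdio.h>
    #include <string.h>

    #define BLOCKSIZE  64   /* e.g. SHA-256 block size  */
    #define DIGESTSIZE 32   /* e.g. SHA-256 digest size */

    /* Stand-in for hash_digest_key(): pretend to hash key into out. */
    static void mock_digest(const unsigned char *key, unsigned int len,
                            unsigned char *out)
    {
            (void)key; (void)len;
            memset(out, 0xAB, DIGESTSIZE);
    }

    int main(void)
    {
            unsigned char key[100], hashed[DIGESTSIZE];
            unsigned int keylen = sizeof(key);

            memset(key, 0x42, sizeof(key));
            if (keylen > BLOCKSIZE) {            /* same check as ahash_setkey() */
                    mock_digest(key, keylen, hashed);
                    keylen = DIGESTSIZE;         /* the digest becomes the key   */
            }
            printf("effective key length: %u\n", keylen);
            return 0;
    }
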
++
++static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
++ struct ahash_request *req, int dst_len)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
++ if (edesc->src_nents)
++ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
++ if (edesc->dst_dma)
++ dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
++
++ if (edesc->qm_sg_bytes)
++ dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
++ DMA_TO_DEVICE);
++
++ if (state->buf_dma) {
++ dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
++ DMA_TO_DEVICE);
++ state->buf_dma = 0;
++ }
++}
++
++static inline void ahash_unmap_ctx(struct device *dev,
++ struct ahash_edesc *edesc,
++ struct ahash_request *req, int dst_len,
++ u32 flag)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
++ if (state->ctx_dma) {
++ dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
++ state->ctx_dma = 0;
++ }
++ ahash_unmap(dev, edesc, req, dst_len);
++}
++
++static void ahash_done(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct ahash_request *req = ahash_request_cast(areq);
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct ahash_edesc *edesc = state->caam_req.edesc;
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ int digestsize = crypto_ahash_digestsize(ahash);
++ int ecode = 0;
++
++#ifdef DEBUG
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++ ecode = -EIO;
++ }
++
++ ahash_unmap(ctx->dev, edesc, req, digestsize);
++ qi_cache_free(edesc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
++ ctx->ctx_len, 1);
++ if (req->result)
++ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
++ digestsize, 1);
++#endif
++
++ req->base.complete(&req->base, ecode);
++}
++
++static void ahash_done_bi(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct ahash_request *req = ahash_request_cast(areq);
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct ahash_edesc *edesc = state->caam_req.edesc;
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ int ecode = 0;
++#ifdef DEBUG
++ int digestsize = crypto_ahash_digestsize(ahash);
++
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++ ecode = -EIO;
++ }
++
++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
++ switch_buf(state);
++ qi_cache_free(edesc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
++ ctx->ctx_len, 1);
++ if (req->result)
++ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
++ digestsize, 1);
++#endif
++
++ req->base.complete(&req->base, ecode);
++}
++
++static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct ahash_request *req = ahash_request_cast(areq);
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct ahash_edesc *edesc = state->caam_req.edesc;
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ int digestsize = crypto_ahash_digestsize(ahash);
++ int ecode = 0;
++
++#ifdef DEBUG
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++ ecode = -EIO;
++ }
++
++ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
++ qi_cache_free(edesc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
++ ctx->ctx_len, 1);
++ if (req->result)
++ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
++ digestsize, 1);
++#endif
++
++ req->base.complete(&req->base, ecode);
++}
++
++static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
++{
++ struct crypto_async_request *areq = cbk_ctx;
++ struct ahash_request *req = ahash_request_cast(areq);
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct ahash_edesc *edesc = state->caam_req.edesc;
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ int ecode = 0;
++#ifdef DEBUG
++ int digestsize = crypto_ahash_digestsize(ahash);
++
++ dev_err(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
++#endif
++
++ if (unlikely(status)) {
++ caam_qi2_strstatus(ctx->dev, status);
++ ecode = -EIO;
++ }
++
++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
++ switch_buf(state);
++ qi_cache_free(edesc);
++
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "ctx@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
++ ctx->ctx_len, 1);
++ if (req->result)
++ print_hex_dump(KERN_ERR, "result@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
++ digestsize, 1);
++#endif
++
++ req->base.complete(&req->base, ecode);
++}
++
++static int ahash_update_ctx(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ u8 *buf = current_buf(state);
++ int *buflen = current_buflen(state);
++ u8 *next_buf = alt_buf(state);
++ int *next_buflen = alt_buflen(state), last_buflen;
++ int in_len = *buflen + req->nbytes, to_hash;
++ int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
++ struct ahash_edesc *edesc;
++ int ret = 0;
++
++ last_buflen = *next_buflen;
++ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
++ to_hash = in_len - *next_buflen;
++
++ if (to_hash) {
++ struct dpaa2_sg_entry *sg_table;
++
++ src_nents = sg_nents_for_len(req->src,
++ req->nbytes - (*next_buflen));
++ if (src_nents < 0) {
++ dev_err(ctx->dev, "Invalid number of src SG.\n");
++ return src_nents;
++ }
++
++ if (src_nents) {
++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (!mapped_nents) {
++ dev_err(ctx->dev, "unable to DMA map source\n");
++ return -ENOMEM;
++ }
++ } else {
++ mapped_nents = 0;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc) {
++ dma_unmap_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ return -ENOMEM;
++ }
++
++ edesc->src_nents = src_nents;
++ qm_sg_src_index = 1 + (*buflen ? 1 : 0);
++ qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
++ sizeof(*sg_table);
++ sg_table = &edesc->sgt[0];
++
++ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
++ DMA_BIDIRECTIONAL);
++ if (ret)
++ goto unmap_ctx;
++
++ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
++ if (ret)
++ goto unmap_ctx;
++
++ if (mapped_nents) {
++ sg_to_qm_sg_last(req->src, mapped_nents,
++ sg_table + qm_sg_src_index, 0);
++ if (*next_buflen)
++ scatterwalk_map_and_copy(next_buf, req->src,
++ to_hash - *buflen,
++ *next_buflen, 0);
++ } else {
++ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
++ true);
++ }
++
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
++ qm_sg_bytes, DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
++ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
++
++ req_ctx->flc = &ctx->flc[UPDATE];
++ req_ctx->flc_dma = ctx->flc_dma[UPDATE];
++ req_ctx->cbk = ahash_done_bi;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY &&
++ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ goto unmap_ctx;
++ } else if (*next_buflen) {
++ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
++ req->nbytes, 0);
++ *buflen = *next_buflen;
++ *next_buflen = last_buflen;
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
++ print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
++ *next_buflen, 1);
++#endif
++
++ return ret;
++unmap_ctx:
++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
++ qi_cache_free(edesc);
++ return ret;
++}
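
ahash_update_ctx() only hands whole blocks to the accelerator; whatever does not fill a block is copied into the alternate buffer and prepended to the next update. The split is a plain power-of-two remainder; a quick numeric sketch (block size and byte counts are made up for illustration):

    #include <stdio.h>

    int main(void)
    {
            int blocksize = 64;            /* crypto_tfm_alg_blocksize()      */
            int buflen = 20;               /* bytes already buffered          */
            int nbytes = 150;              /* payload of this update request  */
            int in_len = buflen + nbytes;
            int next_buflen = in_len & (blocksize - 1); /* remainder kept back */
            int to_hash = in_len - next_buflen;  /* block multiple sent to CAAM */

            printf("in_len=%d to_hash=%d next_buflen=%d\n",
                   in_len, to_hash, next_buflen);       /* 170, 128, 42 */
            return 0;
    }
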
++
++static int ahash_final_ctx(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int buflen = *current_buflen(state);
++ int qm_sg_bytes, qm_sg_src_index;
++ int digestsize = crypto_ahash_digestsize(ahash);
++ struct ahash_edesc *edesc;
++ struct dpaa2_sg_entry *sg_table;
++ int ret;
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc)
++ return -ENOMEM;
++
++ qm_sg_src_index = 1 + (buflen ? 1 : 0);
++ qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
++ sg_table = &edesc->sgt[0];
++
++ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
++ DMA_TO_DEVICE);
++ if (ret)
++ goto unmap_ctx;
++
++ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
++ if (ret)
++ goto unmap_ctx;
++
++ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
++
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++ dev_err(ctx->dev, "unable to map dst\n");
++ edesc->dst_dma = 0;
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++ dpaa2_fl_set_len(out_fle, digestsize);
++
++ req_ctx->flc = &ctx->flc[FINALIZE];
++ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
++ req_ctx->cbk = ahash_done_ctx_src;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret == -EINPROGRESS ||
++ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ return ret;
++
++unmap_ctx:
++ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_finup_ctx(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int buflen = *current_buflen(state);
++ int qm_sg_bytes, qm_sg_src_index;
++ int src_nents, mapped_nents;
++ int digestsize = crypto_ahash_digestsize(ahash);
++ struct ahash_edesc *edesc;
++ struct dpaa2_sg_entry *sg_table;
++ int ret;
++
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (src_nents < 0) {
++ dev_err(ctx->dev, "Invalid number of src SG.\n");
++ return src_nents;
++ }
++
++ if (src_nents) {
++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (!mapped_nents) {
++ dev_err(ctx->dev, "unable to DMA map source\n");
++ return -ENOMEM;
++ }
++ } else {
++ mapped_nents = 0;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc) {
++ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
++ return -ENOMEM;
++ }
++
++ edesc->src_nents = src_nents;
++ qm_sg_src_index = 1 + (buflen ? 1 : 0);
++ qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
++ sg_table = &edesc->sgt[0];
++
++ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
++ DMA_TO_DEVICE);
++ if (ret)
++ goto unmap_ctx;
++
++ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
++ if (ret)
++ goto unmap_ctx;
++
++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
++
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++ dev_err(ctx->dev, "unable to map dst\n");
++ edesc->dst_dma = 0;
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++ dpaa2_fl_set_len(out_fle, digestsize);
++
++ req_ctx->flc = &ctx->flc[FINALIZE];
++ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
++ req_ctx->cbk = ahash_done_ctx_src;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret == -EINPROGRESS ||
++ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ return ret;
++
++unmap_ctx:
++ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_digest(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int digestsize = crypto_ahash_digestsize(ahash);
++ int src_nents, mapped_nents;
++ struct ahash_edesc *edesc;
++ int ret = -ENOMEM;
++
++ state->buf_dma = 0;
++
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (src_nents < 0) {
++ dev_err(ctx->dev, "Invalid number of src SG.\n");
++ return src_nents;
++ }
++
++ if (src_nents) {
++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (!mapped_nents) {
++ dev_err(ctx->dev, "unable to map source for DMA\n");
++ return ret;
++ }
++ } else {
++ mapped_nents = 0;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc) {
++ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
++ return ret;
++ }
++
++ edesc->src_nents = src_nents;
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++
++ if (mapped_nents > 1) {
++ int qm_sg_bytes;
++ struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
++
++ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
++ qm_sg_bytes, DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ goto unmap;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ } else {
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
++ }
++
++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++ dev_err(ctx->dev, "unable to map dst\n");
++ edesc->dst_dma = 0;
++ goto unmap;
++ }
++
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_len(in_fle, req->nbytes);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++ dpaa2_fl_set_len(out_fle, digestsize);
++
++ req_ctx->flc = &ctx->flc[DIGEST];
++ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
++ req_ctx->cbk = ahash_done;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret == -EINPROGRESS ||
++ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ return ret;
++
++unmap:
++ ahash_unmap(ctx->dev, edesc, req, digestsize);
++ qi_cache_free(edesc);
++ return ret;
++}
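
ahash_digest() picks the input frame-list format from the number of scatterlist segments the DMA mapping produced: one mapped segment can be referenced directly, anything more needs a hardware S/G table built in the edesc. The branch boils down to (illustrative sketch only):

    #include <stdio.h>

    static const char *in_fle_format(int mapped_nents)
    {
            /* mirrors the mapped_nents > 1 branch in ahash_digest() */
            return mapped_nents > 1 ? "sg (points at a DMA-mapped S/G table)"
                                    : "single (points straight at the buffer)";
    }

    int main(void)
    {
            printf("1 segment  -> %s\n", in_fle_format(1));
            printf("3 segments -> %s\n", in_fle_format(3));
            return 0;
    }
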
++
++static int ahash_final_no_ctx(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ u8 *buf = current_buf(state);
++ int buflen = *current_buflen(state);
++ int digestsize = crypto_ahash_digestsize(ahash);
++ struct ahash_edesc *edesc;
++ int ret = -ENOMEM;
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc)
++ return ret;
++
++ state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, state->buf_dma)) {
++ dev_err(ctx->dev, "unable to map src\n");
++ goto unmap;
++ }
++
++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++ dev_err(ctx->dev, "unable to map dst\n");
++ edesc->dst_dma = 0;
++ goto unmap;
++ }
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(in_fle, state->buf_dma);
++ dpaa2_fl_set_len(in_fle, buflen);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++ dpaa2_fl_set_len(out_fle, digestsize);
++
++ req_ctx->flc = &ctx->flc[DIGEST];
++ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
++ req_ctx->cbk = ahash_done;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret == -EINPROGRESS ||
++ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ return ret;
++
++unmap:
++ ahash_unmap(ctx->dev, edesc, req, digestsize);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_update_no_ctx(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ u8 *buf = current_buf(state);
++ int *buflen = current_buflen(state);
++ u8 *next_buf = alt_buf(state);
++ int *next_buflen = alt_buflen(state);
++ int in_len = *buflen + req->nbytes, to_hash;
++ int qm_sg_bytes, src_nents, mapped_nents;
++ struct ahash_edesc *edesc;
++ int ret = 0;
++
++ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
++ to_hash = in_len - *next_buflen;
++
++ if (to_hash) {
++ struct dpaa2_sg_entry *sg_table;
++
++ src_nents = sg_nents_for_len(req->src,
++ req->nbytes - *next_buflen);
++ if (src_nents < 0) {
++ dev_err(ctx->dev, "Invalid number of src SG.\n");
++ return src_nents;
++ }
++
++ if (src_nents) {
++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (!mapped_nents) {
++ dev_err(ctx->dev, "unable to DMA map source\n");
++ return -ENOMEM;
++ }
++ } else {
++ mapped_nents = 0;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc) {
++ dma_unmap_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ return -ENOMEM;
++ }
++
++ edesc->src_nents = src_nents;
++ qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
++ sg_table = &edesc->sgt[0];
++
++ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
++ if (ret)
++ goto unmap_ctx;
++
++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
++
++ if (*next_buflen)
++ scatterwalk_map_and_copy(next_buf, req->src,
++ to_hash - *buflen,
++ *next_buflen, 0);
++
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
++ qm_sg_bytes, DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
++ ctx->ctx_len, DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
++ dev_err(ctx->dev, "unable to map ctx\n");
++ state->ctx_dma = 0;
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ dpaa2_fl_set_len(in_fle, to_hash);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
++ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
++
++ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
++ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
++ req_ctx->cbk = ahash_done_ctx_dst;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY &&
++ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ goto unmap_ctx;
++
++ state->update = ahash_update_ctx;
++ state->finup = ahash_finup_ctx;
++ state->final = ahash_final_ctx;
++ } else if (*next_buflen) {
++ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
++ req->nbytes, 0);
++ *buflen = *next_buflen;
++ *next_buflen = 0;
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "buf@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
++ print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, next_buf,
++ *next_buflen, 1);
++#endif
++
++ return ret;
++unmap_ctx:
++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_finup_no_ctx(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ int buflen = *current_buflen(state);
++ int qm_sg_bytes, src_nents, mapped_nents;
++ int digestsize = crypto_ahash_digestsize(ahash);
++ struct ahash_edesc *edesc;
++ struct dpaa2_sg_entry *sg_table;
++ int ret;
++
++ src_nents = sg_nents_for_len(req->src, req->nbytes);
++ if (src_nents < 0) {
++ dev_err(ctx->dev, "Invalid number of src SG.\n");
++ return src_nents;
++ }
++
++ if (src_nents) {
++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (!mapped_nents) {
++ dev_err(ctx->dev, "unable to DMA map source\n");
++ return -ENOMEM;
++ }
++ } else {
++ mapped_nents = 0;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc) {
++ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
++ return -ENOMEM;
++ }
++
++ edesc->src_nents = src_nents;
++ qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
++ sg_table = &edesc->sgt[0];
++
++ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
++ if (ret)
++ goto unmap;
++
++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
++
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ ret = -ENOMEM;
++ goto unmap;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++
++ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
++ dev_err(ctx->dev, "unable to map dst\n");
++ edesc->dst_dma = 0;
++ ret = -ENOMEM;
++ goto unmap;
++ }
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
++ dpaa2_fl_set_len(out_fle, digestsize);
++
++ req_ctx->flc = &ctx->flc[DIGEST];
++ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
++ req_ctx->cbk = ahash_done;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ goto unmap;
++
++ return ret;
++unmap:
++ ahash_unmap(ctx->dev, edesc, req, digestsize);
++ qi_cache_free(edesc);
++ return -ENOMEM;
++}
++
++static int ahash_update_first(struct ahash_request *req)
++{
++ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
++ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_request *req_ctx = &state->caam_req;
++ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
++ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
++ GFP_KERNEL : GFP_ATOMIC;
++ u8 *next_buf = alt_buf(state);
++ int *next_buflen = alt_buflen(state);
++ int to_hash;
++ int src_nents, mapped_nents;
++ struct ahash_edesc *edesc;
++ int ret = 0;
++
++ *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
++ 1);
++ to_hash = req->nbytes - *next_buflen;
++
++ if (to_hash) {
++ struct dpaa2_sg_entry *sg_table;
++
++ src_nents = sg_nents_for_len(req->src,
++ req->nbytes - (*next_buflen));
++ if (src_nents < 0) {
++ dev_err(ctx->dev, "Invalid number of src SG.\n");
++ return src_nents;
++ }
++
++ if (src_nents) {
++ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ if (!mapped_nents) {
++ dev_err(ctx->dev, "unable to map source for DMA\n");
++ return -ENOMEM;
++ }
++ } else {
++ mapped_nents = 0;
++ }
++
++ /* allocate space for base edesc and link tables */
++ edesc = qi_cache_zalloc(GFP_DMA | flags);
++ if (!edesc) {
++ dma_unmap_sg(ctx->dev, req->src, src_nents,
++ DMA_TO_DEVICE);
++ return -ENOMEM;
++ }
++
++ edesc->src_nents = src_nents;
++ sg_table = &edesc->sgt[0];
++
++ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
++ dpaa2_fl_set_final(in_fle, true);
++ dpaa2_fl_set_len(in_fle, to_hash);
++
++ if (mapped_nents > 1) {
++ int qm_sg_bytes;
++
++ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
++ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
++ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
++ qm_sg_bytes,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
++ dev_err(ctx->dev, "unable to map S/G table\n");
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++ edesc->qm_sg_bytes = qm_sg_bytes;
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
++ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
++ } else {
++ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
++ }
++
++ if (*next_buflen)
++ scatterwalk_map_and_copy(next_buf, req->src, to_hash,
++ *next_buflen, 0);
++
++ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
++ ctx->ctx_len, DMA_FROM_DEVICE);
++ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
++ dev_err(ctx->dev, "unable to map ctx\n");
++ state->ctx_dma = 0;
++ ret = -ENOMEM;
++ goto unmap_ctx;
++ }
++
++ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
++ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
++ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
++
++ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
++ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
++ req_ctx->cbk = ahash_done_ctx_dst;
++ req_ctx->ctx = &req->base;
++ req_ctx->edesc = edesc;
++
++ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
++ if (ret != -EINPROGRESS &&
++ !(ret == -EBUSY && req->base.flags &
++ CRYPTO_TFM_REQ_MAY_BACKLOG))
++ goto unmap_ctx;
++
++ state->update = ahash_update_ctx;
++ state->finup = ahash_finup_ctx;
++ state->final = ahash_final_ctx;
++ } else if (*next_buflen) {
++ state->update = ahash_update_no_ctx;
++ state->finup = ahash_finup_no_ctx;
++ state->final = ahash_final_no_ctx;
++ scatterwalk_map_and_copy(next_buf, req->src, 0,
++ req->nbytes, 0);
++ switch_buf(state);
++ }
++#ifdef DEBUG
++ print_hex_dump(KERN_ERR, "next buf@" __stringify(__LINE__)": ",
++ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen, 1);
++#endif
++
++ return ret;
++unmap_ctx:
++ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
++ qi_cache_free(edesc);
++ return ret;
++}
++
++static int ahash_finup_first(struct ahash_request *req)
++{
++ return ahash_digest(req);
++}
++
++static int ahash_init(struct ahash_request *req)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
++ state->update = ahash_update_first;
++ state->finup = ahash_finup_first;
++ state->final = ahash_final_no_ctx;
++
++ state->ctx_dma = 0;
++ state->current_buf = 0;
++ state->buf_dma = 0;
++ state->buflen_0 = 0;
++ state->buflen_1 = 0;
++
++ return 0;
++}
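
The generic entry points below (ahash_update/finup/final) only dispatch through function pointers kept in the request state. ahash_init() starts every request in the "no running context yet" mode, and ahash_update_first()/ahash_update_no_ctx() promote the pointers to the *_ctx variants once a context has been produced. A toy model of that dispatch (hypothetical names, not the driver's):

    #include <stdio.h>

    struct req_state {
            int (*update)(struct req_state *s, int nbytes);
    };

    static int update_ctx(struct req_state *s, int nbytes)
    {
            (void)s;
            printf("update_ctx: continue from saved context, %d bytes\n", nbytes);
            return 0;
    }

    static int update_first(struct req_state *s, int nbytes)
    {
            printf("update_first: no context yet, %d bytes\n", nbytes);
            s->update = update_ctx;     /* promote once a context exists */
            return 0;
    }

    int main(void)
    {
            struct req_state s = { .update = update_first };  /* as in ahash_init() */

            s.update(&s, 128);   /* first chunk  -> update_first */
            s.update(&s, 64);    /* later chunks -> update_ctx   */
            return 0;
    }
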
++
++static int ahash_update(struct ahash_request *req)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
++ return state->update(req);
++}
++
++static int ahash_finup(struct ahash_request *req)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
++ return state->finup(req);
++}
++
++static int ahash_final(struct ahash_request *req)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++
++ return state->final(req);
++}
++
++static int ahash_export(struct ahash_request *req, void *out)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ struct caam_export_state *export = out;
++ int len;
++ u8 *buf;
++
++ if (state->current_buf) {
++ buf = state->buf_1;
++ len = state->buflen_1;
++ } else {
++ buf = state->buf_0;
++ len = state->buflen_0;
++ }
++
++ memcpy(export->buf, buf, len);
++ memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
++ export->buflen = len;
++ export->update = state->update;
++ export->final = state->final;
++ export->finup = state->finup;
++
++ return 0;
++}
++
++static int ahash_import(struct ahash_request *req, const void *in)
++{
++ struct caam_hash_state *state = ahash_request_ctx(req);
++ const struct caam_export_state *export = in;
++
++ memset(state, 0, sizeof(*state));
++ memcpy(state->buf_0, export->buf, export->buflen);
++ memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
++ state->buflen_0 = export->buflen;
++ state->update = export->update;
++ state->final = export->final;
++ state->finup = export->finup;
++
++ return 0;
++}
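
ahash_export()/ahash_import() serialize exactly what is needed to resume a partial hash on a fresh request: the pending buffer, the running CAAM context and the three dispatch pointers. A minimal round-trip sketch over an illustrative state struct (plain struct copy, no driver code involved):

    #include <assert.h>
    #include <string.h>

    struct export_state {            /* loosely mirrors caam_export_state */
            unsigned char buf[128];
            unsigned char ctx[64];
            int buflen;
    };

    int main(void)
    {
            struct export_state out, in;

            memset(&out, 0, sizeof(out));
            memcpy(out.buf, "partial block", 13);
            out.buflen = 13;
            memset(out.ctx, 0x5A, sizeof(out.ctx));

            in = out;                    /* "import" on another request */
            assert(in.buflen == 13);
            assert(memcmp(in.ctx, out.ctx, sizeof(in.ctx)) == 0);
            return 0;
    }
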
++
++struct caam_hash_template {
++ char name[CRYPTO_MAX_ALG_NAME];
++ char driver_name[CRYPTO_MAX_ALG_NAME];
++ char hmac_name[CRYPTO_MAX_ALG_NAME];
++ char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
++ unsigned int blocksize;
++ struct ahash_alg template_ahash;
++ u32 alg_type;
++};
++
++/* ahash algorithm templates */
++static struct caam_hash_template driver_hash[] = {
++ {
++ .name = "sha1",
++ .driver_name = "sha1-caam-qi2",
++ .hmac_name = "hmac(sha1)",
++ .hmac_driver_name = "hmac-sha1-caam-qi2",
++ .blocksize = SHA1_BLOCK_SIZE,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = ahash_setkey,
++ .halg = {
++ .digestsize = SHA1_DIGEST_SIZE,
++ .statesize = sizeof(struct caam_export_state),
++ },
++ },
++ .alg_type = OP_ALG_ALGSEL_SHA1,
++ }, {
++ .name = "sha224",
++ .driver_name = "sha224-caam-qi2",
++ .hmac_name = "hmac(sha224)",
++ .hmac_driver_name = "hmac-sha224-caam-qi2",
++ .blocksize = SHA224_BLOCK_SIZE,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = ahash_setkey,
++ .halg = {
++ .digestsize = SHA224_DIGEST_SIZE,
++ .statesize = sizeof(struct caam_export_state),
++ },
++ },
++ .alg_type = OP_ALG_ALGSEL_SHA224,
++ }, {
++ .name = "sha256",
++ .driver_name = "sha256-caam-qi2",
++ .hmac_name = "hmac(sha256)",
++ .hmac_driver_name = "hmac-sha256-caam-qi2",
++ .blocksize = SHA256_BLOCK_SIZE,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = ahash_setkey,
++ .halg = {
++ .digestsize = SHA256_DIGEST_SIZE,
++ .statesize = sizeof(struct caam_export_state),
++ },
++ },
++ .alg_type = OP_ALG_ALGSEL_SHA256,
++ }, {
++ .name = "sha384",
++ .driver_name = "sha384-caam-qi2",
++ .hmac_name = "hmac(sha384)",
++ .hmac_driver_name = "hmac-sha384-caam-qi2",
++ .blocksize = SHA384_BLOCK_SIZE,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = ahash_setkey,
++ .halg = {
++ .digestsize = SHA384_DIGEST_SIZE,
++ .statesize = sizeof(struct caam_export_state),
++ },
++ },
++ .alg_type = OP_ALG_ALGSEL_SHA384,
++ }, {
++ .name = "sha512",
++ .driver_name = "sha512-caam-qi2",
++ .hmac_name = "hmac(sha512)",
++ .hmac_driver_name = "hmac-sha512-caam-qi2",
++ .blocksize = SHA512_BLOCK_SIZE,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = ahash_setkey,
++ .halg = {
++ .digestsize = SHA512_DIGEST_SIZE,
++ .statesize = sizeof(struct caam_export_state),
++ },
++ },
++ .alg_type = OP_ALG_ALGSEL_SHA512,
++ }, {
++ .name = "md5",
++ .driver_name = "md5-caam-qi2",
++ .hmac_name = "hmac(md5)",
++ .hmac_driver_name = "hmac-md5-caam-qi2",
++ .blocksize = MD5_BLOCK_WORDS * 4,
++ .template_ahash = {
++ .init = ahash_init,
++ .update = ahash_update,
++ .final = ahash_final,
++ .finup = ahash_finup,
++ .digest = ahash_digest,
++ .export = ahash_export,
++ .import = ahash_import,
++ .setkey = ahash_setkey,
++ .halg = {
++ .digestsize = MD5_DIGEST_SIZE,
++ .statesize = sizeof(struct caam_export_state),
++ },
++ },
++ .alg_type = OP_ALG_ALGSEL_MD5,
++ }
++};
++
++struct caam_hash_alg {
++ struct list_head entry;
++ struct device *dev;
++ int alg_type;
++ struct ahash_alg ahash_alg;
++};
++
++static int caam_hash_cra_init(struct crypto_tfm *tfm)
++{
++ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
++ struct crypto_alg *base = tfm->__crt_alg;
++ struct hash_alg_common *halg =
++ container_of(base, struct hash_alg_common, base);
++ struct ahash_alg *alg =
++ container_of(halg, struct ahash_alg, halg);
++ struct caam_hash_alg *caam_hash =
++ container_of(alg, struct caam_hash_alg, ahash_alg);
++ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
++ /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
++ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
++ HASH_MSG_LEN + SHA1_DIGEST_SIZE,
++ HASH_MSG_LEN + 32,
++ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
++ HASH_MSG_LEN + 64,
++ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
++ dma_addr_t dma_addr;
++ int i;
++
++ ctx->dev = caam_hash->dev;
++
++ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
++ DMA_BIDIRECTIONAL,
++ DMA_ATTR_SKIP_CPU_SYNC);
++ if (dma_mapping_error(ctx->dev, dma_addr)) {
++ dev_err(ctx->dev, "unable to map shared descriptors\n");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < HASH_NUM_OP; i++)
++ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
++
++ /* copy descriptor header template value */
++ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
++
++ ctx->ctx_len = runninglen[(ctx->adata.algtype &
++ OP_ALG_ALGSEL_SUBMASK) >>
++ OP_ALG_ALGSEL_SHIFT];
++
++ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
++ sizeof(struct caam_hash_state));
++
++ return ahash_set_sh_desc(ahash);
++}
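
caam_hash_cra_init() sizes the running context from a per-algorithm table: the saved state is the algorithm's internal digest plus a message-length field (HASH_MSG_LEN; assumed to be 8 bytes here, as in the job-ring caamhash driver). SHA-224 and SHA-384 carry the full SHA-256/SHA-512 internal state, hence the 32- and 64-byte entries. Roughly, under that assumption:

    #include <stdio.h>

    #define HASH_MSG_LEN 8   /* assumed value of the driver's HASH_MSG_LEN */

    int main(void)
    {
            /* MD5, SHA-1, SHA-224, SHA-256, SHA-384, SHA-512 running lengths */
            const int runninglen[] = {
                    HASH_MSG_LEN + 16, HASH_MSG_LEN + 20, HASH_MSG_LEN + 32,
                    HASH_MSG_LEN + 32, HASH_MSG_LEN + 64, HASH_MSG_LEN + 64,
            };

            for (unsigned int i = 0; i < sizeof(runninglen) / sizeof(runninglen[0]); i++)
                    printf("ctx_len[%u] = %d\n", i, runninglen[i]);
            return 0;
    }
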
++
++static void caam_hash_cra_exit(struct crypto_tfm *tfm)
++{
++ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
++ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
++}
++
++static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
++ struct caam_hash_template *template, bool keyed)
++{
++ struct caam_hash_alg *t_alg;
++ struct ahash_alg *halg;
++ struct crypto_alg *alg;
++
++ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
++ if (!t_alg)
++ return ERR_PTR(-ENOMEM);
++
++ t_alg->ahash_alg = template->template_ahash;
++ halg = &t_alg->ahash_alg;
++ alg = &halg->halg.base;
++
++ if (keyed) {
++ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
++ template->hmac_name);
++ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
++ template->hmac_driver_name);
++ } else {
++ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
++ template->name);
++ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
++ template->driver_name);
++ }
++ alg->cra_module = THIS_MODULE;
++ alg->cra_init = caam_hash_cra_init;
++ alg->cra_exit = caam_hash_cra_exit;
++ alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
++ alg->cra_priority = CAAM_CRA_PRIORITY;
++ alg->cra_blocksize = template->blocksize;
++ alg->cra_alignmask = 0;
++ alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH;
++ alg->cra_type = &crypto_ahash_type;
++
++ t_alg->alg_type = template->alg_type;
++ t_alg->dev = dev;
++
++ return t_alg;
++}
++
++static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
++{
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++
++ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
++ napi_schedule_irqoff(&ppriv->napi);
++}
++
++static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
++{
++ struct device *dev = priv->dev;
++ struct dpaa2_io_notification_ctx *nctx;
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++ int err, i = 0, cpu;
++
++ for_each_online_cpu(cpu) {
++ ppriv = per_cpu_ptr(priv->ppriv, cpu);
++ ppriv->priv = priv;
++ nctx = &ppriv->nctx;
++ nctx->is_cdan = 0;
++ nctx->id = ppriv->rsp_fqid;
++ nctx->desired_cpu = cpu;
++ nctx->cb = dpaa2_caam_fqdan_cb;
++
++ /* Register notification callbacks */
++ err = dpaa2_io_service_register(NULL, nctx);
++ if (unlikely(err)) {
++ dev_err(dev, "notification register failed\n");
++ nctx->cb = NULL;
++ goto err;
++ }
++
++ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
++ dev);
++ if (unlikely(!ppriv->store)) {
++ dev_err(dev, "dpaa2_io_store_create() failed\n");
++ goto err;
++ }
++
++ if (++i == priv->num_pairs)
++ break;
++ }
++
++ return 0;
++
++err:
++ for_each_online_cpu(cpu) {
++ ppriv = per_cpu_ptr(priv->ppriv, cpu);
++ if (!ppriv->nctx.cb)
++ break;
++ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ }
++
++ for_each_online_cpu(cpu) {
++ ppriv = per_cpu_ptr(priv->ppriv, cpu);
++ if (!ppriv->store)
++ break;
++ dpaa2_io_store_destroy(ppriv->store);
++ }
++
++ return err;
++}
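
dpaa2_dpseci_dpio_setup() sets up one notification context and one dequeue store per response queue and, on failure, walks the CPUs again tearing down only what was actually brought up (the NULL checks stop at the first untouched slot). The same unwind-on-error pattern in a tiny generic form (simulated failure, illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    #define N 4

    int main(void)
    {
            void *res[N] = { 0 };
            int i;

            for (i = 0; i < N; i++) {
                    res[i] = malloc(32);
                    if (!res[i] || i == 2) {        /* pretend slot 2 fails */
                            fprintf(stderr, "setup failed at %d, unwinding\n", i);
                            goto err;
                    }
            }
            return 0;

    err:
            for (i = 0; i < N && res[i]; i++)       /* free only what was set up */
                    free(res[i]);
            return 1;
    }
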
++
++static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
++{
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++ int i = 0, cpu;
++
++ for_each_online_cpu(cpu) {
++ ppriv = per_cpu_ptr(priv->ppriv, cpu);
++ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
++ dpaa2_io_store_destroy(ppriv->store);
++
++ if (++i == priv->num_pairs)
++ return;
++ }
++}
++
++static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
++{
++ struct dpseci_rx_queue_cfg rx_queue_cfg;
++ struct device *dev = priv->dev;
++ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
++ struct dpaa2_caam_priv_per_cpu *ppriv;
++ int err = 0, i = 0, cpu;
++
++ /* Configure Rx queues */
++ for_each_online_cpu(cpu) {
++ ppriv = per_cpu_ptr(priv->ppriv, cpu);
++
++ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
++ DPSECI_QUEUE_OPT_USER_CTX;
++ rx_queue_cfg.order_preservation_en = 0;
++ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
++ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
++ /*
++ * Rx priority (WQ) doesn't really matter, since we use
++ * pull mode, i.e. volatile dequeues from specific FQs
++ */
++ rx_queue_cfg.dest_cfg.priority = 0;
++ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
++
++ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
++ &rx_queue_cfg);
++ if (err) {
++ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
++ err);
++ return err;
++ }
++
++ if (++i == priv->num_pairs)
++ break;
++ }
++
++ return err;
++}
++
++static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
++{
++ struct device *dev = priv->dev;
++
+ if (!priv->cscn_mem)
+ return;
+
+}
+
+static struct list_head alg_list;
++static struct list_head hash_list;
+
+static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
+{
+ if (registered)
+ dev_info(dev, "algorithms registered in /proc/crypto\n");
+
++ /* register hash algorithms the device supports */
++ INIT_LIST_HEAD(&hash_list);
++
++ /*
++ * Skip registration of any hashing algorithms if MD block
++ * is not present.
++ */
++ if (!priv->sec_attr.md_acc_num)
++ return 0;
++
++ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
++ struct caam_hash_alg *t_alg;
++ struct caam_hash_template *alg = driver_hash + i;
++
++ /* register hmac version */
++ t_alg = caam_hash_alloc(dev, alg, true);
++ if (IS_ERR(t_alg)) {
++ err = PTR_ERR(t_alg);
++ dev_warn(dev, "%s hash alg allocation failed: %d\n",
++ alg->driver_name, err);
++ continue;
++ }
++
++ err = crypto_register_ahash(&t_alg->ahash_alg);
++ if (err) {
++ dev_warn(dev, "%s alg registration failed: %d\n",
++ t_alg->ahash_alg.halg.base.cra_driver_name,
++ err);
++ kfree(t_alg);
++ } else {
++ list_add_tail(&t_alg->entry, &hash_list);
++ }
++
++ /* register unkeyed version */
++ t_alg = caam_hash_alloc(dev, alg, false);
++ if (IS_ERR(t_alg)) {
++ err = PTR_ERR(t_alg);
++ dev_warn(dev, "%s alg allocation failed: %d\n",
++ alg->driver_name, err);
++ continue;
++ }
++
++ err = crypto_register_ahash(&t_alg->ahash_alg);
++ if (err) {
++ dev_warn(dev, "%s alg registration failed: %d\n",
++ t_alg->ahash_alg.halg.base.cra_driver_name,
++ err);
++ kfree(t_alg);
++ } else {
++ list_add_tail(&t_alg->entry, &hash_list);
++ }
++ }
++ if (!list_empty(&hash_list))
++ dev_info(dev, "hash algorithms registered in /proc/crypto\n");
++
+ return err;
+
+err_bind:
+ }
+ }
+
++ if (hash_list.next) {
++ struct caam_hash_alg *t_hash_alg, *p;
++
++ list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
++ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
++ list_del(&t_hash_alg->entry);
++ kfree(t_hash_alg);
++ }
++ }
++
+ dpaa2_dpseci_disable(priv);
+ dpaa2_dpseci_dpio_free(priv);
+ dpaa2_dpseci_free(priv);
+ }
+ }
+
-+ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc->flc_dma);
++ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
+
+ req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ dpaa2_fd_set_format(&fd, dpaa2_fd_list);
+ dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
+ dpaa2_fd_set_len(&fd, req->fd_flt[1].len);
-+ dpaa2_fd_set_flc(&fd, req->flc->flc_dma);
++ dpaa2_fd_set_flc(&fd, req->flc_dma);
+
+ /*
+ * There is no guarantee that preemption is disabled here,
+module_fsl_mc_driver(dpaa2_caam_driver);
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.h
-@@ -0,0 +1,265 @@
+@@ -0,0 +1,281 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017 NXP
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @sgt: the h/w link table
+ */
-+struct ablkcipher_edesc {
++struct ablkcipher_edesc {
++ int src_nents;
++ int dst_nents;
++ dma_addr_t iv_dma;
++ int qm_sg_bytes;
++ dma_addr_t qm_sg_dma;
++#define CAAM_QI_MAX_ABLKCIPHER_SG \
++ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
++ sizeof(struct dpaa2_sg_entry))
++ struct dpaa2_sg_entry sgt[0];
++};
++
++/*
++ * ahash_edesc - s/w-extended ahash descriptor
++ * @dst_dma: I/O virtual address of req->result
++ * @qm_sg_dma: I/O virtual address of h/w link table
++ * @src_nents: number of segments in input scatterlist
++ * @qm_sg_bytes: length of dma mapped qm_sg space
++ * @sgt: pointer to h/w link table
++ */
++struct ahash_edesc {
++ dma_addr_t dst_dma;
++ dma_addr_t qm_sg_dma;
+ int src_nents;
-+ int dst_nents;
-+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
-+ dma_addr_t qm_sg_dma;
-+#define CAAM_QI_MAX_ABLKCIPHER_SG \
-+ ((CAAM_QI_MEMCACHE_SIZE - offsetof(struct ablkcipher_edesc, sgt)) / \
-+ sizeof(struct dpaa2_sg_entry))
+ struct dpaa2_sg_entry sgt[0];
+};
+
+ * caam_flc - Flow Context (FLC)
+ * @flc: Flow Context options
+ * @sh_desc: Shared Descriptor
-+ * @flc_dma: DMA address of the Flow Context
+ */
+struct caam_flc {
+ u32 flc[16];
+ u32 sh_desc[MAX_SDLEN];
-+ dma_addr_t flc_dma;
+} ____cacheline_aligned;
+
+enum optype {
+ * fd_flt[1] - FLE pointing to input buffer
+ * @fd_flt_dma: DMA address for the frame list table
+ * @flc: Flow Context
++ * @flc_dma: I/O virtual address of Flow Context
+ * @op_type: operation type
+ * @cbk: Callback function to invoke when job is completed
+ * @ctx: arbit context attached with request by the application
+ struct dpaa2_fl_entry fd_flt[2];
+ dma_addr_t fd_flt_dma;
+ struct caam_flc *flc;
++ dma_addr_t flc_dma;
+ enum optype op_type;
+ void (*cbk)(void *ctx, u32 err);
+ void *ctx;
+#endif /* _CAAMALG_QI2_H_ */
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
-@@ -72,7 +72,7 @@
+@@ -62,6 +62,7 @@
+ #include "error.h"
+ #include "sg_sw_sec4.h"
+ #include "key_gen.h"
++#include "caamhash_desc.h"
+
+ #define CAAM_CRA_PRIORITY 3000
+
+@@ -71,14 +72,6 @@
+ #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
- /* length of descriptors text */
+-/* length of descriptors text */
-#define DESC_AHASH_BASE (4 * CAAM_CMD_SZ)
-+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
- #define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
- #define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
- #define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-@@ -103,20 +103,14 @@ struct caam_hash_ctx {
+-#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
+-#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+-#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+-#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+-#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+-
+ #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
+ CAAM_MAX_HASH_KEY_SIZE)
+ #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
+@@ -103,20 +96,14 @@ struct caam_hash_ctx {
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
};
/* ahash state */
-@@ -143,6 +137,31 @@ struct caam_export_state {
+@@ -143,6 +130,31 @@ struct caam_export_state {
int (*finup)(struct ahash_request *req);
};
/* Common job descriptor seq in/out ptr routines */
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
-@@ -175,36 +194,27 @@ static inline dma_addr_t map_seq_out_ptr
+@@ -175,40 +187,31 @@ static inline dma_addr_t map_seq_out_ptr
return dst_dma;
}
}
/* Map state->caam_ctx, and add it to link table */
-@@ -224,89 +234,54 @@ static inline int ctx_map_to_sec4_sg(u32
+-static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
++static inline int ctx_map_to_sec4_sg(struct device *jrdev,
+ struct caam_hash_state *state, int ctx_len,
+ struct sec4_sg_entry *sec4_sg, u32 flag)
+ {
+@@ -224,124 +227,22 @@ static inline int ctx_map_to_sec4_sg(u32
return 0;
}
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
- /*
+-/*
- * For ahash read data from seqin following state->caam_ctx,
- * and write resulting class2 context to seqout, which may be state->caam_ctx
- * or req->result
-+ * For ahash update, final and finup (import_ctx = true)
-+ * import context, read and write to seqout
-+ * For ahash firsts and digest (import_ctx = false)
-+ * read and write to seqout
- */
+- */
-static inline void ahash_append_load_str(u32 *desc, int digestsize)
-+static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
-+ struct caam_hash_ctx *ctx, bool import_ctx)
- {
+-{
- /* Calculate remaining bytes to read */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-+ u32 op = ctx->adata.algtype;
-+ u32 *skip_key_load;
-
+-
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
-+ init_sh_desc(desc, HDR_SHARE_SERIAL);
-
+-
- /* Store class2 context bytes */
- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-}
-+ /* Append key if it has been set; ahash update excluded */
-+ if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
-+ /* Skip key loading if already shared */
-+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-+ JUMP_COND_SHRD);
-
+-
-/*
- * For ahash update, final and finup, import context, read and write to seqout
- */
- /* Import context from software */
- append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_2_CCB | ctx->ctx_len);
-+ append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
-+ ctx->adata.keylen, CLASS_2 |
-+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
-
+-
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
-+ set_jump_tgt_here(desc, skip_key_load);
-
+-
- /*
- * Load from buf and/or src and write to req->result or state->context
- */
- ahash_append_load_str(desc, digestsize);
-}
-+ op |= OP_ALG_AAI_HMAC_PRECOMP;
-+ }
-
+-
-/* For ahash firsts and digest, read and write to seqout */
-static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state,
- int digestsize, struct caam_hash_ctx *ctx)
-{
- init_sh_desc_key_ahash(desc, ctx);
-+ /* If needed, import context from software */
-+ if (import_ctx)
-+ append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
-+ LDST_SRCDST_BYTE_CONTEXT);
-
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
- /*
- * Load from buf and/or src and write to req->result or state->context
-+ * Calculate remaining bytes to read
- */
+-
+- /* Class 2 operation */
+- append_operation(desc, op | state | OP_ALG_ENCRYPT);
+-
+- /*
+- * Load from buf and/or src and write to req->result or state->context
+- */
- ahash_append_load_str(desc, digestsize);
-+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-+ /* Read remaining bytes */
-+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
-+ FIFOLD_TYPE_MSG | KEY_VLF);
-+ /* Store class2 context bytes */
-+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
-+ LDST_SRCDST_BYTE_CONTEXT);
- }
-
+-}
+-
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
-@@ -314,34 +289,13 @@ static int ahash_set_sh_desc(struct cryp
+ {
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
int digestsize = crypto_ahash_digestsize(ahash);
struct device *jrdev = ctx->jrdev;
- u32 have_key = 0;
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
- if (ctx->split_key_len)
- have_key = OP_ALG_AAI_HMAC_PRECOMP;
--
++ ctx->adata.key_virt = ctx->key;
+
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
-
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-+ ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true);
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
++ ctx->ctx_len, true, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update shdesc@"__stringify(__LINE__)": ",
-@@ -350,17 +304,9 @@ static int ahash_set_sh_desc(struct cryp
+@@ -350,17 +251,10 @@ static int ahash_set_sh_desc(struct cryp
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-+ ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false);
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
++ ctx->ctx_len, false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash update first shdesc@"__stringify(__LINE__)": ",
-@@ -369,53 +315,20 @@ static int ahash_set_sh_desc(struct cryp
+@@ -369,53 +263,22 @@ static int ahash_set_sh_desc(struct cryp
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-+ ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true);
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
++ ctx->ctx_len, true, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
- dev_err(jrdev, "unable to map shared descriptor\n");
- return -ENOMEM;
- }
-+ ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false);
++ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
++ ctx->ctx_len, false, ctrlpriv->era);
+ dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+ desc_bytes(desc), DMA_TO_DEVICE);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
"ahash digest shdesc@"__stringify(__LINE__)": ",
-@@ -426,14 +339,6 @@ static int ahash_set_sh_desc(struct cryp
+@@ -426,14 +289,6 @@ static int ahash_set_sh_desc(struct cryp
return 0;
}
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
u32 *keylen, u8 *key_out, u32 digestsize)
-@@ -469,7 +374,7 @@ static int hash_digest_key(struct caam_h
+@@ -469,7 +324,7 @@ static int hash_digest_key(struct caam_h
}
/* Job descriptor to perform unkeyed hash on key_in */
OP_ALG_AS_INITFINAL);
append_seq_in_ptr(desc, src_dma, *keylen, 0);
append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
-@@ -513,10 +418,7 @@ static int hash_digest_key(struct caam_h
+@@ -513,12 +368,10 @@ static int hash_digest_key(struct caam_h
static int ahash_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
- struct device *jrdev = ctx->jrdev;
int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
int digestsize = crypto_ahash_digestsize(ahash);
++ struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
int ret;
-@@ -539,43 +441,19 @@ static int ahash_setkey(struct crypto_ah
+ u8 *hashed_key = NULL;
+
+@@ -539,43 +392,29 @@ static int ahash_setkey(struct crypto_ah
key = hashed_key;
}
- print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
--
++ /*
++ * If DKP is supported, use it in the shared descriptor to generate
++ * the split key.
++ */
++ if (ctrlpriv->era >= 6) {
++ ctx->adata.key_inline = true;
++ ctx->adata.keylen = keylen;
++ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
++ OP_ALG_ALGSEL_MASK);
+
- ret = gen_split_hash_key(ctx, key, keylen);
-+ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen,
-+ CAAM_MAX_HASH_KEY_SIZE);
- if (ret)
- goto bad_free_key;
+- if (ret)
+- goto bad_free_key;
++ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
++ goto bad_free_key;
- ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len,
- DMA_TO_DEVICE);
- dev_err(jrdev, "unable to map key i/o memory\n");
- ret = -ENOMEM;
- goto error_free_key;
-- }
- #ifdef DEBUG
- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
++ memcpy(ctx->key, key, keylen);
++ } else {
++ ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key,
++ keylen, CAAM_MAX_HASH_KEY_SIZE);
++ if (ret)
++ goto bad_free_key;
+ }
+-#ifdef DEBUG
+- print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+- DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
- ctx->split_key_pad_len, 1);
-+ ctx->adata.keylen_pad, 1);
- #endif
+-#endif
- ret = ahash_set_sh_desc(ahash);
- if (ret) {
bad_free_key:
kfree(hashed_key);
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
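The branch on ctrlpriv->era above is the only thing that selects between the two key-handling schemes: on Era 6 and later the raw key is kept (adata.key_inline, keylen and keylen_pad are filled in and the key is copied into ctx->key) so the shared descriptor can derive the split key itself via DKP, while on older Eras gen_split_key() runs a one-shot job and stores the already-derived split key in ctx->key. A tiny hypothetical helper, not in the patch, that names this decision (split_key_len() is the existing helper from key_gen.h; the exact padded sizes it returns are not assumed here):

/* Hypothetical wrapper for the Era check above (illustration only). */
static inline bool caam_ahash_uses_dkp(const struct caam_drv_private *ctrlpriv)
{
	/*
	 * Era 6+ parts support the Derived Key Protocol, so the raw key
	 * can travel inside the shared descriptor and be split by SEC.
	 */
	return ctrlpriv->era >= 6;
}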
-@@ -604,6 +482,8 @@ static inline void ahash_unmap(struct de
+@@ -604,6 +443,8 @@ static inline void ahash_unmap(struct de
struct ahash_edesc *edesc,
struct ahash_request *req, int dst_len)
{
if (edesc->src_nents)
dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
if (edesc->dst_dma)
-@@ -612,6 +492,12 @@ static inline void ahash_unmap(struct de
+@@ -612,6 +453,12 @@ static inline void ahash_unmap(struct de
if (edesc->sec4_sg_bytes)
dma_unmap_single(dev, edesc->sec4_sg_dma,
edesc->sec4_sg_bytes, DMA_TO_DEVICE);
}
static inline void ahash_unmap_ctx(struct device *dev,
-@@ -643,8 +529,7 @@ static void ahash_done(struct device *jr
+@@ -643,8 +490,7 @@ static void ahash_done(struct device *jr
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(jrdev, err);
-@@ -671,19 +556,19 @@ static void ahash_done_bi(struct device
+@@ -671,19 +517,19 @@ static void ahash_done_bi(struct device
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
kfree(edesc);
#ifdef DEBUG
-@@ -713,8 +598,7 @@ static void ahash_done_ctx_src(struct de
+@@ -713,8 +559,7 @@ static void ahash_done_ctx_src(struct de
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
if (err)
caam_jr_strstatus(jrdev, err);
-@@ -741,19 +625,19 @@ static void ahash_done_ctx_dst(struct de
+@@ -741,19 +586,19 @@ static void ahash_done_ctx_dst(struct de
struct ahash_edesc *edesc;
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
kfree(edesc);
#ifdef DEBUG
-@@ -835,13 +719,12 @@ static int ahash_update_ctx(struct ahash
+@@ -835,13 +680,12 @@ static int ahash_update_ctx(struct ahash
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
int in_len = *buflen + req->nbytes, to_hash;
u32 *desc;
int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index;
-@@ -895,10 +778,9 @@ static int ahash_update_ctx(struct ahash
+@@ -890,15 +734,14 @@ static int ahash_update_ctx(struct ahash
+ edesc->src_nents = src_nents;
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+
+- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
++ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_BIDIRECTIONAL);
if (ret)
goto unmap_ctx;
if (mapped_nents) {
sg_to_sec4_sg_last(req->src, mapped_nents,
-@@ -909,12 +791,10 @@ static int ahash_update_ctx(struct ahash
+@@ -909,12 +752,10 @@ static int ahash_update_ctx(struct ahash
to_hash - *buflen,
*next_buflen, 0);
} else {
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-@@ -969,12 +849,9 @@ static int ahash_final_ctx(struct ahash_
+@@ -969,12 +810,9 @@ static int ahash_final_ctx(struct ahash_
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index;
int digestsize = crypto_ahash_digestsize(ahash);
-@@ -1001,11 +878,11 @@ static int ahash_final_ctx(struct ahash_
+@@ -994,18 +832,17 @@ static int ahash_final_ctx(struct ahash_
+ desc = edesc->hw_desc;
+
+ edesc->sec4_sg_bytes = sec4_sg_bytes;
+- edesc->src_nents = 0;
+
+- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
++ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
sec4_sg_bytes, DMA_TO_DEVICE);
-@@ -1048,12 +925,9 @@ static int ahash_finup_ctx(struct ahash_
+@@ -1048,12 +885,9 @@ static int ahash_finup_ctx(struct ahash_
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int sec4_sg_src_index;
int src_nents, mapped_nents;
-@@ -1082,7 +956,7 @@ static int ahash_finup_ctx(struct ahash_
+@@ -1082,7 +916,7 @@ static int ahash_finup_ctx(struct ahash_
/* allocate space for base edesc and hw desc commands, link tables */
edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
flags);
if (!edesc) {
dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
-@@ -1098,9 +972,9 @@ static int ahash_finup_ctx(struct ahash_
+@@ -1093,14 +927,14 @@ static int ahash_finup_ctx(struct ahash_
+
+ edesc->src_nents = src_nents;
+
+- ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
++ ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len,
+ edesc->sec4_sg, DMA_TO_DEVICE);
if (ret)
goto unmap_ctx;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents,
sec4_sg_src_index, ctx->ctx_len + buflen,
-@@ -1136,15 +1010,18 @@ static int ahash_digest(struct ahash_req
+@@ -1136,15 +970,18 @@ static int ahash_digest(struct ahash_req
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
src_nents = sg_nents_for_len(req->src, req->nbytes);
if (src_nents < 0) {
dev_err(jrdev, "Invalid number of src SG.\n");
-@@ -1215,10 +1092,10 @@ static int ahash_final_no_ctx(struct aha
+@@ -1215,10 +1052,10 @@ static int ahash_final_no_ctx(struct aha
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int digestsize = crypto_ahash_digestsize(ahash);
struct ahash_edesc *edesc;
-@@ -1276,13 +1153,12 @@ static int ahash_update_no_ctx(struct ah
+@@ -1246,7 +1083,6 @@ static int ahash_final_no_ctx(struct aha
+ dev_err(jrdev, "unable to map dst\n");
+ goto unmap;
+ }
+- edesc->src_nents = 0;
+
+ #ifdef DEBUG
+ print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
+@@ -1276,13 +1112,12 @@ static int ahash_update_no_ctx(struct ah
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
int in_len = *buflen + req->nbytes, to_hash;
int sec4_sg_bytes, src_nents, mapped_nents;
struct ahash_edesc *edesc;
-@@ -1331,8 +1207,10 @@ static int ahash_update_no_ctx(struct ah
+@@ -1329,10 +1164,11 @@ static int ahash_update_no_ctx(struct ah
+
+ edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->dst_dma = 0;
+- edesc->dst_dma = 0;
- state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
- buf, *buflen);
sg_to_sec4_sg_last(req->src, mapped_nents,
edesc->sec4_sg + 1, 0);
-@@ -1342,8 +1220,6 @@ static int ahash_update_no_ctx(struct ah
+@@ -1342,8 +1178,6 @@ static int ahash_update_no_ctx(struct ah
*next_buflen, 0);
}
desc = edesc->hw_desc;
edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-@@ -1403,12 +1279,9 @@ static int ahash_finup_no_ctx(struct aha
+@@ -1403,12 +1237,9 @@ static int ahash_finup_no_ctx(struct aha
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
int digestsize = crypto_ahash_digestsize(ahash);
-@@ -1450,9 +1323,9 @@ static int ahash_finup_no_ctx(struct aha
+@@ -1450,9 +1281,9 @@ static int ahash_finup_no_ctx(struct aha
edesc->src_nents = src_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen,
req->nbytes);
-@@ -1496,11 +1369,10 @@ static int ahash_update_first(struct aha
+@@ -1496,11 +1327,10 @@ static int ahash_update_first(struct aha
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct caam_hash_state *state = ahash_request_ctx(req);
struct device *jrdev = ctx->jrdev;
int to_hash;
u32 *desc;
int src_nents, mapped_nents;
-@@ -1582,6 +1454,7 @@ static int ahash_update_first(struct aha
+@@ -1545,7 +1375,6 @@ static int ahash_update_first(struct aha
+ }
+
+ edesc->src_nents = src_nents;
+- edesc->dst_dma = 0;
+
+ ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0,
+ to_hash);
+@@ -1582,6 +1411,7 @@ static int ahash_update_first(struct aha
state->final = ahash_final_no_ctx;
scatterwalk_map_and_copy(next_buf, req->src, 0,
req->nbytes, 0);
}
#ifdef DEBUG
print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ",
-@@ -1688,7 +1561,6 @@ struct caam_hash_template {
+@@ -1688,7 +1518,6 @@ struct caam_hash_template {
unsigned int blocksize;
struct ahash_alg template_ahash;
u32 alg_type;
};
/* ahash descriptors */
-@@ -1714,7 +1586,6 @@ static struct caam_hash_template driver_
+@@ -1714,7 +1543,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA1,
}, {
.name = "sha224",
.driver_name = "sha224-caam",
-@@ -1736,7 +1607,6 @@ static struct caam_hash_template driver_
+@@ -1736,7 +1564,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA224,
}, {
.name = "sha256",
.driver_name = "sha256-caam",
-@@ -1758,7 +1628,6 @@ static struct caam_hash_template driver_
+@@ -1758,7 +1585,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA256,
}, {
.name = "sha384",
.driver_name = "sha384-caam",
-@@ -1780,7 +1649,6 @@ static struct caam_hash_template driver_
+@@ -1780,7 +1606,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA384,
}, {
.name = "sha512",
.driver_name = "sha512-caam",
-@@ -1802,7 +1670,6 @@ static struct caam_hash_template driver_
+@@ -1802,7 +1627,6 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_SHA512,
}, {
.name = "md5",
.driver_name = "md5-caam",
-@@ -1824,14 +1691,12 @@ static struct caam_hash_template driver_
+@@ -1824,14 +1648,12 @@ static struct caam_hash_template driver_
},
},
.alg_type = OP_ALG_ALGSEL_MD5,
struct ahash_alg ahash_alg;
};
-@@ -1853,6 +1718,7 @@ static int caam_hash_cra_init(struct cry
+@@ -1853,6 +1675,7 @@ static int caam_hash_cra_init(struct cry
HASH_MSG_LEN + SHA256_DIGEST_SIZE,
HASH_MSG_LEN + 64,
HASH_MSG_LEN + SHA512_DIGEST_SIZE };
/*
* Get a Job ring from Job Ring driver to ensure in-order
-@@ -1863,11 +1729,31 @@ static int caam_hash_cra_init(struct cry
+@@ -1863,11 +1686,31 @@ static int caam_hash_cra_init(struct cry
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(ctx->jrdev);
}
OP_ALG_ALGSEL_SHIFT];
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-@@ -1879,30 +1765,10 @@ static void caam_hash_cra_exit(struct cr
+@@ -1879,30 +1722,10 @@ static void caam_hash_cra_exit(struct cr
{
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
caam_jr_free(ctx->jrdev);
}
-@@ -1961,7 +1827,6 @@ caam_hash_alloc(struct caam_hash_templat
+@@ -1961,7 +1784,6 @@ caam_hash_alloc(struct caam_hash_templat
alg->cra_type = &crypto_ahash_type;
t_alg->alg_type = template->alg_type;
return t_alg;
}
+--- /dev/null
++++ b/drivers/crypto/caam/caamhash_desc.c
+@@ -0,0 +1,108 @@
++/*
++ * Shared descriptors for ahash algorithms
++ *
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the names of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "compat.h"
++#include "desc_constr.h"
++#include "caamhash_desc.h"
++
++/**
++ * cnstr_shdsc_ahash - ahash shared descriptor
++ * @desc: pointer to buffer used for descriptor construction
++ * @adata: pointer to authentication transform definitions.
++ * A split key is required for SEC Era < 6; the size of the split key
++ * is specified in this case.
++ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
++ * SHA256, SHA384, SHA512}.
++ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
++ * @digestsize: algorithm's digest size
++ * @ctx_len: size of Context Register
++ * @import_ctx: true if previous Context Register needs to be restored
++ * must be true for ahash update and final
++ *              must be false for ahash first and digest
++ * @era: SEC Era
++ */
++void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
++ int digestsize, int ctx_len, bool import_ctx, int era)
++{
++ u32 op = adata->algtype;
++
++ init_sh_desc(desc, HDR_SHARE_SERIAL);
++
++ /* Append key if it has been set; ahash update excluded */
++ if (state != OP_ALG_AS_UPDATE && adata->keylen) {
++ u32 *skip_key_load;
++
++ /* Skip key loading if already shared */
++ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
++ JUMP_COND_SHRD);
++
++ if (era < 6)
++ append_key_as_imm(desc, adata->key_virt,
++ adata->keylen_pad,
++ adata->keylen, CLASS_2 |
++ KEY_DEST_MDHA_SPLIT | KEY_ENC);
++ else
++ append_proto_dkp(desc, adata);
++
++ set_jump_tgt_here(desc, skip_key_load);
++
++ op |= OP_ALG_AAI_HMAC_PRECOMP;
++ }
++
++ /* If needed, import context from software */
++ if (import_ctx)
++ append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++
++ /* Class 2 operation */
++ append_operation(desc, op | state | OP_ALG_ENCRYPT);
++
++ /*
++ * Load from buf and/or src and write to req->result or state->context
++ * Calculate remaining bytes to read
++ */
++ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++ /* Read remaining bytes */
++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
++ FIFOLD_TYPE_MSG | KEY_VLF);
++ /* Store class2 context bytes */
++ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
++ LDST_SRCDST_BYTE_CONTEXT);
++}
++EXPORT_SYMBOL(cnstr_shdsc_ahash);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
++MODULE_AUTHOR("NXP Semiconductors");
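Read together with the caamhash.c hunks above, the new constructor is invoked once per shared descriptor, with import_ctx selecting whether the running context register is reloaded first. A sketch of that call pattern, using only names that appear in the hunks above (in the driver each call is followed by dma_sync_single_for_device() on the corresponding sh_desc_*_dma handle):

/* Call pattern from ahash_set_sh_desc() above (sketch, not new API). */
ctx->adata.key_virt = ctx->key;

cnstr_shdsc_ahash(ctx->sh_desc_update, &ctx->adata, OP_ALG_AS_UPDATE,
		  ctx->ctx_len, ctx->ctx_len, true, ctrlpriv->era);
cnstr_shdsc_ahash(ctx->sh_desc_update_first, &ctx->adata, OP_ALG_AS_INIT,
		  ctx->ctx_len, ctx->ctx_len, false, ctrlpriv->era);
cnstr_shdsc_ahash(ctx->sh_desc_fin, &ctx->adata, OP_ALG_AS_FINALIZE,
		  digestsize, ctx->ctx_len, true, ctrlpriv->era);
cnstr_shdsc_ahash(ctx->sh_desc_digest, &ctx->adata, OP_ALG_AS_INITFINAL,
		  digestsize, ctx->ctx_len, false, ctrlpriv->era);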
+--- /dev/null
++++ b/drivers/crypto/caam/caamhash_desc.h
+@@ -0,0 +1,49 @@
++/*
++ * Shared descriptors for ahash algorithms
++ *
++ * Copyright 2017 NXP
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the names of the above-listed copyright holders nor the
++ * names of any contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ *
++ * ALTERNATIVELY, this software may be distributed under the terms of the
++ * GNU General Public License ("GPL") as published by the Free Software
++ * Foundation, either version 2 of that License or (at your option) any
++ * later version.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _CAAMHASH_DESC_H_
++#define _CAAMHASH_DESC_H_
++
++/* length of descriptors text */
++#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
++#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
++#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
++#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
++#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
++
++void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
++ int digestsize, int ctx_len, bool import_ctx, int era);
++
++#endif /* _CAAMHASH_DESC_H_ */
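These bounds exist so callers can size their static descriptor buffers; with CAAM_CMD_SZ being the 4-byte CAAM command word, the worst case that caamhash.c pairs with them works out to 40 words per buffer. A standalone restatement of that arithmetic (CAAM_MAX_HASH_KEY_SIZE = 128 is an assumption here, matching 2 * SHA512_DIGEST_SIZE as used by caamhash.c):

#include <stdio.h>

/*
 * Standalone restatement of the sizing math above (not driver code).
 * CAAM_CMD_SZ is the 4-byte CAAM command word; CAAM_MAX_HASH_KEY_SIZE
 * is assumed to be 128 bytes for this example.
 */
#define CAAM_CMD_SZ               4
#define CAAM_MAX_HASH_KEY_SIZE    128
#define DESC_AHASH_BASE           (3 * CAAM_CMD_SZ)
#define DESC_AHASH_FINAL_LEN      (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
#define DESC_HASH_MAX_USED_BYTES  (DESC_AHASH_FINAL_LEN + CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN    (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)

int main(void)
{
	/* 32-byte worst-case descriptor text + 128 bytes of key material
	 * gives 160 bytes, i.e. 40 command words per sh_desc_* buffer. */
	printf("final descriptor text: %d bytes\n", DESC_AHASH_FINAL_LEN);
	printf("max used: %d bytes = %d words\n",
	       DESC_HASH_MAX_USED_BYTES, DESC_HASH_MAX_USED_LEN);
	return 0;
}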
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -18,6 +18,10 @@
/*
 * Read the Compile Time parameters and SCFGR to determine
-@@ -590,64 +597,67 @@ static int caam_probe(struct platform_de
+@@ -590,64 +597,69 @@ static int caam_probe(struct platform_de
JRSTART_JR1_START | JRSTART_JR2_START |
JRSTART_JR3_START);
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
- else
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
--
++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
++ } else {
++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
++ }
++ if (ret) {
++ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
++ goto iounmap_ctrl;
++ }
+
- /*
- * Detect and enable JobRs
- * First, find out how many ring spec'ed, allocate references
- if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
- of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
- rspec++;
-+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
-+ } else {
-+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-+ }
-+ if (ret) {
-+ dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
-+ goto iounmap_ctrl;
-+ }
++ ctrlpriv->era = caam_get_era();
- ctrlpriv->jrpdev = devm_kcalloc(&pdev->dev, rspec,
- sizeof(*ctrlpriv->jrpdev), GFP_KERNEL);
}
/* If no QI and no rings specified, quit and go home */
-@@ -662,8 +672,10 @@ static int caam_probe(struct platform_de
+@@ -662,8 +674,10 @@ static int caam_probe(struct platform_de
/*
* If SEC has RNG version >= 4 and RNG state handle has not been
* already instantiated, do RNG instantiation
ctrlpriv->rng4_sh_init =
rd_reg32(&ctrl->r4tst[0].rdsta);
/*
-@@ -731,77 +743,46 @@ static int caam_probe(struct platform_de
+@@ -730,78 +744,47 @@ static int caam_probe(struct platform_de
+
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era());
+- caam_get_era());
- dev_info(dev, "job rings = %d, qi = %d\n",
- ctrlpriv->total_jobrs, ctrlpriv->qi_present);
++ ctrlpriv->era);
+ dev_info(dev, "job rings = %d, qi = %d, dpaa2 = %s\n",
+ ctrlpriv->total_jobrs, ctrlpriv->qi_present,
+ caam_dpaa2 ? "yes" : "no");
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
S_IRUSR |
-@@ -809,7 +790,7 @@ static int caam_probe(struct platform_de
+@@ -809,7 +792,7 @@ static int caam_probe(struct platform_de
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
S_IRUSR |
-@@ -817,7 +798,7 @@ static int caam_probe(struct platform_de
+@@ -817,7 +800,7 @@ static int caam_probe(struct platform_de
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
S_IRUSR |
-@@ -828,13 +809,17 @@ static int caam_probe(struct platform_de
+@@ -828,13 +811,17 @@ static int caam_probe(struct platform_de
return 0;
caam_remove:
disable_caam_aclk:
clk_disable_unprepare(ctrlpriv->caam_aclk);
disable_caam_mem:
-@@ -844,17 +829,6 @@ disable_caam_ipg:
+@@ -844,17 +831,6 @@ disable_caam_ipg:
return ret;
}
#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
/*
-@@ -1107,8 +1104,8 @@ struct sec4_sg_entry {
+@@ -449,6 +446,18 @@ struct sec4_sg_entry {
+ #define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+ #define OP_PCLID_RSAENC_PUBKEY (0x18 << OP_PCLID_SHIFT)
+ #define OP_PCLID_RSADEC_PRVKEY (0x19 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_MD5 (0x20 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA1 (0x21 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA224 (0x22 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA256 (0x23 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA384 (0x24 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_SHA512 (0x25 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_MD5 (0x60 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA1 (0x61 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA224 (0x62 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA256 (0x63 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA384 (0x64 << OP_PCLID_SHIFT)
++#define OP_PCLID_DKP_RIF_SHA512 (0x65 << OP_PCLID_SHIFT)
+
+ /* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
+ #define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
+@@ -1098,6 +1107,22 @@ struct sec4_sg_entry {
+ /* MacSec protinfos */
+ #define OP_PCL_MACSEC 0x0001
+
++/* Derived Key Protocol (DKP) Protinfo */
++#define OP_PCL_DKP_SRC_SHIFT 14
++#define OP_PCL_DKP_SRC_MASK (3 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_SRC_IMM (0 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_SRC_SEQ (1 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_SRC_PTR (2 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_SRC_SGF (3 << OP_PCL_DKP_SRC_SHIFT)
++#define OP_PCL_DKP_DST_SHIFT 12
++#define OP_PCL_DKP_DST_MASK (3 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_DST_IMM (0 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_DST_SEQ (1 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_DST_PTR (2 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_DST_SGF (3 << OP_PCL_DKP_DST_SHIFT)
++#define OP_PCL_DKP_KEY_SHIFT 0
++#define OP_PCL_DKP_KEY_MASK (0xfff << OP_PCL_DKP_KEY_SHIFT)
++
+ /* PKI unidirectional protocol protinfo bits */
+ #define OP_PCL_PKPROT_TEST 0x0008
+ #define OP_PCL_PKPROT_DECRYPT 0x0004
+@@ -1107,8 +1132,8 @@ struct sec4_sg_entry {
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT 24
#define OP_ALG_TYPE_MASK (0x7 << OP_ALG_TYPE_SHIFT)
#define OP_ALG_ALGSEL_SHIFT 16
#define OP_ALG_ALGSEL_MASK (0xff << OP_ALG_ALGSEL_SHIFT)
-@@ -1249,7 +1246,7 @@ struct sec4_sg_entry {
+@@ -1249,7 +1274,7 @@ struct sec4_sg_entry {
#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
/* PKHA mode copy-memory functions */
#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
#define OP_ALG_PKMODE_DST_REG_SHIFT 10
#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
-@@ -1445,10 +1442,11 @@ struct sec4_sg_entry {
+@@ -1445,10 +1470,11 @@ struct sec4_sg_entry {
#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
/* Destination selectors */
#define MATH_DEST_SHIFT 8
-@@ -1629,4 +1627,31 @@ struct sec4_sg_entry {
+@@ -1457,6 +1483,7 @@ struct sec4_sg_entry {
+ #define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
+ #define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
+ #define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
++#define MATH_DEST_DPOVRD (0x07 << MATH_DEST_SHIFT)
+ #define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
+ #define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
+ #define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+@@ -1629,4 +1656,31 @@ struct sec4_sg_entry {
/* Frame Descriptor Command for Replacement Job Descriptor */
#define FD_CMD_REPLACE_JOB_DESC 0x20000000
}
-static inline void append_data(u32 *desc, void *data, int len)
-+static inline void append_data(u32 * const desc, void *data, int len)
++static inline void append_data(u32 * const desc, const void *data, int len)
{
u32 *offset = desc_end(desc);
}
-static inline void append_cmd_data(u32 *desc, void *data, int len,
-+static inline void append_cmd_data(u32 * const desc, void *data, int len,
++static inline void append_cmd_data(u32 * const desc, const void *data, int len,
u32 command)
{
append_cmd(desc, command | IMMEDIATE | len);
#define APPEND_CMD_PTR_TO_IMM(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
-+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
++static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int len, u32 options) \
{ \
PRINT_POS; \
*/
#define APPEND_CMD_PTR_TO_IMM2(cmd, op) \
-static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
-+static inline void append_##cmd##_as_imm(u32 * const desc, void *data, \
++static inline void append_##cmd##_as_imm(u32 * const desc, const void *data, \
unsigned int data_len, \
unsigned int len, u32 options) \
{ \
u32 options) \
{ \
PRINT_POS; \
-@@ -426,3 +434,66 @@ do { \
+@@ -426,3 +434,107 @@ do { \
APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
#define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
+ unsigned int keylen_pad;
+ union {
+ dma_addr_t key_dma;
-+ void *key_virt;
++ const void *key_virt;
+ };
+ bool key_inline;
+};
+ return (rem_bytes >= 0) ? 0 : -1;
+}
+
++/**
++ * append_proto_dkp - Derived Key Protocol (DKP): key -> split key
++ * @desc: pointer to buffer used for descriptor construction
++ * @adata: pointer to authentication transform definitions.
++ *          keylen should be the length of the initial key, while keylen_pad
++ *          is the length of the derived (split) key.
++ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
++ * SHA256, SHA384, SHA512}.
++ */
++static inline void append_proto_dkp(u32 * const desc, struct alginfo *adata)
++{
++ u32 protid;
++
++ /*
++ * Quick & dirty translation from OP_ALG_ALGSEL_{MD5, SHA*}
++ * to OP_PCLID_DKP_{MD5, SHA*}
++ */
++ protid = (adata->algtype & OP_ALG_ALGSEL_SUBMASK) |
++ (0x20 << OP_ALG_ALGSEL_SHIFT);
++
++ if (adata->key_inline) {
++ int words;
++
++ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
++ OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM |
++ adata->keylen);
++ append_data(desc, adata->key_virt, adata->keylen);
++
++ /* Reserve space in descriptor buffer for the derived key */
++ words = (ALIGN(adata->keylen_pad, CAAM_CMD_SZ) -
++ ALIGN(adata->keylen, CAAM_CMD_SZ)) / CAAM_CMD_SZ;
++ if (words)
++ (*desc) = cpu_to_caam32(caam32_to_cpu(*desc) + words);
++ } else {
++ append_operation(desc, OP_TYPE_UNI_PROTOCOL | protid |
++ OP_PCL_DKP_SRC_PTR | OP_PCL_DKP_DST_PTR |
++ adata->keylen);
++ append_ptr(desc, adata->key_dma);
++ }
++}
++
+#endif /* DESC_CONSTR_H */
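As a concrete reading of the "quick & dirty" translation in append_proto_dkp(): only the low ALGSEL nibble is kept and the 0x20 base of the DKP protocol IDs is OR-ed in, so an OP_ALG_ALGSEL_SHA256 value of 0x43 (an assumed value; the DKP IDs and bit positions come from the desc.h hunk above) maps onto OP_PCLID_DKP_SHA256 (0x23), and in the inline case the OPERATION word additionally carries OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM plus the raw key length. A small self-contained check of that mapping:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Shifts and DKP IDs mirror the desc.h hunk above; the numeric value of
 * OP_ALG_ALGSEL_SHA256 below is an assumption for this example.
 */
#define OP_PCLID_SHIFT            16
#define OP_ALG_ALGSEL_SHIFT       16
#define OP_ALG_ALGSEL_SUBMASK     (0x0f << OP_ALG_ALGSEL_SHIFT)
#define OP_ALG_ALGSEL_SHA256      (0x43 << OP_ALG_ALGSEL_SHIFT)   /* assumed */
#define OP_PCLID_DKP_SHA256       (0x23 << OP_PCLID_SHIFT)
#define OP_PCL_DKP_SRC_IMM        (0 << 14)
#define OP_PCL_DKP_DST_IMM        (0 << 12)

int main(void)
{
	uint32_t algtype = OP_ALG_ALGSEL_SHA256;
	/* Same translation append_proto_dkp() performs. */
	uint32_t protid = (algtype & OP_ALG_ALGSEL_SUBMASK) |
			  (0x20 << OP_ALG_ALGSEL_SHIFT);
	uint32_t keylen = 20;	/* example raw HMAC key length */

	assert(protid == OP_PCLID_DKP_SHA256);
	/* OPERATION word body for the immediate->immediate DKP case
	 * (OP_TYPE_UNI_PROTOCOL would be OR-ed in by the real code). */
	printf("DKP operation bits: 0x%08x\n",
	       protid | OP_PCL_DKP_SRC_IMM | OP_PCL_DKP_DST_IMM | keylen);
	return 0;
}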
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.c
/* Physical-presence section */
struct caam_ctrl __iomem *ctrl; /* controller region */
-@@ -103,11 +102,6 @@ struct caam_drv_private {
+@@ -84,6 +83,7 @@ struct caam_drv_private {
+ u8 qi_present; /* Nonzero if QI present in device */
+ int secvio_irq; /* Security violation interrupt number */
+ int virt_en; /* Virtualization enabled in CAAM */
++ int era; /* CAAM Era (internal HW revision) */
+
+ #define RNG4_MAX_HANDLES 2
+ /* RNG4 block */
+@@ -103,11 +103,6 @@ struct caam_drv_private {
#ifdef CONFIG_DEBUG_FS
struct dentry *dfs_root;
struct dentry *ctl; /* controller dir */
struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
struct dentry *ctl_kek, *ctl_tkek, *ctl_tdsk;
#endif
-@@ -115,4 +109,22 @@ struct caam_drv_private {
+@@ -115,4 +110,22 @@ struct caam_drv_private {
void caam_jr_algapi_init(struct device *dev);
void caam_jr_algapi_remove(struct device *dev);
+
+ fd.cmd = 0;
+ fd.format = qm_fd_compound;
-+ fd.cong_weight = req->fd_sgt[1].length;
++ fd.cong_weight = caam32_to_cpu(req->fd_sgt[1].length);
+ fd.addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(qidev, fd.addr)) {