1 From 47824cc9417946b49b80e88c74ab5ee69eacc2a7 Mon Sep 17 00:00:00 2001
2 From: Radu Alexe <radu.alexe@nxp.com>
3 Date: Thu, 25 May 2017 15:51:50 +0300
4 Subject: [PATCH] crypto: caam/qi - add support for TLS 1.0 record
6 Content-Type: text/plain; charset=UTF-8
7 Content-Transfer-Encoding: 8bit
9 TLS 1.0 descriptors run on SEC 4.x or higher.
10 For now, only the tls10(hmac(sha1),cbc(aes)) algorithm
11 is registered by the driver.
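As a usage sketch (not part of the patch itself), the new algorithm is driven
through the generic AEAD API; the key blob uses the authenc() format parsed by
crypto_authenc_extractkeys(), the 13-byte associated data is the TLS record
header (64-bit sequence number, type, version, length), and names such as
key_blob, key_blob_len and payload_len are illustrative:

    struct crypto_aead *tfm;
    struct aead_request *req;
    struct scatterlist sg;
    u8 iv[16];                 /* AES-CBC IV, passed via req->iv */
    u8 buf[13 + 512];          /* 13-byte TLS header (AAD) + payload + room for ICV and padding */

    tfm = crypto_alloc_aead("tls10(hmac(sha1),cbc(aes))", 0, 0);
    crypto_aead_setkey(tfm, key_blob, key_blob_len);   /* authenc()-format key blob */
    crypto_aead_setauthsize(tfm, 20);                  /* SHA-1 ICV */

    sg_init_one(&sg, buf, sizeof(buf));
    req = aead_request_alloc(tfm, GFP_KERNEL);
    aead_request_set_ad(req, 13);                      /* always 13 bytes for TLS */
    aead_request_set_crypt(req, &sg, &sg, payload_len, iv);
    crypto_aead_encrypt(req);  /* on completion the payload is replaced by payload + ICV + padding */
    /* error checks and async completion (aead_request_set_callback()) omitted */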
13 Known limitations:
14 - when src == dst - there should be no element in the src scatterlist array
15 that contains both associated data and message data.
16 - when src != dst - associated data is not copied from source into
17 destination.
18 - for decryption when src != dst the size of the destination should be
19 large enough so that the buffer may contain the decrypted authenc and
20 padded data (see the padding sketch below).
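The destination sizing in the last item follows from the TLS 1.0 CBC padding
generated by the encapsulation descriptor; a minimal worked sketch of the
length math (mirroring the REG0/REG1/REG2 computation in cnstr_shdsc_tls_encap()
and the padsize computation in tls_edesc_alloc(); values are illustrative):

    unsigned int blocksize = 16, icvlen = 20, payloadlen = 100;  /* AES-CBC block, HMAC-SHA1 ICV */
    /* padlen is always 1..blocksize, so the output is a whole number of blocks */
    unsigned int padlen = blocksize - ((payloadlen + icvlen) % blocksize);  /* 16 - (120 % 16) = 8 */
    unsigned int cryptlen = payloadlen + icvlen + padlen;                   /* 100 + 20 + 8 = 128 */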
22 Signed-off-by: Tudor Ambarus <tudor-dan.ambarus@nxp.com>
23 Signed-off-by: Cristian Stoica <cristian.stoica@nxp.com>
24 Signed-off-by: Alex Porosanu <alexandru.porosanu@nxp.com>
25 Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
26 Signed-off-by: Radu Alexe <radu.alexe@nxp.com>
28 drivers/crypto/caam/caamalg_desc.c | 414 ++++++++++++++++++++++++++++++++
29 drivers/crypto/caam/caamalg_desc.h | 13 +
30 drivers/crypto/caam/caamalg_qi.c | 478 +++++++++++++++++++++++++++++++++++++
31 drivers/crypto/caam/desc.h | 27 +++
32 4 files changed, 932 insertions(+)
34 --- a/drivers/crypto/caam/caamalg_desc.c
35 +++ b/drivers/crypto/caam/caamalg_desc.c
36 @@ -622,6 +622,420 @@ copy_iv:
37 EXPORT_SYMBOL(cnstr_shdsc_aead_givencap);
40 + * cnstr_shdsc_tls_encap - tls encapsulation shared descriptor
41 + * @desc: pointer to buffer used for descriptor construction
42 + * @cdata: pointer to block cipher transform definitions
43 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
44 + * with OP_ALG_AAI_CBC
45 + * @adata: pointer to authentication transform definitions.
46 + * A split key is required for SEC Era < 6; the size of the split key
47 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
48 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
49 + * @assoclen: associated data length
50 + * @ivsize: initialization vector size
51 + * @authsize: authentication data size
52 + * @blocksize: block cipher size
55 +void cnstr_shdsc_tls_encap(u32 * const desc, struct alginfo *cdata,
56 + struct alginfo *adata, unsigned int assoclen,
57 + unsigned int ivsize, unsigned int authsize,
58 + unsigned int blocksize, int era)
60 + u32 *key_jump_cmd, *zero_payload_jump_cmd;
61 + u32 genpad, idx_ld_datasz, idx_ld_pad, stidx;
64 + * Compute the index (in bytes) for the LOAD with destination of
65 + * Class 1 Data Size Register and for the LOAD that generates padding
67 + if (adata->key_inline) {
68 + idx_ld_datasz = DESC_TLS10_ENC_LEN + adata->keylen_pad +
69 + cdata->keylen - 4 * CAAM_CMD_SZ;
70 + idx_ld_pad = DESC_TLS10_ENC_LEN + adata->keylen_pad +
71 + cdata->keylen - 2 * CAAM_CMD_SZ;
73 + idx_ld_datasz = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
75 + idx_ld_pad = DESC_TLS10_ENC_LEN + 2 * CAAM_PTR_SZ -
79 + stidx = 1 << HDR_START_IDX_SHIFT;
80 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
82 + /* skip key loading if keys are already loaded due to sharing */
83 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
87 + if (adata->key_inline)
88 + append_key_as_imm(desc, adata->key_virt,
89 + adata->keylen_pad, adata->keylen,
90 + CLASS_2 | KEY_DEST_MDHA_SPLIT |
93 + append_key(desc, adata->key_dma, adata->keylen,
94 + CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
96 + append_proto_dkp(desc, adata);
99 + if (cdata->key_inline)
100 + append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
101 + cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
103 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
104 + KEY_DEST_CLASS_REG);
106 + set_jump_tgt_here(desc, key_jump_cmd);
108 + /* class 2 operation */
109 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
111 + /* class 1 operation */
112 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
115 + /* payloadlen = input data length - (assoclen + ivlen) */
116 + append_math_sub_imm_u32(desc, REG0, SEQINLEN, IMM, assoclen + ivsize);
118 + /* math1 = payloadlen + icvlen */
119 + append_math_add_imm_u32(desc, REG1, REG0, IMM, authsize);
121 + /* padlen = block_size - math1 % block_size */
122 + append_math_and_imm_u32(desc, REG3, REG1, IMM, blocksize - 1);
123 + append_math_sub_imm_u32(desc, REG2, IMM, REG3, blocksize);
125 + /* cryptlen = payloadlen + icvlen + padlen */
126 + append_math_add(desc, VARSEQOUTLEN, REG1, REG2, 4);
129 + * update immediate data with the padding length value
130 + * for the LOAD in the class 1 data size register.
132 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
133 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 7);
134 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
135 + (idx_ld_datasz << MOVE_OFFSET_SHIFT) | 8);
137 + /* overwrite PL field for the padding info FIFO entry */
138 + append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 |
139 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 7);
140 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF |
141 + (idx_ld_pad << MOVE_OFFSET_SHIFT) | 8);
143 + /* store encrypted payload, icv and padding */
144 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
146 + /* if payload length is zero, jump to zero-payload commands */
147 + append_math_add(desc, VARSEQINLEN, ZERO, REG0, 4);
148 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
151 + /* load iv in context1 */
152 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
153 + LDST_CLASS_1_CCB | ivsize);
155 + /* read assoc for authentication */
156 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
158 + /* insnoop payload */
159 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLD_TYPE_MSG |
160 + FIFOLD_TYPE_LAST2 | FIFOLDST_VLF);
162 + /* jump over the zero-payload commands */
163 + append_jump(desc, JUMP_TEST_ALL | 3);
165 + /* zero-payload commands */
166 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
168 + /* load iv in context1 */
169 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
170 + LDST_CLASS_1_CCB | ivsize);
172 + /* assoc data is the only data for authentication */
173 + append_seq_fifo_load(desc, assoclen, FIFOLD_CLASS_CLASS2 |
174 + FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2);
176 + /* send icv to encryption */
177 + append_move(desc, MOVE_SRC_CLASS2CTX | MOVE_DEST_CLASS1INFIFO |
180 + /* update class 1 data size register with padding length */
181 + append_load_imm_u32(desc, 0, LDST_CLASS_1_CCB |
182 + LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
184 + /* generate padding and send it to encryption */
185 + genpad = NFIFOENTRY_DEST_CLASS1 | NFIFOENTRY_LC1 | NFIFOENTRY_FC1 |
186 + NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_PTYPE_N;
187 + append_load_imm_u32(desc, genpad, LDST_CLASS_IND_CCB |
188 + LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
191 + print_hex_dump(KERN_ERR, "tls enc shdesc@" __stringify(__LINE__) ": ",
192 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
193 + desc_bytes(desc), 1);
196 +EXPORT_SYMBOL(cnstr_shdsc_tls_encap);
199 + * cnstr_shdsc_tls_decap - tls decapsulation shared descriptor
200 + * @desc: pointer to buffer used for descriptor construction
201 + * @cdata: pointer to block cipher transform definitions
202 + * Valid algorithm values - one of OP_ALG_ALGSEL_AES ANDed
203 + * with OP_ALG_AAI_CBC
204 + * @adata: pointer to authentication transform definitions.
205 + * A split key is required for SEC Era < 6; the size of the split key
206 + * is specified in this case. Valid algorithm values OP_ALG_ALGSEL_SHA1
207 + * ANDed with OP_ALG_AAI_HMAC_PRECOMP.
208 + * @assoclen: associated data length
209 + * @ivsize: initialization vector size
210 + * @authsize: authentication data size
211 + * @blocksize: block cipher size
214 +void cnstr_shdsc_tls_decap(u32 * const desc, struct alginfo *cdata,
215 + struct alginfo *adata, unsigned int assoclen,
216 + unsigned int ivsize, unsigned int authsize,
217 + unsigned int blocksize, int era)
219 + u32 stidx, jumpback;
220 + u32 *key_jump_cmd, *zero_payload_jump_cmd, *skip_zero_jump_cmd;
222 + * Pointer Size bool determines the size of address pointers.
223 + * false - Pointers fit in one 32-bit word.
224 + * true - Pointers fit in two 32-bit words.
226 + bool ps = (CAAM_PTR_SZ != CAAM_CMD_SZ);
228 + stidx = 1 << HDR_START_IDX_SHIFT;
229 + init_sh_desc(desc, HDR_SHARE_SERIAL | stidx);
231 + /* skip key loading if keys are already loaded due to sharing */
232 + key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
236 + append_key(desc, adata->key_dma, adata->keylen, CLASS_2 |
237 + KEY_DEST_MDHA_SPLIT | KEY_ENC);
239 + append_proto_dkp(desc, adata);
241 + append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 |
242 + KEY_DEST_CLASS_REG);
244 + set_jump_tgt_here(desc, key_jump_cmd);
246 + /* class 2 operation */
247 + append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL |
248 + OP_ALG_DECRYPT | OP_ALG_ICV_ON);
249 + /* class 1 operation */
250 + append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
253 + /* VSIL = input data length - 2 * block_size */
254 + append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM, 2 *
258 + * payloadlen + icvlen + padlen = input data length - (assoclen +
261 + append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, assoclen + ivsize);
263 + /* skip data to the last but one cipher block */
264 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | LDST_VLF);
266 + /* load iv for the last cipher block */
267 + append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_CLASS_CTX |
268 + LDST_CLASS_1_CCB | ivsize);
270 + /* read last cipher block */
271 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
272 + FIFOLD_TYPE_LAST1 | blocksize);
274 + /* move decrypted block into math0 and math1 */
275 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_MATH0 |
278 + /* reset AES CHA */
279 + append_load_imm_u32(desc, CCTRL_RESET_CHA_AESA, LDST_CLASS_IND_CCB |
280 + LDST_SRCDST_WORD_CHACTRL | LDST_IMM);
282 + /* rewind input sequence */
283 + append_seq_in_ptr_intlen(desc, 0, 65535, SQIN_RTO);
285 + /* key1 is in decryption form */
286 + append_operation(desc, cdata->algtype | OP_ALG_AAI_DK |
287 + OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
289 + /* load iv in context1 */
290 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_1_CCB |
291 + LDST_SRCDST_WORD_CLASS_CTX | ivsize);
293 + /* read sequence number */
294 + append_seq_fifo_load(desc, 8, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG);
295 + /* load Type, Version and Len fields in math0 */
296 + append_cmd(desc, CMD_SEQ_LOAD | LDST_CLASS_DECO |
297 + LDST_SRCDST_WORD_DECO_MATH0 | (3 << LDST_OFFSET_SHIFT) | 5);
299 + /* compute (padlen - 1) */
300 + append_math_and_imm_u64(desc, REG1, REG1, IMM, 255);
302 + /* math2 = icvlen + (padlen - 1) + 1 */
303 + append_math_add_imm_u32(desc, REG2, REG1, IMM, authsize + 1);
305 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
307 + /* VSOL = payloadlen + icvlen + padlen */
308 + append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, 4);
310 + if (caam_little_end)
311 + append_moveb(desc, MOVE_WAITCOMP |
312 + MOVE_SRC_MATH0 | MOVE_DEST_MATH0 | 8);
314 + /* update Len field */
315 + append_math_sub(desc, REG0, REG0, REG2, 8);
317 + /* store decrypted payload, icv and padding */
318 + append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF);
320 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
321 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
323 + zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
326 + /* send Type, Version and Len(pre ICV) fields to authentication */
327 + append_move(desc, MOVE_WAITCOMP |
328 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
329 + (3 << MOVE_OFFSET_SHIFT) | 5);
331 + /* outsnooping payload */
332 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
333 + FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LAST2 |
335 + skip_zero_jump_cmd = append_jump(desc, JUMP_TEST_ALL | 2);
337 + set_jump_tgt_here(desc, zero_payload_jump_cmd);
338 + /* send Type, Version and Len(pre ICV) fields to authentication */
339 + append_move(desc, MOVE_WAITCOMP | MOVE_AUX_LS |
340 + MOVE_SRC_MATH0 | MOVE_DEST_CLASS2INFIFO |
341 + (3 << MOVE_OFFSET_SHIFT) | 5);
343 + set_jump_tgt_here(desc, skip_zero_jump_cmd);
344 + append_math_add(desc, VARSEQINLEN, ZERO, REG2, 4);
346 + /* load icvlen and padlen */
347 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG |
348 + FIFOLD_TYPE_LAST1 | FIFOLDST_VLF);
350 + /* VSIL = (payloadlen + icvlen + padlen) - (icvlen + padlen) */
351 + append_math_sub(desc, VARSEQINLEN, REG3, REG2, 4);
354 + * Start a new input sequence using the SEQ OUT PTR command options,
355 + * pointer and length used when the current output sequence was defined.
359 + * Move the lower 32 bits of Shared Descriptor address, the
360 + * SEQ OUT PTR command, Output Pointer (2 words) and
361 + * Output Length into math registers.
363 + if (caam_little_end)
364 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
366 + (55 * 4 << MOVE_OFFSET_SHIFT) | 20);
368 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
370 + (54 * 4 << MOVE_OFFSET_SHIFT) | 20);
372 + /* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
373 + append_math_and_imm_u32(desc, REG0, REG0, IMM,
374 + ~(CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR));
375 + /* Append a JUMP command after the copied fields */
376 + jumpback = CMD_JUMP | (char)-9;
377 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
378 + LDST_SRCDST_WORD_DECO_MATH2 |
379 + (4 << LDST_OFFSET_SHIFT));
380 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
381 + /* Move the updated fields back to the Job Descriptor */
382 + if (caam_little_end)
383 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
384 + MOVE_DEST_DESCBUF |
385 + (55 * 4 << MOVE_OFFSET_SHIFT) | 24);
387 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
388 + MOVE_DEST_DESCBUF |
389 + (54 * 4 << MOVE_OFFSET_SHIFT) | 24);
392 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
393 + * and then jump back to the next command from the
394 + * Shared Descriptor.
396 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 6);
399 + * Move the SEQ OUT PTR command, Output Pointer (1 word) and
400 + * Output Length into math registers.
402 + if (caam_little_end)
403 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
405 + (54 * 4 << MOVE_OFFSET_SHIFT) | 12);
407 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_DESCBUF |
409 + (53 * 4 << MOVE_OFFSET_SHIFT) | 12);
411 + /* Transform the SEQ OUT PTR command into a SEQ IN PTR command */
412 + append_math_and_imm_u64(desc, REG0, REG0, IMM,
413 + ~(((u64)(CMD_SEQ_IN_PTR ^
414 + CMD_SEQ_OUT_PTR)) << 32));
415 + /* Append a JUMP command after the copied fields */
416 + jumpback = CMD_JUMP | (char)-7;
417 + append_load_imm_u32(desc, jumpback, LDST_CLASS_DECO | LDST_IMM |
418 + LDST_SRCDST_WORD_DECO_MATH1 |
419 + (4 << LDST_OFFSET_SHIFT));
420 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 1);
421 + /* Move the updated fields back to the Job Descriptor */
422 + if (caam_little_end)
423 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
424 + MOVE_DEST_DESCBUF |
425 + (54 * 4 << MOVE_OFFSET_SHIFT) | 16);
427 + append_move(desc, MOVE_WAITCOMP | MOVE_SRC_MATH0 |
428 + MOVE_DEST_DESCBUF |
429 + (53 * 4 << MOVE_OFFSET_SHIFT) | 16);
432 + * Read the new SEQ IN PTR command, Input Pointer, Input Length
433 + * and then jump back to the next command from the
434 + * Shared Descriptor.
436 + append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | 5);
440 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_SKIP | FIFOLDST_VLF);
442 + append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_ICV |
443 + FIFOLD_TYPE_LAST2 | authsize);
446 + print_hex_dump(KERN_ERR, "tls dec shdesc@" __stringify(__LINE__) ": ",
447 + DUMP_PREFIX_ADDRESS, 16, 4, desc,
448 + desc_bytes(desc), 1);
451 +EXPORT_SYMBOL(cnstr_shdsc_tls_decap);
454 * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor
455 * @desc: pointer to buffer used for descriptor construction
456 * @cdata: pointer to block cipher transform definitions
457 --- a/drivers/crypto/caam/caamalg_desc.h
458 +++ b/drivers/crypto/caam/caamalg_desc.h
460 #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
461 #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
463 +#define DESC_TLS_BASE (4 * CAAM_CMD_SZ)
464 +#define DESC_TLS10_ENC_LEN (DESC_TLS_BASE + 29 * CAAM_CMD_SZ)
466 /* Note: Nonce is counted in cdata.keylen */
467 #define DESC_AEAD_CTR_RFC3686_LEN (4 * CAAM_CMD_SZ)
469 @@ -72,6 +75,16 @@ void cnstr_shdsc_aead_givencap(u32 * con
470 u32 *nonce, const u32 ctx1_iv_off,
471 const bool is_qi, int era);
473 +void cnstr_shdsc_tls_encap(u32 *const desc, struct alginfo *cdata,
474 + struct alginfo *adata, unsigned int assoclen,
475 + unsigned int ivsize, unsigned int authsize,
476 + unsigned int blocksize, int era);
478 +void cnstr_shdsc_tls_decap(u32 *const desc, struct alginfo *cdata,
479 + struct alginfo *adata, unsigned int assoclen,
480 + unsigned int ivsize, unsigned int authsize,
481 + unsigned int blocksize, int era);
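A minimal sketch of the calling convention for these constructors (the argument
values mirror what the caamalg_qi glue below passes for tls10(hmac(sha1),cbc(aes));
the descriptor buffer, era value and the key fields of cdata/adata are assumptions
set up by the caller, see tls_set_sh_desc()):

    u32 sh_desc_enc[64];    /* shared descriptor buffer; job + shared desc must fit in 64 words */
    int era = 6;            /* SEC Era, illustrative */
    struct alginfo cdata = { .algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC };
    struct alginfo adata = { .algtype = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP };

    /* key_virt/key_dma/keylen/keylen_pad/key_inline of cdata and adata set elsewhere */
    cnstr_shdsc_tls_encap(sh_desc_enc, &cdata, &adata,
                          13,   /* TLS associated data length */
                          16,   /* AES-CBC IV size */
                          20,   /* SHA-1 ICV size */
                          16,   /* AES block size */
                          era);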
483 void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata,
484 unsigned int ivsize, unsigned int icvsize,
486 --- a/drivers/crypto/caam/caamalg_qi.c
487 +++ b/drivers/crypto/caam/caamalg_qi.c
488 @@ -290,6 +290,167 @@ static int des3_aead_setkey(struct crypt
492 +static int tls_set_sh_desc(struct crypto_aead *tls)
494 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
495 + unsigned int ivsize = crypto_aead_ivsize(tls);
496 + unsigned int blocksize = crypto_aead_blocksize(tls);
497 + unsigned int assoclen = 13; /* always 13 bytes for TLS */
498 + unsigned int data_len[2];
500 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
502 + if (!ctx->cdata.keylen || !ctx->authsize)
506 + * TLS 1.0 encrypt shared descriptor
507 + * Job Descriptor and Shared Descriptor
508 + * must fit into the 64-word Descriptor h/w Buffer
510 + data_len[0] = ctx->adata.keylen_pad;
511 + data_len[1] = ctx->cdata.keylen;
513 + if (desc_inline_query(DESC_TLS10_ENC_LEN, DESC_JOB_IO_LEN, data_len,
514 + &inl_mask, ARRAY_SIZE(data_len)) < 0)
518 + ctx->adata.key_virt = ctx->key;
520 + ctx->adata.key_dma = ctx->key_dma;
523 + ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
525 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
527 + ctx->adata.key_inline = !!(inl_mask & 1);
528 + ctx->cdata.key_inline = !!(inl_mask & 2);
530 + cnstr_shdsc_tls_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
531 + assoclen, ivsize, ctx->authsize, blocksize,
535 + * TLS 1.0 decrypt shared descriptor
536 + * Keys do not fit inline, regardless of algorithms used
538 + ctx->adata.key_inline = false;
539 + ctx->adata.key_dma = ctx->key_dma;
540 + ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
542 + cnstr_shdsc_tls_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
543 + assoclen, ivsize, ctx->authsize, blocksize,
549 +static int tls_setauthsize(struct crypto_aead *tls, unsigned int authsize)
551 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
553 + ctx->authsize = authsize;
554 + tls_set_sh_desc(tls);
559 +static int tls_setkey(struct crypto_aead *tls, const u8 *key,
560 + unsigned int keylen)
562 + struct caam_ctx *ctx = crypto_aead_ctx(tls);
563 + struct device *jrdev = ctx->jrdev;
564 + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
565 + struct crypto_authenc_keys keys;
568 + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
572 + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
573 + keys.authkeylen + keys.enckeylen, keys.enckeylen,
575 + print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ",
576 + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
580 + * If DKP is supported, use it in the shared descriptor to generate
583 + if (ctrlpriv->era >= 6) {
584 + ctx->adata.keylen = keys.authkeylen;
585 + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
586 + OP_ALG_ALGSEL_MASK);
588 + if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
591 + memcpy(ctx->key, keys.authkey, keys.authkeylen);
592 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
594 + dma_sync_single_for_device(jrdev, ctx->key_dma,
595 + ctx->adata.keylen_pad +
596 + keys.enckeylen, ctx->dir);
597 + goto skip_split_key;
600 + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
601 + keys.authkeylen, CAAM_MAX_KEY_SIZE -
606 + /* postpend encryption key to auth split key */
607 + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
608 + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad +
609 + keys.enckeylen, ctx->dir);
612 + dev_err(jrdev, "split keylen %d split keylen padded %d\n",
613 + ctx->adata.keylen, ctx->adata.keylen_pad);
614 + print_hex_dump(KERN_ERR, "ctx.key@" __stringify(__LINE__)": ",
615 + DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
616 + ctx->adata.keylen_pad + keys.enckeylen, 1);
620 + ctx->cdata.keylen = keys.enckeylen;
622 + ret = tls_set_sh_desc(tls);
626 + /* Now update the driver contexts with the new shared descriptor */
627 + if (ctx->drv_ctx[ENCRYPT]) {
628 + ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
631 + dev_err(jrdev, "driver enc context update failed\n");
636 + if (ctx->drv_ctx[DECRYPT]) {
637 + ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
640 + dev_err(jrdev, "driver dec context update failed\n");
645 + memzero_explicit(&keys, sizeof(keys));
648 + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN);
649 + memzero_explicit(&keys, sizeof(keys));
653 static int gcm_set_sh_desc(struct crypto_aead *aead)
655 struct caam_ctx *ctx = crypto_aead_ctx(aead);
656 @@ -809,6 +970,29 @@ struct aead_edesc {
660 + * tls_edesc - s/w-extended tls descriptor
661 + * @src_nents: number of segments in input scatterlist
662 + * @dst_nents: number of segments in output scatterlist
663 + * @iv_dma: dma address of iv for checking continuity and link table
664 + * @qm_sg_bytes: length of dma mapped h/w link table
665 + * @tmp: array of scatterlists used by 'scatterwalk_ffwd'
666 + * @qm_sg_dma: bus physical mapped address of h/w link table
667 + * @drv_req: driver-specific request structure
668 + * @sgt: the h/w link table, followed by IV
675 + dma_addr_t qm_sg_dma;
676 + struct scatterlist tmp[2];
677 + struct scatterlist *dst;
678 + struct caam_drv_req drv_req;
679 + struct qm_sg_entry sgt[0];
683 * skcipher_edesc - s/w-extended skcipher descriptor
684 * @src_nents: number of segments in input scatterlist
685 * @dst_nents: number of segments in output scatterlist
686 @@ -900,6 +1084,18 @@ static void aead_unmap(struct device *de
687 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
690 +static void tls_unmap(struct device *dev,
691 + struct tls_edesc *edesc,
692 + struct aead_request *req)
694 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
695 + int ivsize = crypto_aead_ivsize(aead);
697 + caam_unmap(dev, req->src, edesc->dst, edesc->src_nents,
698 + edesc->dst_nents, edesc->iv_dma, ivsize, DMA_TO_DEVICE,
699 + edesc->qm_sg_dma, edesc->qm_sg_bytes);
702 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
703 struct skcipher_request *req)
705 @@ -1192,6 +1388,243 @@ static int aead_decrypt(struct aead_requ
706 return aead_crypt(req, false);
709 +static void tls_done(struct caam_drv_req *drv_req, u32 status)
711 + struct device *qidev;
712 + struct tls_edesc *edesc;
713 + struct aead_request *aead_req = drv_req->app_ctx;
714 + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
715 + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead);
718 + qidev = caam_ctx->qidev;
720 + if (unlikely(status)) {
721 + caam_jr_strstatus(qidev, status);
725 + edesc = container_of(drv_req, typeof(*edesc), drv_req);
726 + tls_unmap(qidev, edesc, aead_req);
728 + aead_request_complete(aead_req, ecode);
729 + qi_cache_free(edesc);
733 + * allocate and map the tls extended descriptor
735 +static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt)
737 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
738 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
739 + unsigned int blocksize = crypto_aead_blocksize(aead);
740 + unsigned int padsize, authsize;
741 + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
742 + typeof(*alg), aead);
743 + struct device *qidev = ctx->qidev;
744 + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
745 + GFP_KERNEL : GFP_ATOMIC;
746 + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
747 + struct tls_edesc *edesc;
748 + dma_addr_t qm_sg_dma, iv_dma = 0;
751 + int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes;
752 + int in_len, out_len;
753 + struct qm_sg_entry *sg_table, *fd_sgt;
754 + struct caam_drv_ctx *drv_ctx;
755 + struct scatterlist *dst;
758 + padsize = blocksize - ((req->cryptlen + ctx->authsize) %
760 + authsize = ctx->authsize + padsize;
762 + authsize = ctx->authsize;
765 + drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
766 + if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
767 + return (struct tls_edesc *)drv_ctx;
769 + /* allocate space for base edesc, link tables and IV */
770 + edesc = qi_cache_alloc(GFP_DMA | flags);
771 + if (unlikely(!edesc)) {
772 + dev_err(qidev, "could not allocate extended descriptor\n");
773 + return ERR_PTR(-ENOMEM);
776 + if (likely(req->src == req->dst)) {
777 + src_nents = sg_nents_for_len(req->src, req->assoclen +
779 + (encrypt ? authsize : 0));
780 + if (unlikely(src_nents < 0)) {
781 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
782 + req->assoclen + req->cryptlen +
783 + (encrypt ? authsize : 0));
784 + qi_cache_free(edesc);
785 + return ERR_PTR(src_nents);
788 + mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
789 + DMA_BIDIRECTIONAL);
790 + if (unlikely(!mapped_src_nents)) {
791 + dev_err(qidev, "unable to map source\n");
792 + qi_cache_free(edesc);
793 + return ERR_PTR(-ENOMEM);
797 + src_nents = sg_nents_for_len(req->src, req->assoclen +
799 + if (unlikely(src_nents < 0)) {
800 + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
801 + req->assoclen + req->cryptlen);
802 + qi_cache_free(edesc);
803 + return ERR_PTR(src_nents);
806 + dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen);
807 + dst_nents = sg_nents_for_len(dst, req->cryptlen +
808 + (encrypt ? authsize : 0));
809 + if (unlikely(dst_nents < 0)) {
810 + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
812 + (encrypt ? authsize : 0));
813 + qi_cache_free(edesc);
814 + return ERR_PTR(dst_nents);
818 + mapped_src_nents = dma_map_sg(qidev, req->src,
819 + src_nents, DMA_TO_DEVICE);
820 + if (unlikely(!mapped_src_nents)) {
821 + dev_err(qidev, "unable to map source\n");
822 + qi_cache_free(edesc);
823 + return ERR_PTR(-ENOMEM);
826 + mapped_src_nents = 0;
829 + mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents,
831 + if (unlikely(!mapped_dst_nents)) {
832 + dev_err(qidev, "unable to map destination\n");
833 + dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
834 + qi_cache_free(edesc);
835 + return ERR_PTR(-ENOMEM);
840 + * Create S/G table: IV, src, dst.
841 + * Input is not contiguous.
843 + qm_sg_ents = 1 + mapped_src_nents +
844 + (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
845 + sg_table = &edesc->sgt[0];
846 + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
848 + ivsize = crypto_aead_ivsize(aead);
849 + iv = (u8 *)(sg_table + qm_sg_ents);
850 + /* Make sure IV is located in a DMAable area */
851 + memcpy(iv, req->iv, ivsize);
852 + iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
853 + if (dma_mapping_error(qidev, iv_dma)) {
854 + dev_err(qidev, "unable to map IV\n");
855 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0,
857 + qi_cache_free(edesc);
858 + return ERR_PTR(-ENOMEM);
861 + edesc->src_nents = src_nents;
862 + edesc->dst_nents = dst_nents;
864 + edesc->iv_dma = iv_dma;
865 + edesc->drv_req.app_ctx = req;
866 + edesc->drv_req.cbk = tls_done;
867 + edesc->drv_req.drv_ctx = drv_ctx;
869 + dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
872 + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
873 + qm_sg_index += mapped_src_nents;
875 + if (mapped_dst_nents > 1)
876 + sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table +
879 + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
880 + if (dma_mapping_error(qidev, qm_sg_dma)) {
881 + dev_err(qidev, "unable to map S/G table\n");
882 + caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma,
883 + ivsize, DMA_TO_DEVICE, 0, 0);
884 + qi_cache_free(edesc);
885 + return ERR_PTR(-ENOMEM);
888 + edesc->qm_sg_dma = qm_sg_dma;
889 + edesc->qm_sg_bytes = qm_sg_bytes;
891 + out_len = req->cryptlen + (encrypt ? authsize : 0);
892 + in_len = ivsize + req->assoclen + req->cryptlen;
894 + fd_sgt = &edesc->drv_req.fd_sgt[0];
896 + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);
898 + if (req->dst == req->src)
899 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
900 + (sg_nents_for_len(req->src, req->assoclen) +
901 + 1) * sizeof(*sg_table), out_len, 0);
902 + else if (mapped_dst_nents == 1)
903 + dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0);
905 + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
906 + qm_sg_index, out_len, 0);
911 +static int tls_crypt(struct aead_request *req, bool encrypt)
913 + struct tls_edesc *edesc;
914 + struct crypto_aead *aead = crypto_aead_reqtfm(req);
915 + struct caam_ctx *ctx = crypto_aead_ctx(aead);
918 + if (unlikely(caam_congested))
921 + edesc = tls_edesc_alloc(req, encrypt);
922 + if (IS_ERR_OR_NULL(edesc))
923 + return PTR_ERR(edesc);
925 + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
927 + ret = -EINPROGRESS;
929 + tls_unmap(ctx->qidev, edesc, req);
930 + qi_cache_free(edesc);
936 +static int tls_encrypt(struct aead_request *req)
938 + return tls_crypt(req, true);
941 +static int tls_decrypt(struct aead_request *req)
943 + return tls_crypt(req, false);
946 static int ipsec_gcm_encrypt(struct aead_request *req)
948 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
949 @@ -2411,6 +2844,26 @@ static struct caam_aead_alg driver_aeads
956 + .cra_name = "tls10(hmac(sha1),cbc(aes))",
957 + .cra_driver_name = "tls10-hmac-sha1-cbc-aes-caam-qi",
958 + .cra_blocksize = AES_BLOCK_SIZE,
960 + .setkey = tls_setkey,
961 + .setauthsize = tls_setauthsize,
962 + .encrypt = tls_encrypt,
963 + .decrypt = tls_decrypt,
964 + .ivsize = AES_BLOCK_SIZE,
965 + .maxauthsize = SHA1_DIGEST_SIZE,
968 + .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
969 + .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
970 + OP_ALG_AAI_HMAC_PRECOMP,
975 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
976 @@ -2418,6 +2871,16 @@ static int caam_init_common(struct caam_
978 struct caam_drv_private *priv;
980 + /* Digest sizes for MD5, SHA1, SHA-224, SHA-256, SHA-384, SHA-512 */
981 + static const u8 digest_size[] = {
984 + SHA224_DIGEST_SIZE,
985 + SHA256_DIGEST_SIZE,
986 + SHA384_DIGEST_SIZE,
992 * distribute tfms across job rings to ensure in-order
993 @@ -2449,6 +2912,21 @@ static int caam_init_common(struct caam_
994 ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
997 + if (ctx->adata.algtype) {
998 + op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK)
999 + >> OP_ALG_ALGSEL_SHIFT;
1000 + if (op_id < ARRAY_SIZE(digest_size)) {
1001 + ctx->authsize = digest_size[op_id];
1003 + dev_err(ctx->jrdev,
1004 + "incorrect op_id %d; must be less than %zu\n",
1005 + op_id, ARRAY_SIZE(digest_size));
1006 + caam_jr_free(ctx->jrdev);
1010 + ctx->authsize = 0;
1013 spin_lock_init(&ctx->lock);
1014 ctx->drv_ctx[ENCRYPT] = NULL;
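For the tls10(hmac(sha1),cbc(aes)) template registered above, the digest_size[]
lookup resolves as in the sketch below (it assumes the usual desc.h encoding in
which the MD5..SHA-512 ALGSEL sub-values are consecutive, which is what the
ordering of digest_size[] relies on):

    /* class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP */
    op_id = (ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT;
    /* SHA-1 follows MD5 in the ALGSEL encoding, so op_id == 1 here */
    ctx->authsize = digest_size[op_id];    /* SHA1_DIGEST_SIZE == 20 bytes */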
1015 --- a/drivers/crypto/caam/desc.h
1016 +++ b/drivers/crypto/caam/desc.h
1017 @@ -1704,4 +1704,31 @@
1018 /* Frame Descriptor Command for Replacement Job Descriptor */
1019 #define FD_CMD_REPLACE_JOB_DESC 0x20000000
1021 +/* CHA Control Register bits */
1022 +#define CCTRL_RESET_CHA_ALL 0x1
1023 +#define CCTRL_RESET_CHA_AESA 0x2
1024 +#define CCTRL_RESET_CHA_DESA 0x4
1025 +#define CCTRL_RESET_CHA_AFHA 0x8
1026 +#define CCTRL_RESET_CHA_KFHA 0x10
1027 +#define CCTRL_RESET_CHA_SF8A 0x20
1028 +#define CCTRL_RESET_CHA_PKHA 0x40
1029 +#define CCTRL_RESET_CHA_MDHA 0x80
1030 +#define CCTRL_RESET_CHA_CRCA 0x100
1031 +#define CCTRL_RESET_CHA_RNG 0x200
1032 +#define CCTRL_RESET_CHA_SF9A 0x400
1033 +#define CCTRL_RESET_CHA_ZUCE 0x800
1034 +#define CCTRL_RESET_CHA_ZUCA 0x1000
1035 +#define CCTRL_UNLOAD_PK_A0 0x10000
1036 +#define CCTRL_UNLOAD_PK_A1 0x20000
1037 +#define CCTRL_UNLOAD_PK_A2 0x40000
1038 +#define CCTRL_UNLOAD_PK_A3 0x80000
1039 +#define CCTRL_UNLOAD_PK_B0 0x100000
1040 +#define CCTRL_UNLOAD_PK_B1 0x200000
1041 +#define CCTRL_UNLOAD_PK_B2 0x400000
1042 +#define CCTRL_UNLOAD_PK_B3 0x800000
1043 +#define CCTRL_UNLOAD_PK_N 0x1000000
1044 +#define CCTRL_UNLOAD_PK_A 0x4000000
1045 +#define CCTRL_UNLOAD_PK_B 0x8000000
1046 +#define CCTRL_UNLOAD_SBOX 0x10000000