/* do we need to generate IV? */
if (areq_ctx->backup_giv != NULL) {
-
/* set the DMA mapped IV address */
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
ssi_req.ivgen_dma_addr[0] = areq_ctx->gen_ctx.iv_dma_addr + CTR_RFC3686_NONCE_SIZE;
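/* the generated IV is written just past the 4-byte RFC 3686 nonce in the IV buffer, hence the CTR_RFC3686_NONCE_SIZE offset */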
req->iv = areq_ctx->backup_iv;
return rc;
-
}
#if SSI_CC_HAS_AES_CCM
rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
if (unlikely(rc != 0))
goto ablkcipher_exit;
-
}
SSI_LOG_DEBUG("areq_ctx->dma_buf_type = %s\n",
if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
if (unlikely((mapped_nents + 1) >
LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES)) {
-
SSI_LOG_ERR("CCM case.Too many fragments. "
"Current %d max %d\n",
(areq_ctx->assoc.nents + 1),
if (unlikely((do_chain) ||
(areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI))) {
-
SSI_LOG_DEBUG("Chain assoc: buff_type=%s nents=%u\n",
GET_DMA_BUFFER_TYPE(areq_ctx->assoc_buff_type),
areq_ctx->assoc.nents);
//check where the data starts
while (sg_index <= size_to_skip) {
-
offset -= areq_ctx->dstSgl->length;
areq_ctx->dstSgl = sg_next(areq_ctx->dstSgl);
//if we have reached the end of the sgl, this is unexpected
if (unlikely(
(areq_ctx->assoc_buff_type == SSI_DMA_BUF_MLLI) ||
(areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
-
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
if (unlikely(rc != 0)) {
	goto aead_map_failure;
} else {
areq_ctx->data_dma_buf_type = SSI_DMA_BUF_MLLI;
}
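/* MLLI is a table of chained DMA descriptors used for fragmented buffers, as opposed to a single direct (DLLI) entry */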
-
}
/* build mlli */
if (unlikely(ssi_buffer_mgr_generate_mlli(dev, &sg_data,
					  mlli_params) != 0)) {
goto fail_unmap_din;
}
-
}
areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);
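/* XOR-ing with swap_index toggles between the two staging buffers (index 0 and 1) */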
dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool);
kfree(drvdata->buff_mgr_handle);
drvdata->buff_mgr_handle = NULL;
-
}
return 0;
}
#endif
default:
break;
-
}
return -EINVAL;
}
#endif /*SSI_CC_HAS_MULTI2*/
default:
break;
-
}
return -EINVAL;
}
set_cipher_mode(&desc[*seq_size], cipher_mode);
set_cipher_config0(&desc[*seq_size], direction);
if (flow_mode == S_DIN_to_AES) {
-
if (ssi_is_hw_key(tfm)) {
set_hw_crypto_key(&desc[*seq_size],
ctx_p->hw.key1_slot);
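/* HW keys are referenced by their on-chip slot number rather than by a DMA address to key material */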
set_cipher_mode(&desc[*seq_size], ctx_p->cipher_mode);
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
(*seq_size)++;
-
}
#endif /*SSI_CC_HAS_MULTI2*/
}
/* For CTS: when the data size is aligned to 16 bytes, fall back to CBC mode */
if (((nbytes % AES_BLOCK_SIZE) == 0) && (ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS)) {
-
ctx_p->cipher_mode = DRV_CIPHER_CBC;
cts_restore_flag = 1;
}
/* Mask all interrupts */
WRITE_REGISTER(drvdata->cc_base +
CC_REG_OFFSET(HOST_RGF, HOST_IMR), 0xFFFFFFFF);
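/* writing all-ones to HOST_IMR masks every interrupt source until it is explicitly unmasked */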
-
}
static void cleanup_cc_resources(struct platform_device *plat_dev)
}
ctx->key_params.keylen = 0;
-
}
struct ssi_hash_handle *hash_handle = drvdata->hash_handle;
if (hash_handle != NULL) {
-
list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) {
crypto_unregister_ahash(&t_hash_alg->ahash_alg);
list_del(&t_hash_alg->entry);
kfree(t_hash_alg);
}
return rc;
-
}
#endif
request_mgr_handle->axi_completed +=
cc_axi_comp_count(cc_base);
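/* re-read the AXI completion counter to pick up completions that raced in during processing */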
}
-
}
/* after verifying that there is nothing to do, unmask AXI completion interrupt */
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR),
		      CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);