Remove needless braces around single-statement blocks in several places.
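
Per Documentation/process/coding-style.rst, a conditional body that is
a single statement takes no braces. A representative before/after pair
from this patch:

	if (rc != 0) {
		return rc;
	}

becomes:

	if (rc != 0)
		return rc;

Braces are kept where any branch of the same if/else chain spans more
than one statement.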
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
/* If an IV was generated, copy it back to the user provided buffer. */
if (areq_ctx->backup_giv != NULL) {
- if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ if (ctx->cipher_mode == DRV_CIPHER_CTR)
memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_IV_SIZE);
- } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+ else if (ctx->cipher_mode == DRV_CIPHER_CCM)
memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
- }
}
}
if (unlikely(rc != 0))
SSI_LOG_ERR("send_request() failed (rc=%d)\n", rc);
- if (likely(key_dma_addr != 0)) {
+ if (likely(key_dma_addr != 0))
dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
- }
return rc;
}
set_flow_mode(&desc[idx], ctx->flow_mode);
set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
hw_iv_size, NS_BIT);
- if (ctx->cipher_mode == DRV_CIPHER_CTR) {
+ if (ctx->cipher_mode == DRV_CIPHER_CTR)
set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
- } else {
+ else
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
- }
set_cipher_mode(&desc[idx], ctx->cipher_mode);
idx++;
static unsigned int format_ccm_a0(u8 *pA0Buff, u32 headerSize)
{
unsigned int len = 0;
- if (headerSize == 0) {
+ if (headerSize == 0)
return 0;
- }
+
if (headerSize < ((1UL << 16) - (1UL << 8))) {
len = 2;
}
/* process the cipher */
- if (req_ctx->cryptlen != 0) {
+ if (req_ctx->cryptlen != 0)
ssi_aead_process_cipher_data_desc(req, cipher_flow_mode, desc, &idx);
- }
/* Read temporal MAC */
hw_desc_init(&desc[idx]);
*b0 |= 64; /* Enable bit 6 if Adata exists. */
rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
- if (rc != 0) {
+ if (rc != 0)
return rc;
- }
/* END of "taken from crypto/ccm.c" */
/* l(a) - size of associated data. */
SSI_LOG_DEBUG("cipher_mode %d, authsize %d, enc_keylen %d, assoclen %d, cryptlen %d\n", \
ctx->cipher_mode, ctx->authsize, ctx->enc_keylen, req->assoclen, req_ctx->cryptlen);
- if (ctx->enckey != NULL) {
+ if (ctx->enckey != NULL)
dump_byte_array("mac key", ctx->enckey, 16);
- }
dump_byte_array("req->iv", req->iv, AES_BLOCK_SIZE);
dump_byte_array("gcm_len_block", req_ctx->gcm_len_block.lenA, AES_BLOCK_SIZE);
- if (req->src != NULL && req->cryptlen) {
+ if (req->src != NULL && req->cryptlen)
dump_byte_array("req->src", sg_virt(req->src), req->cryptlen + req->assoclen);
- }
- if (req->dst != NULL) {
+ if (req->dst != NULL)
dump_byte_array("req->dst", sg_virt(req->dst), req->cryptlen + ctx->authsize + req->assoclen);
- }
}
#endif
#if (SSI_CC_HAS_AES_CCM || SSI_CC_HAS_AES_GCM)
case DRV_HASH_NULL:
#if SSI_CC_HAS_AES_CCM
- if (ctx->cipher_mode == DRV_CIPHER_CCM) {
+ if (ctx->cipher_mode == DRV_CIPHER_CCM)
ssi_aead_ccm(req, desc, &seq_len);
- }
#endif /*SSI_CC_HAS_AES_CCM*/
#if SSI_CC_HAS_AES_GCM
- if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
+ if (ctx->cipher_mode == DRV_CIPHER_GCTR)
ssi_aead_gcm(req, desc, &seq_len);
- }
#endif /*SSI_CC_HAS_AES_GCM*/
break;
#endif
sg_list = sg_next(sg_list);
} else {
sg_list = (struct scatterlist *)sg_page(sg_list);
- if (is_chained != NULL) {
+ if (is_chained != NULL)
*is_chained = true;
- }
}
}
SSI_LOG_DEBUG("nents %d last bytes %d\n", nents, *lbytes);
/* Verify there is no memory overflow */
new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
- if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
+ if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES)
return -ENOMEM;
- }
/* handle buffer longer than 64 kbytes */
while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
rc = ssi_buffer_mgr_render_buff_to_mlli(
sg_dma_address(curr_sgl) + sglOffset, entry_data_len, curr_nents,
&mlli_entry_p);
- if (rc != 0) {
+ if (rc != 0)
return rc;
- }
+
sglOffset = 0;
}
*mlli_entry_pp = mlli_entry_p;
sg_data->entry[i].buffer_dma,
sg_data->total_data_len[i], &total_nents,
&mlli_p);
- if (rc != 0) {
+ if (rc != 0)
return rc;
- }
/* set last bit in the current table */
if (sg_data->mlli_nents[i] != NULL) {
u32 i, j;
struct scatterlist *l_sg = sg;
for (i = 0; i < nents; i++) {
- if (l_sg == NULL) {
+ if (l_sg == NULL)
break;
- }
if (unlikely(dma_map_sg(dev, l_sg, 1, direction) != 1)) {
SSI_LOG_ERR("dma_map_page() sg buffer failed\n");
goto err;
err:
/* Restore mapped parts */
for (j = 0; j < i; j++) {
- if (sg == NULL) {
+ if (sg == NULL)
break;
- }
dma_unmap_sg(dev, sg, 1, direction);
sg = sg_next(sg);
}
SSI_LOG_DEBUG("Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, req->assoclen, req->cryptlen);
size_to_unmap = req->assoclen + req->cryptlen;
- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_unmap += areq_ctx->req_authsize;
- }
if (areq_ctx->is_gcm4543)
size_to_unmap += crypto_aead_ivsize(tfm);
likely(req->src == req->dst))
{
u32 size_to_skip = req->assoclen;
- if (areq_ctx->is_gcm4543) {
+ if (areq_ctx->is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
- }
+
/* copy mac to a temporary location to deal with possible
* data memory overriding caused by a cache coherence problem.
*/
}
for (i = 0 ; i < (sgl_nents - MAX_ICV_NENTS_SUPPORTED) ; i++) {
- if (sgl == NULL) {
+ if (sgl == NULL)
break;
- }
sgl = sg_next(sgl);
}
- if (sgl != NULL) {
+ if (sgl != NULL)
icv_max_size = sgl->length;
- }
if (last_entry_data_size > authsize) {
nents = 0; /* ICV attached to data in last entry (not fragmented!) */
unsigned int sg_index = 0;
u32 size_of_assoc = req->assoclen;
- if (areq_ctx->is_gcm4543) {
+ if (areq_ctx->is_gcm4543)
size_of_assoc += crypto_aead_ivsize(tfm);
- }
if (sg_data == NULL) {
rc = -EINVAL;
* MAC verification upon request completion
*/
u32 size_to_skip = req->assoclen;
- if (areq_ctx->is_gcm4543) {
+ if (areq_ctx->is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
- }
+
ssi_buffer_mgr_copy_scatterlist_portion(
areq_ctx->backup_mac, req->src,
size_to_skip + req->cryptlen - areq_ctx->req_authsize,
bool chained = false;
bool is_gcm4543 = areq_ctx->is_gcm4543;
u32 size_to_skip = req->assoclen;
- if (is_gcm4543) {
+
+ if (is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
- }
+
offset = size_to_skip;
if (sg_data == NULL) {
areq_ctx->srcSgl = req->src;
areq_ctx->dstSgl = req->dst;
- if (is_gcm4543) {
+ if (is_gcm4543)
size_for_map += crypto_aead_ivsize(tfm);
- }
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
src_mapped_nents = ssi_buffer_mgr_get_sgl_nents(req->src, size_for_map, &src_last_bytes, &chained);
if (req->src != req->dst) {
size_for_map = req->assoclen + req->cryptlen;
size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0;
- if (is_gcm4543) {
+ if (is_gcm4543)
size_for_map += crypto_aead_ivsize(tfm);
- }
rc = ssi_buffer_mgr_map_scatterlist(dev, req->dst, size_for_map,
DMA_BIDIRECTIONAL, &(areq_ctx->dst.nents),
likely(req->src == req->dst))
{
u32 size_to_skip = req->assoclen;
- if (is_gcm4543) {
+
+ if (is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
- }
+
/* copy mac to a temporary location to deal with possible
* data memory overriding caused by a cache coherence problem.
*/
#endif /*SSI_CC_HAS_AES_GCM*/
size_to_map = req->cryptlen + req->assoclen;
- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) {
+ if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
size_to_map += authsize;
- }
+
if (is_gcm4543)
size_to_map += crypto_aead_ivsize(tfm);
rc = ssi_buffer_mgr_map_scatterlist(dev, req->src,
(areq_ctx->data_buff_type == SSI_DMA_BUF_MLLI))) {
mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
rc = ssi_buffer_mgr_generate_mlli(dev, &sg_data, mlli_params);
- if (unlikely(rc != 0)) {
+ if (unlikely(rc != 0))
goto aead_map_failure;
- }
ssi_buffer_mgr_update_aead_mlli_nents(drvdata, req);
SSI_LOG_DEBUG("assoc params mn %d\n", areq_ctx->assoc.mlli_nents);
dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
unmap_curr_buff:
- if (*curr_buff_cnt != 0) {
+ if (*curr_buff_cnt != 0)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
- }
+
return -ENOMEM;
}
dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);
unmap_curr_buff:
- if (*curr_buff_cnt != 0) {
+ if (*curr_buff_cnt != 0)
dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
- }
+
return -ENOMEM;
}
{
struct ssi_crypto_alg *ssi_alg = container_of(tfm->__crt_alg, struct ssi_crypto_alg, crypto_alg);
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_ABLKCIPHER) {
+ if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_ABLKCIPHER)
return ssi_alg->crypto_alg.cra_ablkcipher.max_keysize;
- }
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER) {
+ if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_BLKCIPHER)
return ssi_alg->crypto_alg.cra_blkcipher.max_keysize;
- }
return 0;
}
/* A weak key is defined as a key whose first half (128/256 lsb) equals its second half (128/256 msb) */
int singleKeySize = keylen >> 1;
- if (unlikely(memcmp(key, &key[singleKeySize], singleKeySize) == 0)) {
+ if (unlikely(memcmp(key, &key[singleKeySize], singleKeySize) == 0))
return -ENOEXEC;
- }
#endif /* CCREE_FIPS_SUPPORT */
return 0;
#if SSI_CC_HAS_MULTI2
/* last byte of key buffer is round number and should not be a part of key size */
- if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
+ if (ctx_p->flow_mode == S_DIN_to_MULTI2)
keylen -= 1;
- }
#endif /*SSI_CC_HAS_MULTI2*/
if (unlikely(validate_keys_sizes(ctx_p, keylen) != 0)) {
nbytes, NS_BIT);
set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
nbytes, NS_BIT, (!areq ? 0 : 1));
- if (areq != NULL) {
+ if (areq != NULL)
set_queue_last_ind(&desc[*seq_size]);
- }
+
set_flow_mode(&desc[*seq_size], flow_mode);
(*seq_size)++;
} else {
req_ctx->out_mlli_nents, NS_BIT,
(!areq ? 0 : 1));
}
- if (areq != NULL) {
+ if (areq != NULL)
set_queue_last_ind(&desc[*seq_size]);
- }
+
set_flow_mode(&desc[*seq_size], flow_mode);
(*seq_size)++;
}
/* Setup processing */
#if SSI_CC_HAS_MULTI2
- if (ctx_p->flow_mode == S_DIN_to_MULTI2) {
- ssi_blkcipher_create_multi2_setup_desc(tfm,
- req_ctx,
- ivsize,
- desc,
- &seq_len);
- } else
+ if (ctx_p->flow_mode == S_DIN_to_MULTI2)
+ ssi_blkcipher_create_multi2_setup_desc(tfm, req_ctx, ivsize,
+ desc, &seq_len);
+ else
#endif /*SSI_CC_HAS_MULTI2*/
- {
- ssi_blkcipher_create_setup_desc(tfm,
- req_ctx,
- ivsize,
- nbytes,
- desc,
- &seq_len);
- }
+ ssi_blkcipher_create_setup_desc(tfm, req_ctx, ivsize, nbytes,
+ desc, &seq_len);
/* Data processing */
ssi_blkcipher_create_data_desc(tfm,
req_ctx,
cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);
val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
- if (is_probe) {
+
+ if (is_probe)
SSI_LOG_INFO("Cache params previous: 0x%08X\n", val);
- }
+
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS),
cache_params);
val = CC_HAL_READ_REGISTER(CC_REG_OFFSET(CRY_KERNEL, AXIM_CACHE_PARAMS));
- if (is_probe) {
+
+ if (is_probe)
SSI_LOG_INFO("Cache params current: 0x%08X (expect: 0x%08X)\n",
val, cache_params);
- }
return 0;
}
{
int rc = 0;
- if (p_state == NULL) {
+ if (p_state == NULL)
return -EINVAL;
- }
rc = ssi_fips_ext_get_state(p_state);
{
int rc = 0;
- if (p_err == NULL) {
+ if (p_err == NULL)
return -EINVAL;
- }
rc = ssi_fips_ext_get_error(p_err);
{
int rc = 0;
- if (p_state == NULL) {
+ if (p_state == NULL)
return -EINVAL;
- }
*p_state = fips_state;
{
int rc = 0;
- if (p_err == NULL) {
+ if (p_err == NULL)
return -EINVAL;
- }
*p_err = fips_error;
void __iomem *cc_base = drvdata->cc_base;
regVal = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
- if (regVal == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) {
+ if (regVal == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
return CC_REE_FIPS_ERROR_OK;
- }
+
return CC_REE_FIPS_ERROR_FROM_TEE;
}
static void ssi_fips_update_tee_upon_ree_status(struct ssi_drvdata *drvdata, ssi_fips_error_t err)
{
void __iomem *cc_base = drvdata->cc_base;
- if (err == CC_REE_FIPS_ERROR_OK) {
+ if (err == CC_REE_FIPS_ERROR_OK)
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_OK));
- } else {
+ else
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_ERROR));
- }
}
if (irq & SSI_GPR0_IRQ_MASK) {
teeFipsError = CC_HAL_READ_REGISTER(CC_REG_OFFSET(HOST_RGF, GPR_HOST));
- if (teeFipsError != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK)) {
+ if (teeFipsError != (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK))
ssi_fips_set_error(drvdata, CC_REE_FIPS_ERROR_FROM_TEE);
- }
}
/* after verifying that there is nothing to do, unmask AXI completion interrupt */
// the dma_handle is the returned phy address - use it in the HW descriptor
FIPS_DBG("dma_alloc_coherent \n");
cpu_addr_buffer = dma_alloc_coherent(dev, alloc_buff_size, &dma_handle, GFP_KERNEL);
- if (cpu_addr_buffer == NULL) {
+ if (cpu_addr_buffer == NULL)
return CC_REE_FIPS_ERROR_GENERAL;
- }
+
FIPS_DBG("allocated coherent buffer - addr 0x%08X , size = %d \n", (size_t)cpu_addr_buffer, alloc_buff_size);
#if FIPS_POWER_UP_TEST_CIPHER
FIPS_LOG("ssi_fips_set_error - fips_error = %d \n", err);
// setting no error is not allowed
- if (err == CC_REE_FIPS_ERROR_OK) {
+ if (err == CC_REE_FIPS_ERROR_OK)
return -ENOEXEC;
- }
+
// If error exists, do not set new error
- if (ssi_fips_get_error(&current_err) != 0) {
+ if (ssi_fips_get_error(&current_err) != 0)
return -ENOEXEC;
- }
- if (current_err != CC_REE_FIPS_ERROR_OK) {
+
+ if (current_err != CC_REE_FIPS_ERROR_OK)
return -ENOEXEC;
- }
+
// set REE internal error and state
rc = ssi_fips_ext_set_error(err);
- if (rc != 0) {
+ if (rc != 0)
return -ENOEXEC;
- }
+
rc = ssi_fips_ext_set_state(CC_FIPS_STATE_ERROR);
- if (rc != 0) {
+ if (rc != 0)
return -ENOEXEC;
- }
// push error towards TEE library, if it's not a TEE error
- if (err != CC_REE_FIPS_ERROR_FROM_TEE) {
+ if (err != CC_REE_FIPS_ERROR_FROM_TEE)
ssi_fips_update_tee_upon_ree_status(p_drvdata, err);
- }
+
return rc;
}
} else { /*sha*/
memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize);
#if (DX_DEV_SHA_MAX > 256)
- if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384))) {
+ if (unlikely((ctx->hash_mode == DRV_HASH_SHA512) || (ctx->hash_mode == DRV_HASH_SHA384)))
memcpy(state->digest_bytes_len, digest_len_sha512_init, HASH_LEN_SIZE);
- } else {
+ else
memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
- }
#else
memcpy(state->digest_bytes_len, digest_len_init, HASH_LEN_SIZE);
#endif
NS_BIT);
} else {
set_din_const(&desc[idx], 0, HASH_LEN_SIZE);
- if (likely(nbytes != 0)) {
+ if (likely(nbytes != 0))
set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
- } else {
+ else
set_cipher_do(&desc[idx], DO_PAD);
- }
}
set_flow_mode(&desc[idx], S_DIN_to_HASH);
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
/* TODO */
set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
NS_BIT, (async_req ? 1 : 0));
- if (async_req) {
+ if (async_req)
set_queue_last_ind(&desc[idx]);
- }
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
set_cipher_mode(&desc[idx], ctx->hw_mode);
set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
HASH_LEN_SIZE, NS_BIT, (async_req ? 1 : 0));
- if (async_req) {
+ if (async_req)
set_queue_last_ind(&desc[idx]);
- }
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
idx++;
/* TODO */
set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
NS_BIT, (async_req ? 1 : 0));
- if (async_req) {
+ if (async_req)
set_queue_last_ind(&desc[idx]);
- }
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
hw_desc_init(&desc[idx]);
set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
NS_BIT, (async_req ? 1 : 0));
- if (async_req) {
+ if (async_req)
set_queue_last_ind(&desc[idx]);
- }
set_flow_mode(&desc[idx], S_HASH_to_DOUT);
set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
return -ENOMEM;
}
- if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
ssi_hash_create_xcbc_setup(req, desc, &idx);
- } else {
+ else
ssi_hash_create_cmac_setup(req, desc, &idx);
- }
ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
idx++;
}
- if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
+ if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
ssi_hash_create_xcbc_setup(req, desc, &idx);
- } else {
+ else
ssi_hash_create_cmac_setup(req, desc, &idx);
- }
if (state->xcbc_count == 0) {
hw_desc_init(&desc[idx]);
set_flow_mode(&desc[idx], flow_mode);
idx++;
}
- if (is_not_last_data) {
+ if (is_not_last_data)
set_din_not_last_indication(&desc[(idx - 1)]);
- }
/* return updated desc sequence size */
*seq_size = idx;
}
/* Generate initial pool */
rc = ssi_ivgen_generate_pool(ivgen_ctx, iv_seq, &iv_seq_len);
- if (unlikely(rc != 0)) {
+ if (unlikely(rc != 0))
return rc;
- }
+
/* Fire-and-forget */
return send_request_init(drvdata, iv_seq, iv_seq_len);
}
return -EBUSY;
}
- if ((likely(req_mgr_h->q_free_slots >= total_seq_len))) {
+ if (likely(req_mgr_h->q_free_slots >= total_seq_len))
return 0;
- }
+
/* Wait for space in HW queue. Poll constant num of iterations. */
for (poll_queue = 0; poll_queue < SSI_MAX_POLL_ITER ; poll_queue++) {
req_mgr_h->q_free_slots =
}
used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE - 1));
- if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots)) {
+ if (unlikely(used_sw_slots > req_mgr_h->max_used_sw_slots))
req_mgr_h->max_used_sw_slots = used_sw_slots;
- }
/* Enqueue request - must be locked with HW lock*/
req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *ssi_req;
/* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT. */
rc = request_mgr_queues_status_check(req_mgr_h, cc_base, total_seq_len);
- if (unlikely(rc != 0)) {
+ if (unlikely(rc != 0))
return rc;
- }
+
set_queue_last_ind(&desc[(len - 1)]);
enqueue_seq(cc_base, desc, len);
u32 axi_err;
int i;
SSI_LOG_INFO("Delay\n");
- for (i = 0; i < 1000000; i++) {
+ for (i = 0; i < 1000000; i++)
axi_err = READ_REGISTER(drvdata->cc_base + CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
- }
}
#endif /* COMPLETION_DELAY */
- if (likely(ssi_req->user_cb != NULL)) {
+ if (likely(ssi_req->user_cb != NULL))
ssi_req->user_cb(&plat_dev->dev, ssi_req->user_arg, drvdata->cc_base);
- }
request_mgr_handle->req_queue_tail = (request_mgr_handle->req_queue_tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
SSI_LOG_DEBUG("Dequeue request tail=%u\n", request_mgr_handle->req_queue_tail);
SSI_LOG_DEBUG("Request completed. axi_completed=%d\n", request_mgr_handle->axi_completed);
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
rc = ssi_power_mgr_runtime_put_suspend(&plat_dev->dev);
- if (rc != 0) {
+ if (rc != 0)
SSI_LOG_ERR("Failed to set runtime suspension %d\n", rc);
- }
#endif
}
}
int i = 0, offset = 0;
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "Usage:\n");
- for (i = 0; i < ARRAY_SIZE(help_str); i += 2) {
+ for (i = 0; i < ARRAY_SIZE(help_str); i += 2)
offset += scnprintf(buf + offset, PAGE_SIZE - offset, "%s\t\t%s\n", help_str[i], help_str[i + 1]);
- }
+
return offset;
}