Remove or add blank lines as needed to match coding style.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
-
/* Value of each ICV_CMP byte (of 8) in case of success */
#define ICV_VERIF_OK 0x01
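For context, the completion path checks the 8-byte ICV-compare word the HW writes back; every byte must equal ICV_VERIF_OK. A minimal sketch of such a check (the helper name and buffer argument are hypothetical, not the driver's actual completion handler):

/* Sketch: each of the ICV_CMP_SIZE (8) bytes holds ICV_VERIF_OK (0x01)
 * when the MAC comparison succeeded.
 */
static int icv_cmp_ok(const u8 *icv_cmp_buf)
{
	int i;

	for (i = 0; i < ICV_CMP_SIZE; i++) {
		if (icv_cmp_buf[i] != ICV_VERIF_OK)
			return -EBADMSG; /* authentication failed */
	}
	return 0;
}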
return -ENOMEM;
}
-
static void ssi_aead_complete(struct device *dev, void *ssi_req, void __iomem *cc_base)
{
struct aead_request *areq = (struct aead_request *)ssi_req;
return 0; /* All tests of keys sizes passed */
}
+
/* This function prepares the user key so it can be passed to the hmac
 * processing (copy to internal buffer or hash in case of key longer
 * than block size).
 */
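A sketch of the standard RFC 2104 key preparation this comment describes: keys longer than the hash block size are digested down, shorter keys are copied and zero-padded. The helper below is illustrative only (the driver itself queues HW descriptors rather than calling a software shash):

/* Sketch, assuming a software shash tfm is available. */
static int hmac_prepare_key(struct crypto_shash *tfm, const u8 *key,
			    unsigned int keylen, u8 *padded_key,
			    unsigned int blocksize)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int rc = 0;

	desc->tfm = tfm;
	if (keylen > blocksize) {
		/* hash long keys down to the digest size */
		rc = crypto_shash_digest(desc, key, keylen, padded_key);
		keylen = crypto_shash_digestsize(tfm);
	} else {
		memcpy(padded_key, key, keylen);
	}
	memset(padded_key + keylen, 0, blocksize - keylen);
	return rc;
}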
return rc;
}
-
static int
ssi_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen)
{
goto badkey;
}
-
/* STAT_PHASE_2: Create sequence */
switch (ctx->auth_mode) {
goto badkey;
}
-
/* STAT_PHASE_3: Submit sequence to HW */
if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
static unsigned int format_ccm_a0(u8 *pA0Buff, u32 headerSize)
{
unsigned int len = 0;
+
if (headerSize == 0)
return 0;
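For reference, a function of this shape implements the associated-data length encoding of RFC 3610 section 2.2: lengths below 2^16 - 2^8 take two bytes, larger 32-bit lengths take the 0xff/0xfe escape plus four bytes. A hedged sketch of the full encoding (constants come from the RFC; only the zero-length early return above is copied from the driver):

/* Sketch of the RFC 3610 a(0) length encoding. */
static unsigned int format_a0_sketch(u8 *buf, u32 header_size)
{
	if (header_size == 0)
		return 0;

	if (header_size < ((1UL << 16) - (1UL << 8))) {
		buf[0] = (header_size >> 8) & 0xFF;
		buf[1] = header_size & 0xFF;
		return 2;
	}

	buf[0] = 0xFF;
	buf[1] = 0xFE;
	buf[2] = (header_size >> 24) & 0xFF;
	buf[3] = (header_size >> 16) & 0xFF;
	buf[4] = (header_size >> 8) & 0xFF;
	buf[5] = header_size & 0xFF;
	return 6;
}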
unsigned int cipher_flow_mode;
dma_addr_t mac_result;
-
if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
cipher_flow_mode = AES_to_HASH_and_DOUT;
mac_result = req_ctx->mac_buf_dma_addr;
set_aes_not_hash_mode(&desc[idx]);
idx++;
-
/* process assoc data */
if (req->assoclen > 0) {
ssi_aead_create_assoc_desc(req, DIN_HASH, desc, &idx);
req->cryptlen :
(req->cryptlen - ctx->authsize);
int rc;
+
memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
cipher_flow_mode = AES_to_HASH_and_DOUT;
}
-
// In RFC4543 there is no data to encrypt; just copy data from src to dst.
if (req_ctx->plaintext_authenticate_only) {
ssi_aead_process_cipher_data_desc(req, BYPASS, desc, seq_size);
memcpy(req->iv + 12, &counter, 4);
memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
-
if (!req_ctx->plaintext_authenticate_only) {
__be64 temp64;
+
temp64 = cpu_to_be64(req->assoclen * 8);
memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
temp64 = cpu_to_be64(cryptlen * 8);
memcpy(&req_ctx->gcm_len_block.lenC, &temp64, 8);
} else { // RFC4543: all data (AAD, IV, plaintext) is treated as additional data, i.e. nothing is encrypted.
__be64 temp64;
+
temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
memcpy(&req_ctx->gcm_len_block.lenA, &temp64, sizeof(temp64));
temp64 = 0;
req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
-
#endif /*SSI_CC_HAS_AES_GCM*/
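The lenA/lenC block built above is the final GHASH input from NIST SP 800-38D: len(A) || len(C) as two big-endian 64-bit bit counts. A standalone sketch with hypothetical names (e.g. assoclen = 16, cryptlen = 32 gives 0x0000000000000080 || 0x0000000000000100):

/* Sketch: SP 800-38D GHASH length block. */
static void gcm_fill_len_block(u8 len_block[16], u64 assoclen, u64 cryptlen)
{
	__be64 lena = cpu_to_be64(assoclen * 8);
	__be64 lenc = cpu_to_be64(cryptlen * 8);

	memcpy(len_block, &lena, sizeof(lena));
	memcpy(len_block + 8, &lenc, sizeof(lenc));
}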
static int ssi_aead_process(struct aead_request *req, enum drv_crypto_direction direct)
struct device *dev = &ctx->drvdata->plat_dev->dev;
struct ssi_crypto_req ssi_req = {};
-
SSI_LOG_DEBUG("%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Encrypt" : "Decrypt"), ctx, req, req->iv,
sg_virt(req->src), req->src->offset, sg_virt(req->dst), req->dst->offset, req->cryptlen);
areq_ctx->req_authsize = ctx->authsize;
areq_ctx->cipher_mode = ctx->cipher_mode;
-
/* STAT_PHASE_1: Map buffers */
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
}
-
/* STAT_PHASE_2: Create sequence */
/* Load MLLI tables to SRAM if necessary */
goto exit;
}
-
/* STAT_PHASE_3: Lock HW and push sequence */
rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
ssi_buffer_mgr_unmap_aead_request(dev, req);
}
-
exit:
return rc;
}
#include <crypto/algapi.h>
#include <crypto/ctr.h>
-
/* mac_cmp - HW writes 8 B but all bytes hold the same value */
#define ICV_CMP_SIZE 8
#define CCM_CONFIG_BUF_SIZE (AES_BLOCK_SIZE * 3)
#define MAX_MAC_SIZE MAX(SHA256_DIGEST_SIZE, AES_BLOCK_SIZE)
-
/* defines for AES GCM configuration buffer */
#define GCM_BLOCK_LEN_SIZE 8
#define GCM_BLOCK_RFC4_NONCE_OFFSET 0
#define GCM_BLOCK_RFC4_NONCE_SIZE 4
-
-
/* Offsets into AES CCM configuration buffer */
#define CCM_B0_OFFSET 0
#define CCM_A0_OFFSET 16
#define GET_DMA_BUFFER_TYPE(buff_type)
#endif
-
enum dma_buffer_type {
DMA_NULL_TYPE = -1,
DMA_SGL_TYPE = 1,
struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes, bool *is_chained)
{
unsigned int nents = 0;
+
while (nbytes != 0) {
if (sg_is_chain(sg_list)) {
SSI_LOG_ERR("Unexpected chained entry "
return 0;
}
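A minimal sketch of the walk this fragment belongs to, counting how many scatterlist entries cover nbytes (the real function additionally reports total bytes and rejects chained entries, as the log above shows; the helper name is hypothetical):

/* Sketch: count sg entries needed to cover nbytes. */
static unsigned int count_sg_nents(struct scatterlist *sg_list,
				   unsigned int nbytes)
{
	unsigned int nents = 0;

	while (nbytes && sg_list) {
		nents++;
		nbytes -= min(nbytes, sg_list->length);
		sg_list = sg_next(sg_list);
	}
	return nents;
}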
-
static inline int ssi_buffer_mgr_render_scatterlist_to_mlli(
struct scatterlist *sgl, u32 sgl_data_len, u32 sglOffset, u32 *curr_nents,
u32 **mlli_entry_pp)
{
u32 i, j;
struct scatterlist *l_sg = sg;
+
for (i = 0; i < nents; i++) {
if (!l_sg)
break;
return 0;
}
-
static inline int ssi_ahash_handle_curr_buf(struct device *dev,
struct ahash_req_ctx *areq_ctx,
u8 *curr_buff,
likely(req->src == req->dst))
{
u32 size_to_skip = req->assoclen;
+
if (areq_ctx->is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
* MAC verification upon request completion
*/
u32 size_to_skip = req->assoclen;
+
if (areq_ctx->is_gcm4543)
size_to_skip += crypto_aead_ivsize(tfm);
#include "ssi_config.h"
#include "ssi_driver.h"
-
enum ssi_req_dma_buf_type {
SSI_DMA_BUF_NULL = 0,
SSI_DMA_BUF_DLLI,
u8 *key;
dma_addr_t key_dma_addr;
};
+
struct cc_hw_key_info {
enum cc_hw_crypto_key key1_slot;
enum cc_hw_crypto_key key2_slot;
static void ssi_ablkcipher_complete(struct device *dev, void *ssi_req, void __iomem *cc_base);
-
static int validate_keys_sizes(struct ssi_ablkcipher_ctx *ctx_p, u32 size) {
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
return -EINVAL;
}
-
static int validate_data_size(struct ssi_ablkcipher_ctx *ctx_p, unsigned int size) {
switch (ctx_p->flow_mode) {
case S_DIN_to_AES:
SSI_LOG_DEBUG("Free key buffer in context. key=@%p\n", ctx_p->user.key);
}
-
struct tdes_keys {
u8 key1[DES_KEY_SIZE];
u8 key2[DES_KEY_SIZE];
return -EINVAL;
}
-
-
/* STAT_PHASE_1: Copy key to ctx */
dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
max_key_buf_size, DMA_TO_DEVICE);
int key_len = keylen >> 1;
int err;
SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);
+
desc->tfm = ctx_p->shash_tfm;
err = crypto_shash_digest(desc, ctx_p->user.key, key_len, ctx_p->user.key + key_len);
max_key_buf_size, DMA_TO_DEVICE);
ctx_p->keylen = keylen;
-
SSI_LOG_DEBUG("ssi_blkcipher_setkey: return safely");
return 0;
}
set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
(*seq_size)++;
-
/* Set state */
hw_desc_init(&desc[*seq_size]);
set_din_type(&desc[*seq_size], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
ssi_buffer_mgr_unmap_blkcipher_request(dev, req_ctx, ivsize, src, dst);
-
/* Set the inflight counter value to a local variable */
inflight_counter = ctx_p->drvdata->inflight_counter;
/*Decrease the inflight counter*/
/* Setup request context */
req_ctx->gen_ctx.op_type = direction;
-
/* STAT_PHASE_1: Map buffers */
rc = ssi_buffer_mgr_map_blkcipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes, info, src, dst);
goto exit_process;
}
-
/* STAT_PHASE_2: Create sequence */
/* Setup processing */
return ssi_blkcipher_init(tfm);
}
-
static int ssi_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
const u8 *key,
unsigned int keylen)
return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
}
-
/* DX Block cipher alg */
static struct ssi_alg_template blkcipher_algs[] = {
/* Async template */
struct ssi_blkcipher_handle *blkcipher_handle =
drvdata->blkcipher_handle;
struct device *dev;
+
dev = &drvdata->plat_dev->dev;
if (blkcipher_handle) {
return 0;
}
-
-
int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata)
{
struct ssi_blkcipher_handle *ablkcipher_handle;
#include "ssi_driver.h"
#include "ssi_buffer_mgr.h"
-
/* Crypto cipher flags */
#define CC_CRYPTO_CIPHER_KEY_KFDE0 (1 << 0)
#define CC_CRYPTO_CIPHER_KEY_KFDE1 (1 << 1)
#define CC_CRYPTO_CIPHER_KEY_KFDE_MASK (CC_CRYPTO_CIPHER_KEY_KFDE0 | CC_CRYPTO_CIPHER_KEY_KFDE1 | CC_CRYPTO_CIPHER_KEY_KFDE2 | CC_CRYPTO_CIPHER_KEY_KFDE3)
-
struct blkcipher_req_ctx {
struct async_gen_req_ctx gen_ctx;
enum ssi_req_dma_buf_type dma_buf_type;
struct mlli_params mlli_params;
};
-
-
int ssi_ablkcipher_alloc(struct ssi_drvdata *drvdata);
int ssi_ablkcipher_free(struct ssi_drvdata *drvdata);
CRYPTO_ALG_BULK_DU_4096)
#endif /* CRYPTO_ALG_BULK_MASK */
-
#ifdef CRYPTO_TFM_REQ_HW_KEY
static inline bool ssi_is_hw_key(struct crypto_tfm *tfm)
#endif /* CRYPTO_TFM_REQ_HW_KEY */
-
#endif /*__SSI_CIPHER_H__*/
#include "ssi_pm.h"
#include "ssi_fips_local.h"
-
#ifdef DX_DUMP_BYTES
void dump_byte_array(const char *name, const u8 *the_array, unsigned long size)
{
SSI_LOG_DEBUG("CC registers mapped from %pa to 0x%p\n", &new_drvdata->res_mem->start, cc_base);
new_drvdata->cc_base = cc_base;
-
/* Then IRQ */
new_drvdata->res_irq = platform_get_resource(plat_dev, IORESOURCE_IRQ, 0);
if (unlikely(!new_drvdata->res_irq)) {
return 0;
}
+
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
static struct dev_pm_ops arm_cc7x_driver_pm = {
SET_RUNTIME_PM_OPS(ssi_power_mgr_runtime_suspend, ssi_power_mgr_runtime_resume, NULL)
#define DX_DRIVER_RUNTIME_PM NULL
#endif
-
#ifdef CONFIG_OF
static const struct of_device_id arm_cc7x_dev_of_match[] = {
{.compatible = "arm,cryptocell-712-ree"},
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-
/**************************************************************
* This file defines the driver FIPS APIs *
**************************************************************/
#include <linux/module.h>
#include "ssi_fips.h"
-
extern int ssi_fips_ext_get_state(enum cc_fips_state_t *p_state);
extern int ssi_fips_ext_get_error(enum cc_fips_error *p_err);
CC_FIPS_STATE_RESERVE32B = S32_MAX
};
-
enum cc_fips_error {
CC_REE_FIPS_ERROR_OK = 0,
CC_REE_FIPS_ERROR_GENERAL,
CC_REE_FIPS_ERROR_RESERVE32B = S32_MAX
};
-
-
int ssi_fips_get_state(enum cc_fips_state *p_state);
int ssi_fips_get_error(enum cc_fips_error *p_err);
#define NIST_AES_192_CTR_CIPHER { 0x1a, 0xbc, 0x93, 0x24, 0x17, 0x52, 0x1c, 0xa2, 0x4f, 0x2b, 0x04, 0x59, 0xfe, 0x7e, 0x6e, 0x0b }
#define NIST_AES_256_CTR_CIPHER { 0x60, 0x1e, 0xc3, 0x13, 0x77, 0x57, 0x89, 0xa5, 0xb7, 0xa7, 0xf5, 0x04, 0xbb, 0xf3, 0xd2, 0x28 }
-
#define RFC3962_AES_128_KEY { 0x63, 0x68, 0x69, 0x63, 0x6b, 0x65, 0x6e, 0x20, 0x74, 0x65, 0x72, 0x69, 0x79, 0x61, 0x6b, 0x69 }
#define RFC3962_AES_VECTOR_SIZE 17
#define RFC3962_AES_PLAIN_DATA { 0x49, 0x20, 0x77, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6c, 0x69, 0x6b, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20 }
#define RFC3962_AES_CBC_CTS_IV { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
#define RFC3962_AES_128_CBC_CTS_CIPHER { 0xc6, 0x35, 0x35, 0x68, 0xf2, 0xbf, 0x8c, 0xb4, 0xd8, 0xa5, 0x80, 0x36, 0x2d, 0xa7, 0xff, 0x7f, 0x97 }
-
#define NIST_AES_256_XTS_KEY { 0xa1, 0xb9, 0x0c, 0xba, 0x3f, 0x06, 0xac, 0x35, 0x3b, 0x2c, 0x34, 0x38, 0x76, 0x08, 0x17, 0x62, \
0x09, 0x09, 0x23, 0x02, 0x6e, 0x91, 0x77, 0x18, 0x15, 0xf2, 0x9d, 0xab, 0x01, 0x93, 0x2f, 0x2f }
#define NIST_AES_256_XTS_IV { 0x4f, 0xae, 0xf7, 0x11, 0x7c, 0xda, 0x59, 0xc6, 0x6e, 0x4b, 0x92, 0x01, 0x3e, 0x76, 0x8a, 0xd5 }
#define NIST_AES_512_XTS_CIPHER { 0xcb, 0xaa, 0xd0, 0xe2, 0xf6, 0xce, 0xa3, 0xf5, 0x0b, 0x37, 0xf9, 0x34, 0xd4, 0x6a, 0x9b, 0x13, \
0x0b, 0x9d, 0x54, 0xf0, 0x7e, 0x34, 0xf3, 0x6a, 0xf7, 0x93, 0xe8, 0x6f, 0x73, 0xc6, 0xd7, 0xdb }
-
/* NIST AES-CMAC */
#define NIST_AES_128_CMAC_KEY { 0x67, 0x08, 0xc9, 0x88, 0x7b, 0x84, 0x70, 0x84, 0xf1, 0x23, 0xd3, 0xdd, 0x9c, 0x3a, 0x81, 0x36 }
#define NIST_AES_128_CMAC_PLAIN_DATA { 0xa8, 0xde, 0x55, 0x17, 0x0c, 0x6d, 0xc0, 0xd8, 0x0d, 0xe3, 0x2f, 0x50, 0x8b, 0xf4, 0x9b, 0x70 }
#define NIST_AES_256_CMAC_VECTOR_SIZE 16
#define NIST_AES_256_CMAC_OUTPUT_SIZE 10
-
/* NIST TDES */
#define TDES_NUM_OF_KEYS 3
#define NIST_TDES_VECTOR_SIZE 8
#define NIST_TDES_CBC3_PLAIN_DATA { 0x3b, 0xb7, 0xa7, 0xdb, 0xa3, 0xd5, 0x92, 0x91 }
#define NIST_TDES_CBC3_CIPHER { 0x5b, 0x84, 0x24, 0xd2, 0x39, 0x3e, 0x55, 0xa2 }
-
/* NIST AES-CCM */
#define NIST_AESCCM_128_BIT_KEY_SIZE 16
#define NIST_AESCCM_192_BIT_KEY_SIZE 24
#define NIST_AESCCM_256_CIPHER { 0xcc, 0x17, 0xbf, 0x87, 0x94, 0xc8, 0x43, 0x45, 0x7d, 0x89, 0x93, 0x91, 0x89, 0x8e, 0xd2, 0x2a }
#define NIST_AESCCM_256_MAC { 0x6f, 0x9d, 0x28, 0xfc, 0xb6, 0x42, 0x34, 0xe1, 0xcd, 0x79, 0x3c, 0x41, 0x44, 0xf1, 0xda, 0x50 }
-
/* NIST AES-GCM */
#define NIST_AESGCM_128_BIT_KEY_SIZE 16
#define NIST_AESGCM_192_BIT_KEY_SIZE 24
#define NIST_AESGCM_256_CIPHER { 0x42, 0x6e, 0x0e, 0xfc, 0x69, 0x3b, 0x7b, 0xe1, 0xf3, 0x01, 0x8d, 0xb7, 0xdd, 0xbb, 0x7e, 0x4d }
#define NIST_AESGCM_256_MAC { 0xee, 0x82, 0x57, 0x79, 0x5b, 0xe6, 0xa1, 0x16, 0x4d, 0x7e, 0x1d, 0x2d, 0x6c, 0xac, 0x77, 0xa7 }
-
/* NIST HASH */
#define NIST_SHA_MSG_SIZE 16
0x8f, 0x2b, 0xa9, 0x1c, 0x3a, 0x9f, 0x0c, 0x16, 0x53, 0xc4, 0xbf, 0x0a, 0xda, 0x35, 0x64, 0x55, \
0xea, 0x36, 0xfd, 0x31, 0xf8, 0xe7, 0x3e, 0x39, 0x51, 0xca, 0xd4, 0xeb, 0xba, 0x8c, 0x6e, 0x04 }
-
/* NIST HMAC */
#define NIST_HMAC_MSG_SIZE 128
#include "ssi_fips_local.h"
#include "ssi_driver.h"
-
static bool tee_error;
module_param(tee_error, bool, 0644);
MODULE_PARM_DESC(tee_error, "Simulate TEE library failure flag: 0 - no error (default), 1 - TEE error occurred");
return 0;
}
-
#include "ssi_hash.h"
#include "ssi_request_mgr.h"
-
static const u32 digest_len_init[] = {
0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const u32 sha1_init[] = {
SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
#endif
-
#define NIST_CIPHER_AES_MAX_VECTOR_SIZE 32
struct fips_cipher_ctx {
size_t dataInSize;
} FipsCipherData;
-
struct fips_cmac_ctx {
u8 key[AES_256_BIT_KEY_SIZE];
u8 din[NIST_CIPHER_AES_MAX_VECTOR_SIZE];
size_t mac_res_size;
} FipsCmacData;
-
struct fips_hash_ctx {
u8 initial_digest[CC_DIGEST_SIZE_MAX];
u8 din[NIST_SHA_MSG_SIZE];
u8 mac_res[CC_DIGEST_SIZE_MAX];
} FipsHashData;
-
/* note that the hmac key length must be less than or equal to the block size (the block size is 64 bytes up to sha256 and 128 bytes for sha384/512) */
struct fips_hmac_ctx {
u8 initial_digest[CC_DIGEST_SIZE_MAX];
u8 mac_res[CC_DIGEST_SIZE_MAX];
} FipsHmacData;
-
#define FIPS_CCM_B0_A0_ADATA_SIZE (NIST_AESCCM_IV_SIZE + NIST_AESCCM_IV_SIZE + NIST_AESCCM_ADATA_SIZE)
struct fips_ccm_ctx {
u8 macResOut[NIST_AESCCM_TAG_SIZE];
} FipsCcmData;
-
struct fips_gcm_ctx {
u8 adata[NIST_AESGCM_ADATA_SIZE];
u8 key[CC_AES_KEY_SIZE_MAX];
u8 macResOut[NIST_AESGCM_TAG_SIZE];
} FipsGcmData;
-
typedef union _fips_ctx {
struct fips_cipher_ctx cipher;
struct fips_cmac_ctx cmac;
struct fips_gcm_ctx gcm;
} fips_ctx;
-
/* test data tables */
static const FipsCipherData FipsCipherDataTable[] = {
/* AES */
{ 0, NIST_TDES_CBC3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_CBC3_IV, DRV_CRYPTO_DIRECTION_ENCRYPT, DRV_CIPHER_CBC, NIST_TDES_CBC3_PLAIN_DATA, NIST_TDES_CBC3_CIPHER, NIST_TDES_VECTOR_SIZE },
{ 0, NIST_TDES_CBC3_KEY, CC_DRV_DES_TRIPLE_KEY_SIZE, NIST_TDES_CBC3_IV, DRV_CRYPTO_DIRECTION_DECRYPT, DRV_CIPHER_CBC, NIST_TDES_CBC3_CIPHER, NIST_TDES_CBC3_PLAIN_DATA, NIST_TDES_VECTOR_SIZE },
};
+
#define FIPS_CIPHER_NUM_OF_TESTS (sizeof(FipsCipherDataTable) / sizeof(FipsCipherData))
static const FipsCmacData FipsCmacDataTable[] = {
{ DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_192_CMAC_KEY, AES_192_BIT_KEY_SIZE, NIST_AES_192_CMAC_PLAIN_DATA, NIST_AES_192_CMAC_VECTOR_SIZE, NIST_AES_192_CMAC_MAC, NIST_AES_192_CMAC_OUTPUT_SIZE },
{ DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AES_256_CMAC_KEY, AES_256_BIT_KEY_SIZE, NIST_AES_256_CMAC_PLAIN_DATA, NIST_AES_256_CMAC_VECTOR_SIZE, NIST_AES_256_CMAC_MAC, NIST_AES_256_CMAC_OUTPUT_SIZE },
};
+
#define FIPS_CMAC_NUM_OF_TESTS (sizeof(FipsCmacDataTable) / sizeof(FipsCmacData))
static const FipsHashData FipsHashDataTable[] = {
// { DRV_HASH_SHA512, NIST_SHA_512_MSG, NIST_SHA_MSG_SIZE, NIST_SHA_512_MD },
#endif
};
+
#define FIPS_HASH_NUM_OF_TESTS (sizeof(FipsHashDataTable) / sizeof(FipsHashData))
static const FipsHmacData FipsHmacDataTable[] = {
// { DRV_HASH_SHA512, NIST_HMAC_SHA512_KEY, NIST_HMAC_SHA512_KEY_SIZE, NIST_HMAC_SHA512_MSG, NIST_HMAC_MSG_SIZE, NIST_HMAC_SHA512_MD },
#endif
};
+
#define FIPS_HMAC_NUM_OF_TESTS (sizeof(FipsHmacDataTable) / sizeof(FipsHmacData))
static const FipsCcmData FipsCcmDataTable[] = {
{ DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC },
{ DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESCCM_256_KEY, NIST_AESCCM_256_BIT_KEY_SIZE, NIST_AESCCM_256_NONCE, NIST_AESCCM_256_ADATA, NIST_AESCCM_ADATA_SIZE, NIST_AESCCM_256_CIPHER, NIST_AESCCM_TEXT_SIZE, NIST_AESCCM_256_PLAIN_TEXT, NIST_AESCCM_TAG_SIZE, NIST_AESCCM_256_MAC },
};
+
#define FIPS_CCM_NUM_OF_TESTS (sizeof(FipsCcmDataTable) / sizeof(FipsCcmData))
static const FipsGcmData FipsGcmDataTable[] = {
{ DRV_CRYPTO_DIRECTION_ENCRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC },
{ DRV_CRYPTO_DIRECTION_DECRYPT, NIST_AESGCM_256_KEY, NIST_AESGCM_256_BIT_KEY_SIZE, NIST_AESGCM_256_IV, NIST_AESGCM_256_ADATA, NIST_AESGCM_ADATA_SIZE, NIST_AESGCM_256_CIPHER, NIST_AESGCM_TEXT_SIZE, NIST_AESGCM_256_PLAIN_TEXT, NIST_AESGCM_TAG_SIZE, NIST_AESGCM_256_MAC },
};
-#define FIPS_GCM_NUM_OF_TESTS (sizeof(FipsGcmDataTable) / sizeof(FipsGcmData))
+#define FIPS_GCM_NUM_OF_TESTS (sizeof(FipsGcmDataTable) / sizeof(FipsGcmData))
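For reference, these open-coded sizeof divisions are exactly what the kernel's ARRAY_SIZE() macro in <linux/kernel.h> expands to (plus a type-safety check), so each count could equivalently be written as:

#define FIPS_GCM_NUM_OF_TESTS	ARRAY_SIZE(FipsGcmDataTable)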
static inline enum cc_fips_error
FIPS_CipherToFipsError(enum drv_cipher_mode mode, bool is_aes)
return CC_REE_FIPS_ERROR_GENERAL;
}
-
static inline int
ssi_cipher_fips_run_test(struct ssi_drvdata *drvdata,
bool is_aes,
return rc;
}
-
enum cc_fips_error
ssi_cipher_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer)
{
return error;
}
-
static inline int
ssi_cmac_fips_run_test(struct ssi_drvdata *drvdata,
dma_addr_t key_dma_addr,
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
-
//ssi_hash_create_data_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, din_dma_addr, din_len, NS_BIT);
return error;
}
-
static inline enum cc_fips_error
FIPS_HashToFipsError(enum drv_hash_mode hash_mode)
{
return error;
}
-
static inline enum cc_fips_error
FIPS_HmacToFipsError(enum drv_hash_mode hash_mode)
{
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
idx++;
-
/* Load the hash current length*/
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hw_mode);
set_flow_mode(&desc[idx], DIN_HASH);
idx++;
-
/* Get final MAC result */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], hw_mode);
return error;
}
-
static inline int
ssi_ccm_fips_run_test(struct ssi_drvdata *drvdata,
enum drv_crypto_direction direction,
{
/* build B0 -- B0, nonce, l(m) */
__be16 data = cpu_to_be16(NIST_AESCCM_TEXT_SIZE);
+
virt_ctx->b0_a0_adata[0] = NIST_AESCCM_B0_VAL;
memcpy(virt_ctx->b0_a0_adata + 1, ccmData->nonce, NIST_AESCCM_NONCE_SIZE);
memcpy(virt_ctx->b0_a0_adata + 14, (u8 *)&data, sizeof(__be16));
return error;
}
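The first byte written above (NIST_AESCCM_B0_VAL) is the CCM B0 flags byte from RFC 3610 section 2.2 / SP 800-38C: flags = 64*Adata + 8*((t - 2) / 2) + (q - 1). A sketch computing it, with the spec's parameter names rather than the driver's:

/* Sketch: RFC 3610 B0 flags byte.
 * adata: associated data present
 * t:     tag length in bytes (4, 6, ..., 16)
 * q:     width of the message-length field, q = 15 - nonce_len
 */
static u8 ccm_b0_flags(bool adata, unsigned int t, unsigned int q)
{
	return (adata ? 64 : 0) | (((t - 2) / 2) << 3) | (q - 1);
}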
-
static inline int
ssi_gcm_fips_run_test(struct ssi_drvdata *drvdata,
enum drv_crypto_direction direction,
set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
idx++;
-
-
///////////////////////////////// 2 ////////////////////////////////////
/* process(ghash) assoc data */
// if (req->assoclen > 0)
set_flow_mode(&desc[idx], DIN_HASH);
idx++;
-
///////////////////////////////// 3 ////////////////////////////////////
// ssi_aead_gcm_setup_gctr_desc(req, desc, seq_size);
///////////////////////////////// 3 ////////////////////////////////////
set_flow_mode(&desc[idx], S_DIN_to_AES);
idx++;
-
///////////////////////////////// 4 ////////////////////////////////////
/* process(gctr+ghash) */
// if (req_ctx->cryptlen != 0)
set_flow_mode(&desc[idx], cipher_flow_mode);
idx++;
-
///////////////////////////////// 5 ////////////////////////////////////
// ssi_aead_process_gcm_result_desc(req, desc, seq_size);
///////////////////////////////// 5 ////////////////////////////////////
/* len_block */
{
__be64 len_bits;
+
len_bits = cpu_to_be64(gcmData->adataSize * 8);
memcpy(virt_ctx->len_block, &len_bits, sizeof(len_bits));
len_bits = cpu_to_be64(gcmData->dataInSize * 8);
/* iv_inc1, iv_inc2 */
{
__be32 counter = cpu_to_be32(1);
+
memcpy(virt_ctx->iv_inc1, gcmData->iv, NIST_AESGCM_IV_SIZE);
memcpy(virt_ctx->iv_inc1 + NIST_AESGCM_IV_SIZE, &counter, sizeof(counter));
counter = cpu_to_be32(2);
return error;
}
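iv_inc1/iv_inc2 above are the first two GCM counter blocks for a 96-bit IV, per SP 800-38D: J0 = IV || 0x00000001 (encrypts the GHASH result into the tag) and inc32(J0) = IV || 0x00000002 (keystream for the first data block). A standalone sketch:

/* Sketch: initial GCM counter blocks for a 96-bit IV. */
static void gcm_initial_counters(const u8 iv[12], u8 j0[16], u8 ctr1[16])
{
	__be32 one = cpu_to_be32(1);
	__be32 two = cpu_to_be32(2);

	memcpy(j0, iv, 12);
	memcpy(j0 + 12, &one, sizeof(one));
	memcpy(ctr1, iv, 12);
	memcpy(ctr1 + 12, &two, sizeof(two));
}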
-
size_t ssi_fips_max_mem_alloc_size(void)
{
FIPS_DBG("sizeof(struct fips_cipher_ctx) %d \n", sizeof(struct fips_cipher_ctx));
#include "ssi_driver.h"
#include "cc_hal.h"
-
#define FIPS_POWER_UP_TEST_CIPHER 1
#define FIPS_POWER_UP_TEST_CMAC 1
#define FIPS_POWER_UP_TEST_HASH 1
#endif
};
-
extern int ssi_fips_get_state(enum cc_fips_state_t *p_state);
extern int ssi_fips_get_error(enum cc_fips_error *p_err);
extern int ssi_fips_ext_set_state(enum cc_fips_state_t state);
extern enum cc_fips_error ssi_gcm_fips_power_up_tests(struct ssi_drvdata *drvdata, void *cpu_addr_buffer, dma_addr_t dma_coherent_buffer);
extern size_t ssi_fips_max_mem_alloc_size(void);
-
/* This function is called once at the driver entry point to check whether a TEE FIPS error occurred. */
static enum ssi_fips_error ssi_fips_get_tee_error(struct ssi_drvdata *drvdata)
{
return CC_REE_FIPS_ERROR_FROM_TEE;
}
-
/*
 * This function pushes the FIPS REE library status towards the TEE library
 * by writing the error state to the HOST_GPR0 register. The function is called from
static void ssi_fips_update_tee_upon_ree_status(struct ssi_drvdata *drvdata, enum cc_fips_error err)
{
void __iomem *cc_base = drvdata->cc_base;
+
if (err == CC_REE_FIPS_ERROR_OK)
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_OK));
else
CC_HAL_WRITE_REGISTER(CC_REG_OFFSET(HOST_RGF, HOST_GPR0), (CC_FIPS_SYNC_REE_STATUS | CC_FIPS_SYNC_MODULE_ERROR));
}
-
-
void ssi_fips_fini(struct ssi_drvdata *drvdata)
{
struct ssi_fips_handle *fips_h = drvdata->fips_handle;
#endif
}
-
-
#ifdef COMP_IN_WQ
static void fips_wq_handler(struct work_struct *work)
{
CC_REG_OFFSET(HOST_RGF, HOST_IMR)) & ~irq);
}
-
enum cc_fips_error cc_fips_run_power_up_tests(struct ssi_drvdata *drvdata)
{
enum cc_fips_error fips_error = CC_REE_FIPS_ERROR_OK;
return fips_error;
}
-
-
/* The function checks whether FIPS is supported and whether a FIPS error exists.*
* It should be used in every driver API.
*/
return 0;
}
-
/* The function sets the REE FIPS state.*
* It should be used while driver is being loaded.
*/
return rc;
}
-
/* This function is called once at the driver entry point. */
int ssi_fips_init(struct ssi_drvdata *p_drvdata)
{
#ifndef __SSI_FIPS_LOCAL_H__
#define __SSI_FIPS_LOCAL_H__
-
#ifdef CONFIG_CCX7REE_FIPS_SUPPORT
#include "ssi_fips.h"
return -ENOEXEC;\
} \
}
+
#define CHECK_AND_RETURN_VOID_UPON_FIPS_ERROR() {\
if (ssi_fips_check_fips_error() != 0) {\
return;\
} \
}
+
#define SSI_FIPS_INIT(p_drvData) (ssi_fips_init(p_drvData))
#define SSI_FIPS_FINI(p_drvData) (ssi_fips_fini(p_drvData))
#endif /* CONFIG_CC7XXREE_FIPS_SUPPORT */
-
#endif /*__SSI_FIPS_LOCAL_H__*/
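These macros guard driver entry points against a latched FIPS error before any HW work is queued; typical usage (the function and its argument here are hypothetical):

/* Sketch: guarding a driver entry point. */
static int ssi_example_api(void *req)
{
	CHECK_AND_RETURN_UPON_FIPS_ERROR(); /* bails out with -ENOEXEC */
	/* ... normal processing ... */
	return 0;
}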
struct ahash_alg ahash_alg;
};
-
struct hash_key_req_ctx {
u32 keylen;
dma_addr_t key_dma_addr;
*/
u8 digest_buff[SSI_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
u8 opad_tmp_keys_buff[SSI_MAX_HASH_OPAD_TMP_KEYS_SIZE] ____cacheline_aligned;
+
dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
dma_addr_t digest_buff_dma_addr;
/* used for hmac with a key larger than the mode block size */
int idx = 0;
int rc = 0;
-
SSI_LOG_DEBUG("===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes);
CHECK_AND_RETURN_UPON_FIPS_ERROR();
static int ssi_hash_init(struct ahash_req_ctx *state, struct ssi_hash_ctx *ctx)
{
struct device *dev = &ctx->drvdata->plat_dev->dev;
+
state->xcbc_count = 0;
CHECK_AND_RETURN_UPON_FIPS_ERROR();
return rc;
}
-
static int ssi_xcbc_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
return rc;
}
+
#if SSI_CC_HAS_CMAC
static int ssi_cmac_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
struct ssi_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+
SSI_LOG_DEBUG("===== setkey (%d) ====\n", keylen);
CHECK_AND_RETURN_UPON_FIPS_ERROR();
ctx->key_params.keylen = keylen;
-
return 0;
}
#endif
ctx->key_params.keylen = 0;
}
-
static int ssi_hash_alloc_ctx(struct ssi_hash_ctx *ctx)
{
struct device *dev = &ctx->drvdata->plat_dev->dev;
struct ssi_hash_alg *ssi_alg =
container_of(ahash_alg, struct ssi_hash_alg, ahash_alg);
-
CHECK_AND_RETURN_UPON_FIPS_ERROR();
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct ahash_req_ctx));
u32 rem_cnt = state->buff_index ? state->buff1_cnt :
state->buff0_cnt;
-
CHECK_AND_RETURN_UPON_FIPS_ERROR();
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
keySize = CC_AES_128_BIT_KEY_SIZE;
set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
idx++;
-
/* Initiate decryption of block state to previous block_state-XOR-M[n] */
hw_desc_init(&desc[idx]);
set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
#include "cc_hw_queue_defs.h"
-
#define SSI_IVPOOL_SEQ_LEN 8
/*!
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
-
#include "ssi_config.h"
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include "ssi_hash.h"
#include "ssi_pm.h"
-
#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
#define POWER_DOWN_ENABLE 0x01
#define POWER_DOWN_DISABLE 0x00
-
int ssi_power_mgr_runtime_suspend(struct device *dev)
{
struct ssi_drvdata *drvdata =
#endif
-
-
int ssi_power_mgr_init(struct ssi_drvdata *drvdata)
{
int rc = 0;
#ifndef __SSI_POWER_MGR_H__
#define __SSI_POWER_MGR_H__
-
#include "ssi_config.h"
#include "ssi_driver.h"
-
#define SSI_SUSPEND_TIMEOUT 3000
-
int ssi_power_mgr_init(struct ssi_drvdata *drvdata);
void ssi_power_mgr_fini(struct ssi_drvdata *drvdata);
u8 *dummy_comp_buff;
dma_addr_t dummy_comp_buff_dma;
struct cc_hw_desc monitor_desc;
+
volatile unsigned long monitor_lock;
#ifdef COMP_IN_WQ
struct workqueue_struct *workq;
req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
req_mgr_h->max_used_sw_slots = 0;
-
/* Allocate DMA word for "dummy" completion descriptor use */
req_mgr_h->dummy_comp_buff = dma_alloc_coherent(&drvdata->plat_dev->dev,
sizeof(u32), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL);
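The coherent allocation above pairs with dma_free_coherent() on the teardown path; a hedged sketch of the matching release (the surrounding fini function is assumed, field names follow the fragment above):

/* Sketch: free the dummy-completion DMA word. */
if (req_mgr_h->dummy_comp_buff)
	dma_free_coherent(&drvdata->plat_dev->dev, sizeof(u32),
			  req_mgr_h->dummy_comp_buff,
			  req_mgr_h->dummy_comp_buff_dma);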
static void request_mgr_complete(struct device *dev, void *dx_compl_h, void __iomem *cc_base)
{
struct completion *this_compl = dx_compl_h;
+
complete(this_compl);
}
-
static inline int request_mgr_queues_status_check(
struct ssi_request_mgr_handle *req_mgr_h,
void __iomem *cc_base,
}
}
-
/*!
* Enqueue caller request to crypto hardware during init process.
* assume this function is not called in middle of a flow,
return 0;
}
-
void complete_request(struct ssi_drvdata *drvdata)
{
struct ssi_request_mgr_handle *request_mgr_handle =
{
u32 axi_err;
int i;
+
SSI_LOG_INFO("Delay\n");
for (i = 0; i < 1000000; i++)
axi_err = READ_REGISTER(drvdata->cc_base + CC_REG_OFFSET(CRY_KERNEL, AXIM_MON_ERR));
u32 irq;
-
-
irq = (drvdata->irq & SSI_COMP_IRQ_MASK);
if (irq & SSI_COMP_IRQ_MASK) {
#include "ssi_driver.h"
#include "ssi_sram_mgr.h"
-
/**
 * struct ssi_sram_mgr_ctx - Internal RAM context manager
* @sram_free_offset: the offset to the non-allocated area
ssi_sram_addr_t sram_free_offset;
};
-
/**
* ssi_sram_mgr_fini() - Cleanup SRAM pool.
*
#ifndef __SSI_SRAM_MGR_H__
#define __SSI_SRAM_MGR_H__
-
#ifndef SSI_CC_SRAM_SIZE
#define SSI_CC_SRAM_SIZE 4096
#endif
static struct stat_item stat_host_db[MAX_STAT_OP_TYPES][MAX_STAT_PHASES];
static struct stat_item stat_cc_db[MAX_STAT_OP_TYPES][MAX_STAT_PHASES];
-
static void init_db(struct stat_item item[MAX_STAT_OP_TYPES][MAX_STAT_PHASES])
{
unsigned int i, j;
}
}
-
/**************************************
* Attributes show functions section *
**************************************/
}
#endif /*CC_CYCLE_COUNT*/
-
-
static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
STAT_PHASE_6,
MAX_STAT_PHASES,
};
+
enum stat_op {
STAT_OP_TYPE_NULL = 0,
STAT_OP_TYPE_ENCODE,