#define ICV_VERIF_OK 0x01
struct cc_aead_handle {
- ssi_sram_addr_t sram_workspace_addr;
+ cc_sram_addr_t sram_workspace_addr;
struct list_head aead_list;
};
};
struct cc_aead_ctx {
- struct ssi_drvdata *drvdata;
+ struct cc_drvdata *drvdata;
u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
u8 *enckey;
dma_addr_t enckey_dma_addr;
{
struct aead_alg *alg = crypto_aead_alg(tfm);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct ssi_crypto_alg *ssi_alg =
- container_of(alg, struct ssi_crypto_alg, aead_alg);
- struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
+ struct cc_crypto_alg *cc_alg =
+ container_of(alg, struct cc_crypto_alg, aead_alg);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
crypto_tfm_alg_name(&tfm->base));
/* Initialize modes in instance */
- ctx->cipher_mode = ssi_alg->cipher_mode;
- ctx->flow_mode = ssi_alg->flow_mode;
- ctx->auth_mode = ssi_alg->auth_mode;
- ctx->drvdata = ssi_alg->drvdata;
+ ctx->cipher_mode = cc_alg->cipher_mode;
+ ctx->flow_mode = cc_alg->flow_mode;
+ ctx->auth_mode = cc_alg->auth_mode;
+ ctx->drvdata = cc_alg->drvdata;
crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
/* Allocate key buffer, cache line aligned */
return -ENOMEM;
}
-static void cc_aead_complete(struct device *dev, void *ssi_req)
+static void cc_aead_complete(struct device *dev, void *cc_req)
{
- struct aead_request *areq = (struct aead_request *)ssi_req;
+ struct aead_request *areq = (struct aead_request *)cc_req;
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
- struct crypto_aead *tfm = crypto_aead_reqtfm(ssi_req);
+ struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
int err = 0;
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
unsigned int blocksize;
unsigned int digestsize;
unsigned int hashmode;
idx++;
}
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ rc = send_request(ctx->drvdata, &cc_req, desc, idx, 0);
if (rc)
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
{
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct rtattr *rta = (struct rtattr *)key;
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct crypto_authenc_key_param *param;
struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
int seq_len = 0, rc = -EINVAL;
/* STAT_PHASE_3: Submit sequence to HW */
if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
- rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 0);
+ rc = send_request(ctx->drvdata, &cc_req, desc, seq_len, 0);
if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto setkey_error;
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
- enum ssi_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
+ enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
unsigned int idx = *seq_size;
struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int *seq_size, int direct)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
- enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
unsigned int idx = *seq_size;
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 * assoc. + iv + data - compacted into one table;
 * if assoclen is ZERO, only the IV is processed
*/
- ssi_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
+ cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
u32 mlli_nents = areq_ctx->assoc.mlli_nents;
if (areq_ctx->is_single_pass) {
{
unsigned int idx = *seq_size;
struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
- enum ssi_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
+ enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)cc_aead_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_aead_complete;
+ cc_req.user_arg = (void *)req;
/* Setup request context */
areq_ctx->gen_ctx.op_type = direct;
if (areq_ctx->backup_giv) {
/* set the DMA mapped IV address*/
if (ctx->cipher_mode == DRV_CIPHER_CTR) {
- ssi_req.ivgen_dma_addr[0] =
+ cc_req.ivgen_dma_addr[0] =
areq_ctx->gen_ctx.iv_dma_addr +
CTR_RFC3686_NONCE_SIZE;
- ssi_req.ivgen_dma_addr_len = 1;
+ cc_req.ivgen_dma_addr_len = 1;
} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
/* In ccm, the IV needs to exist both inside B0 and
 * inside the counter. It is also copied to iv_dma_addr
* for other reasons (like returning it to the user).
* So, using 3 (identical) IV outputs.
*/
- ssi_req.ivgen_dma_addr[0] =
+ cc_req.ivgen_dma_addr[0] =
areq_ctx->gen_ctx.iv_dma_addr +
CCM_BLOCK_IV_OFFSET;
- ssi_req.ivgen_dma_addr[1] =
+ cc_req.ivgen_dma_addr[1] =
sg_dma_address(&areq_ctx->ccm_adata_sg) +
CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
- ssi_req.ivgen_dma_addr[2] =
+ cc_req.ivgen_dma_addr[2] =
sg_dma_address(&areq_ctx->ccm_adata_sg) +
CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
- ssi_req.ivgen_dma_addr_len = 3;
+ cc_req.ivgen_dma_addr_len = 3;
} else {
- ssi_req.ivgen_dma_addr[0] =
+ cc_req.ivgen_dma_addr[0] =
areq_ctx->gen_ctx.iv_dma_addr;
- ssi_req.ivgen_dma_addr_len = 1;
+ cc_req.ivgen_dma_addr_len = 1;
}
/* set the IV size (8/16 B long)*/
- ssi_req.ivgen_size = crypto_aead_ivsize(tfm);
+ cc_req.ivgen_size = crypto_aead_ivsize(tfm);
}
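As the comment above says, CCM needs the same IV in three places. A minimal sketch of the assumed layout (per RFC 3610; the CCM_* offsets are the driver's own constants, their values are not shown here):

/*
 * Both B0 and the initial counter block start with a one-byte flags
 * field followed by the nonce, so the IV lands at the same offset
 * (CCM_BLOCK_IV_OFFSET) inside each 16-byte block:
 *
 *   B0  (MAC):  | flags | nonce (IV) | encoded message length |
 *   CTR0 (enc): | flags | nonce (IV) | counter = 0            |
 *
 * Hence the three ivgen outputs: one to iv_dma_addr (returned to the
 * caller), one into B0 and one into the counter block.
 */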
/* STAT_PHASE_2: Create sequence */
/* STAT_PHASE_3: Lock HW and push sequence */
- rc = send_request(ctx->drvdata, &ssi_req, desc, seq_len, 1);
+ rc = send_request(ctx->drvdata, &cc_req, desc, seq_len, 1);
if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
}
/* DX Block aead alg */
-static struct ssi_alg_template aead_algs[] = {
+static struct cc_alg_template aead_algs[] = {
{
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-dx",
},
};
-static struct ssi_crypto_alg *cc_create_aead_alg(struct ssi_alg_template *tmpl,
- struct device *dev)
+static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
+ struct device *dev)
{
- struct ssi_crypto_alg *t_alg;
+ struct cc_crypto_alg *t_alg;
struct aead_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
return t_alg;
}
-int cc_aead_free(struct ssi_drvdata *drvdata)
+int cc_aead_free(struct cc_drvdata *drvdata)
{
- struct ssi_crypto_alg *t_alg, *n;
+ struct cc_crypto_alg *t_alg, *n;
struct cc_aead_handle *aead_handle =
(struct cc_aead_handle *)drvdata->aead_handle;
return 0;
}
-int cc_aead_alloc(struct ssi_drvdata *drvdata)
+int cc_aead_alloc(struct cc_drvdata *drvdata)
{
struct cc_aead_handle *aead_handle;
- struct ssi_crypto_alg *t_alg;
+ struct cc_crypto_alg *t_alg;
int rc = -ENOMEM;
int alg;
struct device *dev = drvdata_to_dev(drvdata);
u8 *icv_virt_addr; /* Virt. address of ICV */
struct async_gen_req_ctx gen_ctx;
- struct ssi_mlli assoc;
- struct ssi_mlli src;
- struct ssi_mlli dst;
+ struct cc_mlli assoc;
+ struct cc_mlli src;
+ struct cc_mlli dst;
struct scatterlist *src_sgl;
struct scatterlist *dst_sgl;
unsigned int src_offset;
unsigned int dst_offset;
- enum ssi_req_dma_buf_type assoc_buff_type;
- enum ssi_req_dma_buf_type data_buff_type;
+ enum cc_req_dma_buf_type assoc_buff_type;
+ enum cc_req_dma_buf_type data_buff_type;
struct mlli_params mlli_params;
unsigned int cryptlen;
struct scatterlist ccm_adata_sg;
bool plaintext_authenticate_only; //for gcm_rfc4543
};
-int cc_aead_alloc(struct ssi_drvdata *drvdata);
-int cc_aead_free(struct ssi_drvdata *drvdata);
+int cc_aead_alloc(struct cc_drvdata *drvdata);
+int cc_aead_free(struct cc_drvdata *drvdata);
#endif /*__CC_AEAD_H__*/
u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};
-static inline char *cc_dma_buf_type(enum ssi_req_dma_buf_type type)
+static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
switch (type) {
case CC_DMA_BUF_NULL:
* @dir: [IN] copy from/to sgl
*/
static void cc_copy_mac(struct device *dev, struct aead_request *req,
- enum ssi_sg_cpy_direct dir)
+ enum cc_sg_cpy_direct dir)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
* @direct:
*/
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
- u32 to_skip, u32 end, enum ssi_sg_cpy_direct direct)
+ u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
u32 nents, lbytes;
}
}
-int cc_map_blkcipher_request(struct ssi_drvdata *drvdata, void *ctx,
+int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
unsigned int ivsize, unsigned int nbytes,
void *info, struct scatterlist *src,
struct scatterlist *dst)
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
u32 dummy;
bool chained;
u32 size_to_unmap = 0;
return nents;
}
-static int cc_aead_chain_iv(struct ssi_drvdata *drvdata,
+static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
struct aead_request *req,
struct buffer_array *sg_data,
bool is_last, bool do_chain)
return rc;
}
-static int cc_aead_chain_assoc(struct ssi_drvdata *drvdata,
+static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
struct aead_request *req,
struct buffer_array *sg_data,
bool is_last, bool do_chain)
}
}
-static int cc_prepare_aead_data_mlli(struct ssi_drvdata *drvdata,
+static int cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
struct aead_request *req,
struct buffer_array *sg_data,
u32 *src_last_bytes, u32 *dst_last_bytes,
return rc;
}
-static int cc_aead_chain_data(struct ssi_drvdata *drvdata,
+static int cc_aead_chain_data(struct cc_drvdata *drvdata,
struct aead_request *req,
struct buffer_array *sg_data,
bool is_last_table, bool do_chain)
return rc;
}
-static void cc_update_aead_mlli_nents(struct ssi_drvdata *drvdata,
+static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
}
}
-int cc_map_aead_request(struct ssi_drvdata *drvdata, struct aead_request *req)
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
struct mlli_params *mlli_params = &areq_ctx->mlli_params;
return rc;
}
-int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
struct scatterlist *src, unsigned int nbytes,
bool do_update)
{
return -ENOMEM;
}
-int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
struct scatterlist *src, unsigned int nbytes,
unsigned int block_size)
{
}
}
-int cc_buffer_mgr_init(struct ssi_drvdata *drvdata)
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
struct buff_mgr_handle *buff_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
return -ENOMEM;
}
-int cc_buffer_mgr_fini(struct ssi_drvdata *drvdata)
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle;
#include "ssi_config.h"
#include "ssi_driver.h"
-enum ssi_req_dma_buf_type {
+enum cc_req_dma_buf_type {
CC_DMA_BUF_NULL = 0,
CC_DMA_BUF_DLLI,
CC_DMA_BUF_MLLI
};
-enum ssi_sg_cpy_direct {
+enum cc_sg_cpy_direct {
CC_SG_TO_BUF = 0,
CC_SG_FROM_BUF = 1
};
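For orientation, a minimal sketch of how these buffer types are typically chosen; illustrative only (cc_pick_buf_type is hypothetical), the real decision lives in the buffer manager's mapping code:

static enum cc_req_dma_buf_type cc_pick_buf_type(unsigned int nents)
{
	if (nents == 0)
		return CC_DMA_BUF_NULL;	/* nothing mapped */
	if (nents == 1)
		return CC_DMA_BUF_DLLI;	/* single contiguous DMA segment */
	return CC_DMA_BUF_MLLI;		/* link table built in SRAM */
}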
-struct ssi_mlli {
- ssi_sram_addr_t sram_addr;
+struct cc_mlli {
+ cc_sram_addr_t sram_addr;
unsigned int nents; //sg nents
unsigned int mlli_nents; //mlli nents might be different than the above
};
u32 mlli_len;
};
-int cc_buffer_mgr_init(struct ssi_drvdata *drvdata);
+int cc_buffer_mgr_init(struct cc_drvdata *drvdata);
-int cc_buffer_mgr_fini(struct ssi_drvdata *drvdata);
+int cc_buffer_mgr_fini(struct cc_drvdata *drvdata);
-int cc_map_blkcipher_request(struct ssi_drvdata *drvdata, void *ctx,
+int cc_map_blkcipher_request(struct cc_drvdata *drvdata, void *ctx,
unsigned int ivsize, unsigned int nbytes,
void *info, struct scatterlist *src,
struct scatterlist *dst);
struct scatterlist *src,
struct scatterlist *dst);
-int cc_map_aead_request(struct ssi_drvdata *drvdata, struct aead_request *req);
+int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req);
void cc_unmap_aead_request(struct device *dev, struct aead_request *req);
-int cc_map_hash_request_final(struct ssi_drvdata *drvdata, void *ctx,
+int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
struct scatterlist *src, unsigned int nbytes,
bool do_update);
-int cc_map_hash_request_update(struct ssi_drvdata *drvdata, void *ctx,
+int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
struct scatterlist *src, unsigned int nbytes,
unsigned int block_size);
struct scatterlist *src, bool do_revert);
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
- u32 to_skip, u32 end, enum ssi_sg_cpy_direct direct);
+ u32 to_skip, u32 end, enum cc_sg_cpy_direct direct);
void cc_zero_sgl(struct scatterlist *sgl, u32 data_len);
};
struct cc_cipher_ctx {
- struct ssi_drvdata *drvdata;
+ struct cc_drvdata *drvdata;
int keylen;
int key_round_number;
int cipher_mode;
struct crypto_shash *shash_tfm;
};
-static void cc_cipher_complete(struct device *dev, void *ssi_req);
+static void cc_cipher_complete(struct device *dev, void *cc_req);
static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
{
static unsigned int get_max_keysize(struct crypto_tfm *tfm)
{
- struct ssi_crypto_alg *ssi_alg =
- container_of(tfm->__crt_alg, struct ssi_crypto_alg,
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
crypto_alg);
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_ABLKCIPHER)
- return ssi_alg->crypto_alg.cra_ablkcipher.max_keysize;
+ return cc_alg->crypto_alg.cra_ablkcipher.max_keysize;
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+ if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_BLKCIPHER)
- return ssi_alg->crypto_alg.cra_blkcipher.max_keysize;
+ return cc_alg->crypto_alg.cra_blkcipher.max_keysize;
return 0;
}
{
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct crypto_alg *alg = tfm->__crt_alg;
- struct ssi_crypto_alg *ssi_alg =
- container_of(alg, struct ssi_crypto_alg, crypto_alg);
- struct device *dev = drvdata_to_dev(ssi_alg->drvdata);
+ struct cc_crypto_alg *cc_alg =
+ container_of(alg, struct cc_crypto_alg, crypto_alg);
+ struct device *dev = drvdata_to_dev(cc_alg->drvdata);
int rc = 0;
unsigned int max_key_buf_size = get_max_keysize(tfm);
struct ablkcipher_tfm *ablktfm = &tfm->crt_ablkcipher;
ablktfm->reqsize = sizeof(struct blkcipher_req_ctx);
- ctx_p->cipher_mode = ssi_alg->cipher_mode;
- ctx_p->flow_mode = ssi_alg->flow_mode;
- ctx_p->drvdata = ssi_alg->drvdata;
+ ctx_p->cipher_mode = cc_alg->cipher_mode;
+ ctx_p->flow_mode = cc_alg->flow_mode;
+ ctx_p->drvdata = cc_alg->drvdata;
/* Allocate key buffer, cache line aligned */
ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL | GFP_DMA);
dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
unsigned int du_size = nbytes;
- struct ssi_crypto_alg *ssi_alg =
- container_of(tfm->__crt_alg, struct ssi_crypto_alg,
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
crypto_alg);
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
+ if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
CRYPTO_ALG_BULK_DU_512)
du_size = 512;
- if ((ssi_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
+ if ((cc_alg->crypto_alg.cra_flags & CRYPTO_ALG_BULK_MASK) ==
CRYPTO_ALG_BULK_DU_4096)
du_size = 4096;
}
}
-static void cc_cipher_complete(struct device *dev, void *ssi_req)
+static void cc_cipher_complete(struct device *dev, void *cc_req)
{
- struct ablkcipher_request *areq = (struct ablkcipher_request *)ssi_req;
+ struct ablkcipher_request *areq = (struct ablkcipher_request *)cc_req;
struct scatterlist *dst = areq->dst;
struct scatterlist *src = areq->src;
struct blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(areq);
struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx_p->drvdata);
struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
int rc, seq_len = 0, cts_restore_flag = 0;
dev_dbg(dev, "%s req=%p info=%p nbytes=%d\n",
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)cc_cipher_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_cipher_complete;
+ cc_req.user_arg = (void *)req;
#ifdef ENABLE_CYCLE_COUNT
- ssi_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
+ cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;
#endif
/* do we need to generate IV? */
if (req_ctx->is_giv) {
- ssi_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
- ssi_req.ivgen_dma_addr_len = 1;
+ cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
+ cc_req.ivgen_dma_addr_len = 1;
/* set the IV size (8/16 B long)*/
- ssi_req.ivgen_size = ivsize;
+ cc_req.ivgen_size = ivsize;
}
/* STAT_PHASE_3: Lock HW and push sequence */
- rc = send_request(ctx_p->drvdata, &ssi_req, desc, seq_len, 1);
+ rc = send_request(ctx_p->drvdata, &cc_req, desc, seq_len, 1);
if (rc != -EINPROGRESS) {
/* Failed to send the request or request completed
* synchronously
}
/* DX Block cipher alg */
-static struct ssi_alg_template blkcipher_algs[] = {
+static struct cc_alg_template blkcipher_algs[] = {
{
.name = "xts(aes)",
.driver_name = "xts-aes-dx",
};
static
-struct ssi_crypto_alg *cc_cipher_create_alg(struct ssi_alg_template *template,
- struct device *dev)
+struct cc_crypto_alg *cc_cipher_create_alg(struct cc_alg_template *template,
+ struct device *dev)
{
- struct ssi_crypto_alg *t_alg;
+ struct cc_crypto_alg *t_alg;
struct crypto_alg *alg;
t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
return t_alg;
}
-int cc_cipher_free(struct ssi_drvdata *drvdata)
+int cc_cipher_free(struct cc_drvdata *drvdata)
{
- struct ssi_crypto_alg *t_alg, *n;
+ struct cc_crypto_alg *t_alg, *n;
struct cc_cipher_handle *blkcipher_handle =
drvdata->blkcipher_handle;
if (blkcipher_handle) {
return 0;
}
-int cc_cipher_alloc(struct ssi_drvdata *drvdata)
+int cc_cipher_alloc(struct cc_drvdata *drvdata)
{
struct cc_cipher_handle *ablkcipher_handle;
- struct ssi_crypto_alg *t_alg;
+ struct cc_crypto_alg *t_alg;
struct device *dev = drvdata_to_dev(drvdata);
int rc = -ENOMEM;
int alg;
struct blkcipher_req_ctx {
struct async_gen_req_ctx gen_ctx;
- enum ssi_req_dma_buf_type dma_buf_type;
+ enum cc_req_dma_buf_type dma_buf_type;
u32 in_nents;
u32 in_mlli_nents;
u32 out_nents;
struct mlli_params mlli_params;
};
-int cc_cipher_alloc(struct ssi_drvdata *drvdata);
+int cc_cipher_alloc(struct cc_drvdata *drvdata);
-int cc_cipher_free(struct ssi_drvdata *drvdata);
+int cc_cipher_free(struct cc_drvdata *drvdata);
#ifndef CRYPTO_ALG_BULK_MASK
static irqreturn_t cc_isr(int irq, void *dev_id)
{
- struct ssi_drvdata *drvdata = (struct ssi_drvdata *)dev_id;
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
struct device *dev = drvdata_to_dev(drvdata);
u32 irr;
u32 imr;
return IRQ_HANDLED;
}
-int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe)
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
{
unsigned int val, cache_params;
struct device *dev = drvdata_to_dev(drvdata);
static int init_cc_resources(struct platform_device *plat_dev)
{
struct resource *req_mem_cc_regs = NULL;
- struct ssi_drvdata *new_drvdata;
+ struct cc_drvdata *new_drvdata;
struct device *dev = &plat_dev->dev;
struct device_node *np = dev->of_node;
u32 signature_val;
return rc;
}
-void fini_cc_regs(struct ssi_drvdata *drvdata)
+void fini_cc_regs(struct cc_drvdata *drvdata)
{
/* Mask all interrupts */
cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
static void cleanup_cc_resources(struct platform_device *plat_dev)
{
- struct ssi_drvdata *drvdata =
- (struct ssi_drvdata *)platform_get_drvdata(plat_dev);
+ struct cc_drvdata *drvdata =
+ (struct cc_drvdata *)platform_get_drvdata(plat_dev);
cc_aead_free(drvdata);
cc_hash_free(drvdata);
cc_clk_off(drvdata);
}
-int cc_clk_on(struct ssi_drvdata *drvdata)
+int cc_clk_on(struct cc_drvdata *drvdata)
{
struct clk *clk = drvdata->clk;
int rc;
return 0;
}
-void cc_clk_off(struct ssi_drvdata *drvdata)
+void cc_clk_off(struct cc_drvdata *drvdata)
{
struct clk *clk = drvdata->clk;
*/
#define CC_MAX_IVGEN_DMA_ADDRESSES 3
-struct ssi_crypto_req {
+struct cc_crypto_req {
void (*user_cb)(struct device *dev, void *req);
void *user_arg;
dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
};
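The completion contract these fields carry, as a minimal sketch (my_op_complete is hypothetical; the actual dispatch is in proc_completions() further down, which invokes cc_req->user_cb(dev, cc_req->user_arg)):

static void my_op_complete(struct device *dev, void *req)
{
	struct ahash_request *areq = req;	/* user_arg cast back */

	areq->base.complete(&areq->base, 0);
}

/* ... cc_req.user_cb = my_op_complete; cc_req.user_arg = req; ... */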
/**
- * struct ssi_drvdata - driver private data context
+ * struct cc_drvdata - driver private data context
* @cc_base: virt address of the CC registers
* @irq: device IRQ number
* @irq_mask: Interrupt mask shadow (1 for masked interrupts)
* @fw_ver: SeP loaded firmware version
*/
-struct ssi_drvdata {
+struct cc_drvdata {
void __iomem *cc_base;
int irq;
u32 irq_mask;
u32 fw_ver;
struct completion hw_queue_avail; /* wait for HW queue availability */
struct platform_device *plat_dev;
- ssi_sram_addr_t mlli_sram_addr;
+ cc_sram_addr_t mlli_sram_addr;
void *buff_mgr_handle;
void *hash_handle;
void *aead_handle;
bool coherent;
};
-struct ssi_crypto_alg {
+struct cc_crypto_alg {
struct list_head entry;
int cipher_mode;
int flow_mode; /* Note: currently, refers to the cipher mode only. */
int auth_mode;
- struct ssi_drvdata *drvdata;
+ struct cc_drvdata *drvdata;
struct crypto_alg crypto_alg;
struct aead_alg aead_alg;
};
-struct ssi_alg_template {
+struct cc_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
int cipher_mode;
int flow_mode; /* Note: currently, refers to the cipher mode only. */
int auth_mode;
- struct ssi_drvdata *drvdata;
+ struct cc_drvdata *drvdata;
};
struct async_gen_req_ctx {
enum drv_crypto_direction op_type;
};
-static inline struct device *drvdata_to_dev(struct ssi_drvdata *drvdata)
+static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
{
return &drvdata->plat_dev->dev;
}
unsigned long size) {};
#endif
-int init_cc_regs(struct ssi_drvdata *drvdata, bool is_probe);
-void fini_cc_regs(struct ssi_drvdata *drvdata);
-int cc_clk_on(struct ssi_drvdata *drvdata);
-void cc_clk_off(struct ssi_drvdata *drvdata);
+int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
+void fini_cc_regs(struct cc_drvdata *drvdata);
+int cc_clk_on(struct cc_drvdata *drvdata);
+void cc_clk_off(struct cc_drvdata *drvdata);
-static inline void cc_iowrite(struct ssi_drvdata *drvdata, u32 reg, u32 val)
+static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
{
iowrite32(val, (drvdata->cc_base + reg));
}
-static inline u32 cc_ioread(struct ssi_drvdata *drvdata, u32 reg)
+static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
{
return ioread32(drvdata->cc_base + reg);
}
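A small usage sketch for the accessors above; cc_mask_irq is hypothetical, but CC_REG(HOST_IMR) and the irq_mask shadow are the ones this patch already touches:

static inline void cc_mask_irq(struct cc_drvdata *drvdata, u32 bits)
{
	/* read-modify-write the interrupt mask, keeping the shadow in sync */
	drvdata->irq_mask = cc_ioread(drvdata, CC_REG(HOST_IMR)) | bits;
	cc_iowrite(drvdata, CC_REG(HOST_IMR), drvdata->irq_mask);
}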
static void fips_dsr(unsigned long devarg);
-struct ssi_fips_handle {
+struct cc_fips_handle {
struct tasklet_struct tasklet;
};
/* The function is called once at driver entry point to check
 * whether a TEE FIPS error occurred.
*/
-static bool cc_get_tee_fips_status(struct ssi_drvdata *drvdata)
+static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata)
{
u32 reg;
* This function should push the FIPS REE library status towards the TEE library
 * by writing the error state to the HOST_GPR0 register.
*/
-void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool status)
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status)
{
int val = CC_FIPS_SYNC_REE_STATUS;
cc_iowrite(drvdata, CC_REG(HOST_GPR0), val);
}
-void ssi_fips_fini(struct ssi_drvdata *drvdata)
+void ssi_fips_fini(struct cc_drvdata *drvdata)
{
- struct ssi_fips_handle *fips_h = drvdata->fips_handle;
+ struct cc_fips_handle *fips_h = drvdata->fips_handle;
if (!fips_h)
return; /* Not allocated */
drvdata->fips_handle = NULL;
}
-void fips_handler(struct ssi_drvdata *drvdata)
+void fips_handler(struct cc_drvdata *drvdata)
{
- struct ssi_fips_handle *fips_handle_ptr =
+ struct cc_fips_handle *fips_handle_ptr =
drvdata->fips_handle;
tasklet_schedule(&fips_handle_ptr->tasklet);
/* Deferred service handler, run as interrupt-fired tasklet */
static void fips_dsr(unsigned long devarg)
{
- struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
struct device *dev = drvdata_to_dev(drvdata);
u32 irq, state, val;
}
/* The function is called once at driver entry point. */
-int ssi_fips_init(struct ssi_drvdata *p_drvdata)
+int ssi_fips_init(struct cc_drvdata *p_drvdata)
{
- struct ssi_fips_handle *fips_h;
+ struct cc_fips_handle *fips_h;
struct device *dev = drvdata_to_dev(p_drvdata);
fips_h = kzalloc(sizeof(*fips_h), GFP_KERNEL);
CC_FIPS_SYNC_STATUS_RESERVE32B = S32_MAX
};
-int ssi_fips_init(struct ssi_drvdata *p_drvdata);
-void ssi_fips_fini(struct ssi_drvdata *drvdata);
-void fips_handler(struct ssi_drvdata *drvdata);
-void cc_set_ree_fips_status(struct ssi_drvdata *drvdata, bool ok);
+int ssi_fips_init(struct cc_drvdata *p_drvdata);
+void ssi_fips_fini(struct cc_drvdata *drvdata);
+void fips_handler(struct cc_drvdata *drvdata);
+void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool ok);
#else /* CONFIG_CRYPTO_FIPS */
-static inline int ssi_fips_init(struct ssi_drvdata *p_drvdata)
+static inline int ssi_fips_init(struct cc_drvdata *p_drvdata)
{
return 0;
}
-static inline void ssi_fips_fini(struct ssi_drvdata *drvdata) {}
-static inline void cc_set_ree_fips_status(struct ssi_drvdata *drvdata,
+static inline void ssi_fips_fini(struct cc_drvdata *drvdata) {}
+static inline void cc_set_ree_fips_status(struct cc_drvdata *drvdata,
bool ok) {}
-static inline void fips_handler(struct ssi_drvdata *drvdata) {}
+static inline void fips_handler(struct cc_drvdata *drvdata) {}
#endif /* CONFIG_CRYPTO_FIPS */
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
struct cc_hash_handle {
- ssi_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
- ssi_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
+ cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
+ cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
struct list_head hash_list;
struct completion init_comp;
};
int hash_mode;
int hw_mode;
int inter_digestsize;
- struct ssi_drvdata *drvdata;
+ struct cc_drvdata *drvdata;
struct ahash_alg ahash_alg;
};
/* hash per-session context */
struct cc_hash_ctx {
- struct ssi_drvdata *drvdata;
+ struct cc_drvdata *drvdata;
/* holds the origin digest; the digest after "setkey" if HMAC,
* the initial digest if HASH.
*/
struct cc_hash_ctx *ctx)
{
bool is_hmac = ctx->is_hmac;
- ssi_sram_addr_t larval_digest_addr =
+ cc_sram_addr_t larval_digest_addr =
cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc;
int rc = -ENOMEM;
ctx->inter_digestsize, NS_BIT, 0);
set_flow_mode(&desc, BYPASS);
- rc = send_request(ctx->drvdata, &ssi_req, &desc, 1, 0);
+ rc = send_request(ctx->drvdata, &cc_req, &desc, 1, 0);
if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto fail4;
state->digest_result_dma_addr = 0;
}
-static void cc_update_complete(struct device *dev, void *ssi_req)
+static void cc_update_complete(struct device *dev, void *cc_req)
{
- struct ahash_request *req = (struct ahash_request *)ssi_req;
+ struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
dev_dbg(dev, "req=%pK\n", req);
req->base.complete(&req->base, 0);
}
-static void cc_digest_complete(struct device *dev, void *ssi_req)
+static void cc_digest_complete(struct device *dev, void *cc_req)
{
- struct ahash_request *req = (struct ahash_request *)ssi_req;
+ struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
req->base.complete(&req->base, 0);
}
-static void cc_hash_complete(struct device *dev, void *ssi_req)
+static void cc_hash_complete(struct device *dev, void *cc_req)
{
- struct ahash_request *req = (struct ahash_request *)ssi_req;
+ struct ahash_request *req = (struct ahash_request *)cc_req;
struct ahash_req_ctx *state = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
u8 *result = req->result;
struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
- ssi_sram_addr_t larval_digest_addr =
+ cc_sram_addr_t larval_digest_addr =
cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
int idx = 0;
int rc = 0;
}
/* Setup DX request structure */
- ssi_req.user_cb = cc_digest_complete;
- ssi_req.user_arg = req;
+ cc_req.user_cb = cc_digest_complete;
+ cc_req.user_arg = req;
/* If HMAC then load hash IPAD xor key, if HASH then load initial
* digest
cc_set_endianity(ctx->hash_mode, &desc[idx]);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ rc = send_request(ctx->drvdata, &cc_req, desc, idx, 1);
if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
struct scatterlist *src = req->src;
unsigned int nbytes = req->nbytes;
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
u32 idx = 0;
int rc;
}
/* Setup DX request structure */
- ssi_req.user_cb = cc_update_complete;
- ssi_req.user_arg = req;
+ cc_req.user_cb = cc_update_complete;
+ cc_req.user_arg = req;
/* Restore hash digest */
hw_desc_init(&desc[idx]);
set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ rc = send_request(ctx->drvdata, &cc_req, desc, idx, 1);
if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
u8 *result = req->result;
struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc;
}
/* Setup DX request structure */
- ssi_req.user_cb = cc_hash_complete;
- ssi_req.user_arg = req;
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
/* Restore hash digest */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ rc = send_request(ctx->drvdata, &cc_req, desc, idx, 1);
if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
u8 *result = req->result;
struct device *dev = drvdata_to_dev(ctx->drvdata);
bool is_hmac = ctx->is_hmac;
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc;
}
/* Setup DX request structure */
- ssi_req.user_cb = cc_hash_complete;
- ssi_req.user_arg = req;
+ cc_req.user_cb = cc_hash_complete;
+ cc_req.user_arg = req;
/* Restore hash digest */
hw_desc_init(&desc[idx]);
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ rc = send_request(ctx->drvdata, &cc_req, desc, idx, 1);
if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, src, true);
unsigned int keylen)
{
unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct cc_hash_ctx *ctx = NULL;
int blocksize = 0;
int digestsize = 0;
int i, idx = 0, rc = 0;
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
- ssi_sram_addr_t larval_addr;
+ cc_sram_addr_t larval_addr;
struct device *dev;
ctx = crypto_ahash_ctx(ahash);
idx++;
}
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ rc = send_request(ctx->drvdata, &cc_req, desc, idx, 0);
if (rc) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
goto out;
idx++;
}
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ rc = send_request(ctx->drvdata, &cc_req, desc, idx, 0);
out:
if (rc)
static int cc_xcbc_setkey(struct crypto_ahash *ahash,
const u8 *key, unsigned int keylen)
{
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
struct device *dev = drvdata_to_dev(ctx->drvdata);
int idx = 0, rc = 0;
CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 0);
+ rc = send_request(ctx->drvdata, &cc_req, desc, idx, 0);
if (rc)
crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
container_of(tfm->__crt_alg, struct hash_alg_common, base);
struct ahash_alg *ahash_alg =
container_of(hash_alg_common, struct ahash_alg, halg);
- struct cc_hash_alg *ssi_alg =
+ struct cc_hash_alg *cc_alg =
container_of(ahash_alg, struct cc_hash_alg,
ahash_alg);
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct ahash_req_ctx));
- ctx->hash_mode = ssi_alg->hash_mode;
- ctx->hw_mode = ssi_alg->hw_mode;
- ctx->inter_digestsize = ssi_alg->inter_digestsize;
- ctx->drvdata = ssi_alg->drvdata;
+ ctx->hash_mode = cc_alg->hash_mode;
+ ctx->hw_mode = cc_alg->hw_mode;
+ ctx->inter_digestsize = cc_alg->inter_digestsize;
+ ctx->drvdata = cc_alg->drvdata;
return cc_alloc_ctx(ctx);
}
struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int rc;
u32 idx = 0;
idx++;
/* Setup DX request structure */
- ssi_req.user_cb = (void *)cc_update_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_update_complete;
+ cc_req.user_arg = (void *)req;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ rc = send_request(ctx->drvdata, &cc_req, desc, idx, 1);
if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc = 0;
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)cc_hash_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_hash_complete;
+ cc_req.user_arg = (void *)req;
if (state->xcbc_count && rem_cnt == 0) {
/* Load key for ECB decryption */
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ rc = send_request(ctx->drvdata, &cc_req, desc, idx, 1);
if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
int idx = 0;
int rc = 0;
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)cc_hash_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_hash_complete;
+ cc_req.user_arg = (void *)req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_len = CC_AES_128_BIT_KEY_SIZE;
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ rc = send_request(ctx->drvdata, &cc_req, desc, idx, 1);
if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct device *dev = drvdata_to_dev(ctx->drvdata);
u32 digestsize = crypto_ahash_digestsize(tfm);
- struct ssi_crypto_req ssi_req = {};
+ struct cc_crypto_req cc_req = {};
struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
u32 key_len;
int idx = 0;
}
/* Setup DX request structure */
- ssi_req.user_cb = (void *)cc_digest_complete;
- ssi_req.user_arg = (void *)req;
+ cc_req.user_cb = (void *)cc_digest_complete;
+ cc_req.user_arg = (void *)req;
if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
key_len = CC_AES_128_BIT_KEY_SIZE;
set_cipher_mode(&desc[idx], ctx->hw_mode);
idx++;
- rc = send_request(ctx->drvdata, &ssi_req, desc, idx, 1);
+ rc = send_request(ctx->drvdata, &cc_req, desc, idx, 1);
if (rc != -EINPROGRESS) {
dev_err(dev, "send_request() failed (rc=%d)\n", rc);
cc_unmap_hash_request(dev, state, req->src, true);
int hash_mode;
int hw_mode;
int inter_digestsize;
- struct ssi_drvdata *drvdata;
+ struct cc_drvdata *drvdata;
};
#define CC_STATE_SIZE(_x) \
return t_crypto_alg;
}
-int cc_init_hash_sram(struct ssi_drvdata *drvdata)
+int cc_init_hash_sram(struct cc_drvdata *drvdata)
{
struct cc_hash_handle *hash_handle = drvdata->hash_handle;
- ssi_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
+ cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
unsigned int larval_seq_len = 0;
struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
struct device *dev = drvdata_to_dev(drvdata);
return rc;
}
-int cc_hash_alloc(struct ssi_drvdata *drvdata)
+int cc_hash_alloc(struct cc_drvdata *drvdata)
{
struct cc_hash_handle *hash_handle;
- ssi_sram_addr_t sram_buff;
+ cc_sram_addr_t sram_buff;
u32 sram_size_to_alloc;
struct device *dev = drvdata_to_dev(drvdata);
int rc = 0;
return rc;
}
-int cc_hash_free(struct ssi_drvdata *drvdata)
+int cc_hash_free(struct cc_drvdata *drvdata)
{
struct cc_hash_alg *t_hash_alg, *hash_n;
struct cc_hash_handle *hash_handle = drvdata->hash_handle;
*
* \return u32 The address of the initial digest in SRAM
*/
-ssi_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
{
- struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
struct device *dev = drvdata_to_dev(_drvdata);
return hash_handle->larval_digest_sram_addr;
}
-ssi_sram_addr_t
+cc_sram_addr_t
cc_digest_len_addr(void *drvdata, u32 mode)
{
- struct ssi_drvdata *_drvdata = (struct ssi_drvdata *)drvdata;
+ struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
- ssi_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
+ cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
switch (mode) {
case DRV_HASH_SHA1:
u8 *buff1;
u8 *digest_result_buff;
struct async_gen_req_ctx gen_ctx;
- enum ssi_req_dma_buf_type data_dma_buf_type;
+ enum cc_req_dma_buf_type data_dma_buf_type;
u8 *digest_buff;
u8 *opad_digest_buff;
u8 *digest_bytes_len;
struct mlli_params mlli_params;
};
-int cc_hash_alloc(struct ssi_drvdata *drvdata);
-int cc_init_hash_sram(struct ssi_drvdata *drvdata);
-int cc_hash_free(struct ssi_drvdata *drvdata);
+int cc_hash_alloc(struct cc_drvdata *drvdata);
+int cc_init_hash_sram(struct cc_drvdata *drvdata);
+int cc_hash_free(struct cc_drvdata *drvdata);
/*!
* Gets the initial digest length
*
* \return u32 returns the address of the initial digest length in SRAM
*/
-ssi_sram_addr_t
+cc_sram_addr_t
cc_digest_len_addr(void *drvdata, u32 mode);
/*!
*
* \return u32 The address of the initial digest in SRAM
*/
-ssi_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
+cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode);
#endif /*__CC_HASH_H__*/
* @pool_meta_dma: phys. address of the initial enc. key/IV
*/
struct cc_ivgen_ctx {
- ssi_sram_addr_t pool;
- ssi_sram_addr_t ctr_key;
- ssi_sram_addr_t ctr_iv;
+ cc_sram_addr_t pool;
+ cc_sram_addr_t ctr_key;
+ cc_sram_addr_t ctr_iv;
u32 next_iv_ofs;
u8 *pool_meta;
dma_addr_t pool_meta_dma;
*
* \return int Zero for success, negative value otherwise.
*/
-int cc_init_iv_sram(struct ssi_drvdata *drvdata)
+int cc_init_iv_sram(struct cc_drvdata *drvdata)
{
struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
*
* \param drvdata
*/
-void cc_ivgen_fini(struct ssi_drvdata *drvdata)
+void cc_ivgen_fini(struct cc_drvdata *drvdata)
{
struct cc_ivgen_ctx *ivgen_ctx = drvdata->ivgen_handle;
struct device *device = &drvdata->plat_dev->dev;
*
* \return int Zero for success, negative value otherwise.
*/
-int cc_ivgen_init(struct ssi_drvdata *drvdata)
+int cc_ivgen_init(struct cc_drvdata *drvdata)
{
struct cc_ivgen_ctx *ivgen_ctx;
struct device *device = &drvdata->plat_dev->dev;
*
* \return int Zero for success, negative value otherwise.
*/
-int cc_get_iv(struct ssi_drvdata *drvdata, dma_addr_t iv_out_dma[],
+int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
unsigned int iv_out_dma_len, unsigned int iv_out_size,
struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len)
{
*
* \return int Zero for success, negative value otherwise.
*/
-int cc_ivgen_init(struct ssi_drvdata *drvdata);
+int cc_ivgen_init(struct cc_drvdata *drvdata);
/*!
* Free iv-pool and ivgen context.
*
* \param drvdata
*/
-void cc_ivgen_fini(struct ssi_drvdata *drvdata);
+void cc_ivgen_fini(struct cc_drvdata *drvdata);
/*!
* Generates the initial pool in SRAM.
*
* \return int Zero for success, negative value otherwise.
*/
-int cc_init_iv_sram(struct ssi_drvdata *drvdata);
+int cc_init_iv_sram(struct cc_drvdata *drvdata);
/*!
 * Acquires a 16-byte IV from the iv-pool
*
* \return int Zero for success, negative value otherwise.
*/
-int cc_get_iv(struct ssi_drvdata *drvdata, dma_addr_t iv_out_dma[],
+int cc_get_iv(struct cc_drvdata *drvdata, dma_addr_t iv_out_dma[],
unsigned int iv_out_dma_len, unsigned int iv_out_size,
struct cc_hw_desc iv_seq[], unsigned int *iv_seq_len);
int cc_pm_suspend(struct device *dev)
{
- struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
int rc;
dev_dbg(dev, "set HOST_POWER_DOWN_EN\n");
int cc_pm_resume(struct device *dev)
{
int rc;
- struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n");
cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE);
int cc_pm_get(struct device *dev)
{
int rc = 0;
- struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
if (cc_req_queue_suspended(drvdata))
rc = pm_runtime_get_sync(dev);
int cc_pm_put_suspend(struct device *dev)
{
int rc = 0;
- struct ssi_drvdata *drvdata = dev_get_drvdata(dev);
+ struct cc_drvdata *drvdata = dev_get_drvdata(dev);
if (!cc_req_queue_suspended(drvdata)) {
pm_runtime_mark_last_busy(dev);
#endif
-int cc_pm_init(struct ssi_drvdata *drvdata)
+int cc_pm_init(struct cc_drvdata *drvdata)
{
int rc = 0;
#if defined(CONFIG_PM)
return rc;
}
-void cc_pm_fini(struct ssi_drvdata *drvdata)
+void cc_pm_fini(struct cc_drvdata *drvdata)
{
#if defined(CONFIG_PM)
pm_runtime_disable(drvdata_to_dev(drvdata));
#define CC_SUSPEND_TIMEOUT 3000
-int cc_pm_init(struct ssi_drvdata *drvdata);
+int cc_pm_init(struct cc_drvdata *drvdata);
-void cc_pm_fini(struct ssi_drvdata *drvdata);
+void cc_pm_fini(struct cc_drvdata *drvdata);
#if defined(CONFIG_PM)
int cc_pm_suspend(struct device *dev);
unsigned int hw_queue_size; /* HW capability */
unsigned int min_free_hw_slots;
unsigned int max_used_sw_slots;
- struct ssi_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
+ struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
u32 req_queue_head;
u32 req_queue_tail;
u32 axi_completed;
static void comp_work_handler(struct work_struct *work);
#endif
-void cc_req_mgr_fini(struct ssi_drvdata *drvdata)
+void cc_req_mgr_fini(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
drvdata->request_mgr_handle = NULL;
}
-int cc_req_mgr_init(struct ssi_drvdata *drvdata)
+int cc_req_mgr_init(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *req_mgr_h;
struct device *dev = drvdata_to_dev(drvdata);
complete(this_compl);
}
-static int cc_queues_status(struct ssi_drvdata *drvdata,
+static int cc_queues_status(struct cc_drvdata *drvdata,
struct cc_req_mgr_handle *req_mgr_h,
unsigned int total_seq_len)
{
* Enqueue caller request to crypto hardware.
*
* \param drvdata
- * \param ssi_req The request to enqueue
+ * \param cc_req The request to enqueue
* \param desc The crypto sequence
* \param len The crypto sequence length
* \param is_dout If "true": completion is handled by the caller
*
* \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
*/
-int send_request(struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
+int send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
struct cc_hw_desc *desc, unsigned int len, bool is_dout)
{
void __iomem *cc_base = drvdata->cc_base;
int rc;
unsigned int max_required_seq_len =
(total_seq_len +
- ((ssi_req->ivgen_dma_addr_len == 0) ? 0 :
+ ((cc_req->ivgen_dma_addr_len == 0) ? 0 :
CC_IVPOOL_SEQ_LEN) + (!is_dout ? 1 : 0));
#if defined(CONFIG_PM)
* enabled any DLLI/MLLI DOUT bit in the given sequence
*/
if (!is_dout) {
- init_completion(&ssi_req->seq_compl);
- ssi_req->user_cb = request_mgr_complete;
- ssi_req->user_arg = &ssi_req->seq_compl;
+ init_completion(&cc_req->seq_compl);
+ cc_req->user_cb = request_mgr_complete;
+ cc_req->user_arg = &cc_req->seq_compl;
total_seq_len++;
}
- if (ssi_req->ivgen_dma_addr_len > 0) {
+ if (cc_req->ivgen_dma_addr_len > 0) {
dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
- ssi_req->ivgen_dma_addr_len,
- &ssi_req->ivgen_dma_addr[0],
- &ssi_req->ivgen_dma_addr[1],
- &ssi_req->ivgen_dma_addr[2],
- ssi_req->ivgen_size);
+ cc_req->ivgen_dma_addr_len,
+ &cc_req->ivgen_dma_addr[0],
+ &cc_req->ivgen_dma_addr[1],
+ &cc_req->ivgen_dma_addr[2],
+ cc_req->ivgen_size);
/* Acquire IV from pool */
- rc = cc_get_iv(drvdata, ssi_req->ivgen_dma_addr,
- ssi_req->ivgen_dma_addr_len,
- ssi_req->ivgen_size,
+ rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
+ cc_req->ivgen_dma_addr_len,
+ cc_req->ivgen_size,
iv_seq, &iv_seq_len);
if (rc) {
req_mgr_h->max_used_sw_slots = used_sw_slots;
/* Enqueue request - must be locked with HW lock*/
- req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *ssi_req;
+ req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
(MAX_REQUEST_QUEUE_SIZE - 1);
/* TODO: Use circ_buf.h ? */
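One note on the masking above, since the TODO points at circ_buf.h:

/* (head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1) wraps the index without a
 * modulo; this assumes MAX_REQUEST_QUEUE_SIZE is a power of two, the
 * same invariant linux/circ_buf.h relies on. */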
/* Wait upon sequence completion.
* Return "0" -Operation done successfully.
*/
- wait_for_completion(&ssi_req->seq_compl);
+ wait_for_completion(&cc_req->seq_compl);
return 0;
}
/* Operation still in process */
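A minimal caller sketch for the synchronous path (assumed usage; descriptor programming elided):

struct cc_crypto_req cc_req = {};	/* no user_cb: request mgr installs its own */
struct cc_hw_desc desc[1];
int rc;

hw_desc_init(&desc[0]);
/* ... program the descriptor ... */

/* is_dout=false: a completion descriptor is appended and the call blocks
 * on seq_compl, so rc == 0 means the sequence has already executed. */
rc = send_request(drvdata, &cc_req, desc, 1, false);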
*
* \return int Returns "0" upon success
*/
-int send_request_init(struct ssi_drvdata *drvdata, struct cc_hw_desc *desc,
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
unsigned int len)
{
void __iomem *cc_base = drvdata->cc_base;
return 0;
}
-void complete_request(struct ssi_drvdata *drvdata)
+void complete_request(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
#ifdef COMP_IN_WQ
static void comp_work_handler(struct work_struct *work)
{
- struct ssi_drvdata *drvdata =
- container_of(work, struct ssi_drvdata, compwork.work);
+ struct cc_drvdata *drvdata =
+ container_of(work, struct cc_drvdata, compwork.work);
comp_handler((unsigned long)drvdata);
}
#endif
-static void proc_completions(struct ssi_drvdata *drvdata)
+static void proc_completions(struct cc_drvdata *drvdata)
{
- struct ssi_crypto_req *ssi_req;
+ struct cc_crypto_req *cc_req;
struct device *dev = drvdata_to_dev(drvdata);
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
break;
}
- ssi_req = &request_mgr_handle->req_queue[*tail];
+ cc_req = &request_mgr_handle->req_queue[*tail];
#ifdef FLUSH_CACHE_ALL
flush_cache_all();
}
#endif /* COMPLETION_DELAY */
- if (ssi_req->user_cb)
- ssi_req->user_cb(dev, ssi_req->user_arg);
+ if (cc_req->user_cb)
+ cc_req->user_cb(dev, cc_req->user_arg);
*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
dev_dbg(dev, "Request completed. axi_completed=%d\n",
}
}
-static inline u32 cc_axi_comp_count(struct ssi_drvdata *drvdata)
+static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
{
return FIELD_GET(AXIM_MON_COMP_VALUE,
cc_ioread(drvdata, CC_REG(AXIM_MON_COMP)));
/* Deferred service handler, run as interrupt-fired tasklet */
static void comp_handler(unsigned long devarg)
{
- struct ssi_drvdata *drvdata = (struct ssi_drvdata *)devarg;
+ struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
* inside the spin lock protection
*/
#if defined(CONFIG_PM)
-int cc_resume_req_queue(struct ssi_drvdata *drvdata)
+int cc_resume_req_queue(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
 * suspend the queue configuration. Since it is used only for runtime
 * suspend, just verify that the queue can be suspended.
*/
-int cc_suspend_req_queue(struct ssi_drvdata *drvdata)
+int cc_suspend_req_queue(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
return 0;
}
-bool cc_req_queue_suspended(struct ssi_drvdata *drvdata)
+bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
{
struct cc_req_mgr_handle *request_mgr_handle =
drvdata->request_mgr_handle;
#include "cc_hw_queue_defs.h"
-int cc_req_mgr_init(struct ssi_drvdata *drvdata);
+int cc_req_mgr_init(struct cc_drvdata *drvdata);
/*!
* Enqueue caller request to crypto hardware.
*
* \param drvdata
- * \param ssi_req The request to enqueue
+ * \param cc_req The request to enqueue
* \param desc The crypto sequence
* \param len The crypto sequence length
* \param is_dout If "true": completion is handled by the caller
*
* \return int Returns -EINPROGRESS if "is_dout=true"; "0" if "is_dout=false"
*/
-int send_request(struct ssi_drvdata *drvdata, struct ssi_crypto_req *ssi_req,
+int send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
struct cc_hw_desc *desc, unsigned int len, bool is_dout);
-int send_request_init(struct ssi_drvdata *drvdata, struct cc_hw_desc *desc,
+int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
unsigned int len);
-void complete_request(struct ssi_drvdata *drvdata);
+void complete_request(struct cc_drvdata *drvdata);
-void cc_req_mgr_fini(struct ssi_drvdata *drvdata);
+void cc_req_mgr_fini(struct cc_drvdata *drvdata);
#if defined(CONFIG_PM)
-int cc_resume_req_queue(struct ssi_drvdata *drvdata);
+int cc_resume_req_queue(struct cc_drvdata *drvdata);
-int cc_suspend_req_queue(struct ssi_drvdata *drvdata);
+int cc_suspend_req_queue(struct cc_drvdata *drvdata);
-bool cc_req_queue_suspended(struct ssi_drvdata *drvdata);
+bool cc_req_queue_suspended(struct cc_drvdata *drvdata);
#endif
#endif /*__REQUEST_MGR_H__*/
* @sram_free_offset: the offset to the non-allocated area
*/
struct ssi_sram_mgr_ctx {
- ssi_sram_addr_t sram_free_offset;
+ cc_sram_addr_t sram_free_offset;
};
/**
*
* @drvdata: Associated device driver context
*/
-void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata)
+void ssi_sram_mgr_fini(struct cc_drvdata *drvdata)
{
struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
*
* @drvdata: Associated device driver context
*/
-int ssi_sram_mgr_init(struct ssi_drvdata *drvdata)
+int ssi_sram_mgr_init(struct cc_drvdata *drvdata)
{
/* Allocate "this" context */
drvdata->sram_mgr_handle = kzalloc(sizeof(*drvdata->sram_mgr_handle),
* \param drvdata
* \param size The requested bytes to allocate
*/
-ssi_sram_addr_t cc_sram_alloc(struct ssi_drvdata *drvdata, u32 size)
+cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size)
{
struct ssi_sram_mgr_ctx *smgr_ctx = drvdata->sram_mgr_handle;
struct device *dev = drvdata_to_dev(drvdata);
- ssi_sram_addr_t p;
+ cc_sram_addr_t p;
if ((size & 0x3)) {
dev_err(dev, "Requested buffer size (%u) is not multiple of 4",
* @seq: A pointer to the given IN/OUT descriptor sequence
* @seq_len: A pointer to the given IN/OUT sequence length
*/
-void cc_set_sram_desc(const u32 *src, ssi_sram_addr_t dst,
+void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
unsigned int nelement, struct cc_hw_desc *seq,
unsigned int *seq_len)
{
#define CC_CC_SRAM_SIZE 4096
#endif
-struct ssi_drvdata;
+struct cc_drvdata;
/**
* Address (offset) within CC internal SRAM
*/
-typedef u64 ssi_sram_addr_t;
+typedef u64 cc_sram_addr_t;
-#define NULL_SRAM_ADDR ((ssi_sram_addr_t)-1)
+#define NULL_SRAM_ADDR ((cc_sram_addr_t)-1)
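A caller-side sketch of the pool contract (assumed usage; note this is a bump allocator with no per-buffer free, the whole pool goes away in ssi_sram_mgr_fini()):

cc_sram_addr_t addr = cc_sram_alloc(drvdata, 4 * sizeof(u32));

if (addr == NULL_SRAM_ADDR)
	return -ENOMEM;	/* pool exhausted or size not a multiple of 4 */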
/*!
* Initializes SRAM pool.
*
* \return int Zero for success, negative value otherwise.
*/
-int ssi_sram_mgr_init(struct ssi_drvdata *drvdata);
+int ssi_sram_mgr_init(struct cc_drvdata *drvdata);
/*!
 * Uninitializes the SRAM pool.
*
* \param drvdata
*/
-void ssi_sram_mgr_fini(struct ssi_drvdata *drvdata);
+void ssi_sram_mgr_fini(struct cc_drvdata *drvdata);
/*!
 * Allocate a buffer from the SRAM pool.
* \param drvdata
* \param size The requested bytes to allocate
*/
-ssi_sram_addr_t cc_sram_alloc(struct ssi_drvdata *drvdata, u32 size);
+cc_sram_addr_t cc_sram_alloc(struct cc_drvdata *drvdata, u32 size);
/**
* cc_set_sram_desc() - Create const descriptors sequence to
* @seq: A pointer to the given IN/OUT descriptor sequence
* @seq_len: A pointer to the given IN/OUT sequence length
*/
-void cc_set_sram_desc(const u32 *src, ssi_sram_addr_t dst,
+void cc_set_sram_desc(const u32 *src, cc_sram_addr_t dst,
unsigned int nelement, struct cc_hw_desc *seq,
unsigned int *seq_len);
#ifdef ENABLE_CC_SYSFS
-static struct ssi_drvdata *sys_get_drvdata(void);
+static struct cc_drvdata *sys_get_drvdata(void);
static ssize_t ssi_sys_regdump_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct ssi_drvdata *drvdata = sys_get_drvdata();
+ struct cc_drvdata *drvdata = sys_get_drvdata();
u32 register_value;
int offset = 0;
struct attribute_group sys_dir_attr_group;
struct attribute **sys_dir_attr_list;
u32 num_of_attrs;
- struct ssi_drvdata *drvdata; /* Associated driver context */
+ struct cc_drvdata *drvdata; /* Associated driver context */
};
/* top level directory structures */
};
-static struct ssi_drvdata *sys_get_drvdata(void)
+static struct cc_drvdata *sys_get_drvdata(void)
{
/* TODO: supporting multiple SeP devices would require avoiding
* global "top_dir" and finding associated "top_dir" by traversing
return sys_top_dir.drvdata;
}
-static int sys_init_dir(struct sys_dir *sys_dir, struct ssi_drvdata *drvdata,
+static int sys_init_dir(struct sys_dir *sys_dir, struct cc_drvdata *drvdata,
struct kobject *parent_dir_kobj, const char *dir_name,
struct kobj_attribute *attrs, u32 num_of_attrs)
{
}
}
-int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata)
+int ssi_sysfs_init(struct kobject *sys_dev_obj, struct cc_drvdata *drvdata)
{
int retval;
struct device *dev = drvdata_to_dev(drvdata);
#include <asm/timex.h>
/* forward declaration */
-struct ssi_drvdata;
+struct cc_drvdata;
-int ssi_sysfs_init(struct kobject *sys_dev_obj, struct ssi_drvdata *drvdata);
+int ssi_sysfs_init(struct kobject *sys_dev_obj, struct cc_drvdata *drvdata);
void ssi_sysfs_fini(void);
#endif /*__CC_SYSFS_H__*/