struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.cipher.cipher_err_cnt);
+ atomic64_inc(&alg->stats.cipher.err_cnt);
} else {
atomic64_inc(&alg->stats.cipher.encrypt_cnt);
atomic64_add(nbytes, &alg->stats.cipher.encrypt_tlen);
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.cipher.cipher_err_cnt);
+ atomic64_inc(&alg->stats.cipher.err_cnt);
} else {
atomic64_inc(&alg->stats.cipher.decrypt_cnt);
atomic64_add(nbytes, &alg->stats.cipher.decrypt_tlen);
int ret)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.aead.aead_err_cnt);
+ atomic64_inc(&alg->stats.aead.err_cnt);
} else {
atomic64_inc(&alg->stats.aead.encrypt_cnt);
atomic64_add(cryptlen, &alg->stats.aead.encrypt_tlen);
int ret)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.aead.aead_err_cnt);
+ atomic64_inc(&alg->stats.aead.err_cnt);
} else {
atomic64_inc(&alg->stats.aead.decrypt_cnt);
atomic64_add(cryptlen, &alg->stats.aead.decrypt_tlen);
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.akcipher.akcipher_err_cnt);
+ atomic64_inc(&alg->stats.akcipher.err_cnt);
} else {
atomic64_inc(&alg->stats.akcipher.encrypt_cnt);
atomic64_add(src_len, &alg->stats.akcipher.encrypt_tlen);
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.akcipher.akcipher_err_cnt);
+ atomic64_inc(&alg->stats.akcipher.err_cnt);
} else {
atomic64_inc(&alg->stats.akcipher.decrypt_cnt);
atomic64_add(src_len, &alg->stats.akcipher.decrypt_tlen);
void crypto_stats_akcipher_sign(int ret, struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.akcipher.akcipher_err_cnt);
+ atomic64_inc(&alg->stats.akcipher.err_cnt);
else
atomic64_inc(&alg->stats.akcipher.sign_cnt);
crypto_alg_put(alg);
void crypto_stats_akcipher_verify(int ret, struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.akcipher.akcipher_err_cnt);
+ atomic64_inc(&alg->stats.akcipher.err_cnt);
else
atomic64_inc(&alg->stats.akcipher.verify_cnt);
crypto_alg_put(alg);
void crypto_stats_compress(unsigned int slen, int ret, struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.compress.compress_err_cnt);
+ atomic64_inc(&alg->stats.compress.err_cnt);
} else {
atomic64_inc(&alg->stats.compress.compress_cnt);
atomic64_add(slen, &alg->stats.compress.compress_tlen);
void crypto_stats_decompress(unsigned int slen, int ret, struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.compress.compress_err_cnt);
+ atomic64_inc(&alg->stats.compress.err_cnt);
} else {
atomic64_inc(&alg->stats.compress.decompress_cnt);
atomic64_add(slen, &alg->stats.compress.decompress_tlen);
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.hash.hash_err_cnt);
+ atomic64_inc(&alg->stats.hash.err_cnt);
else
atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
crypto_alg_put(alg);
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.hash.hash_err_cnt);
+ atomic64_inc(&alg->stats.hash.err_cnt);
} else {
atomic64_inc(&alg->stats.hash.hash_cnt);
atomic64_add(nbytes, &alg->stats.hash.hash_tlen);
void crypto_stats_kpp_set_secret(struct crypto_alg *alg, int ret)
{
if (ret)
- atomic64_inc(&alg->stats.kpp.kpp_err_cnt);
+ atomic64_inc(&alg->stats.kpp.err_cnt);
else
atomic64_inc(&alg->stats.kpp.setsecret_cnt);
crypto_alg_put(alg);
void crypto_stats_kpp_generate_public_key(struct crypto_alg *alg, int ret)
{
if (ret)
- atomic64_inc(&alg->stats.kpp.kpp_err_cnt);
+ atomic64_inc(&alg->stats.kpp.err_cnt);
else
atomic64_inc(&alg->stats.kpp.generate_public_key_cnt);
crypto_alg_put(alg);
void crypto_stats_kpp_compute_shared_secret(struct crypto_alg *alg, int ret)
{
if (ret)
- atomic64_inc(&alg->stats.kpp.kpp_err_cnt);
+ atomic64_inc(&alg->stats.kpp.err_cnt);
else
atomic64_inc(&alg->stats.kpp.compute_shared_secret_cnt);
crypto_alg_put(alg);
void crypto_stats_rng_seed(struct crypto_alg *alg, int ret)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- atomic64_inc(&alg->stats.rng.rng_err_cnt);
+ atomic64_inc(&alg->stats.rng.err_cnt);
else
atomic64_inc(&alg->stats.rng.seed_cnt);
crypto_alg_put(alg);
int ret)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.rng.rng_err_cnt);
+ atomic64_inc(&alg->stats.rng.err_cnt);
} else {
atomic64_inc(&alg->stats.rng.generate_cnt);
atomic64_add(dlen, &alg->stats.rng.generate_tlen);
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.cipher.cipher_err_cnt);
+ atomic64_inc(&alg->stats.cipher.err_cnt);
} else {
atomic64_inc(&alg->stats.cipher.encrypt_cnt);
atomic64_add(cryptlen, &alg->stats.cipher.encrypt_tlen);
struct crypto_alg *alg)
{
if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
- atomic64_inc(&alg->stats.cipher.cipher_err_cnt);
+ atomic64_inc(&alg->stats.cipher.err_cnt);
} else {
atomic64_inc(&alg->stats.cipher.decrypt_cnt);
atomic64_add(cryptlen, &alg->stats.cipher.decrypt_tlen);
raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen);
raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt);
raead.stat_decrypt_tlen = atomic64_read(&alg->stats.aead.decrypt_tlen);
- raead.stat_aead_err_cnt = atomic64_read(&alg->stats.aead.aead_err_cnt);
+ raead.stat_err_cnt = atomic64_read(&alg->stats.aead.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
}
rcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.cipher.encrypt_tlen);
rcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.cipher.decrypt_cnt);
rcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.cipher.decrypt_tlen);
- rcipher.stat_cipher_err_cnt = atomic64_read(&alg->stats.cipher.cipher_err_cnt);
+ rcipher.stat_err_cnt = atomic64_read(&alg->stats.cipher.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}
rcomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
rcomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
rcomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
- rcomp.stat_compress_err_cnt = atomic64_read(&alg->stats.compress.compress_err_cnt);
+ rcomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
}
racomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
racomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
racomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
- racomp.stat_compress_err_cnt = atomic64_read(&alg->stats.compress.compress_err_cnt);
+ racomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
}
rakcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.akcipher.decrypt_tlen);
rakcipher.stat_sign_cnt = atomic64_read(&alg->stats.akcipher.sign_cnt);
rakcipher.stat_verify_cnt = atomic64_read(&alg->stats.akcipher.verify_cnt);
- rakcipher.stat_akcipher_err_cnt = atomic64_read(&alg->stats.akcipher.akcipher_err_cnt);
+ rakcipher.stat_err_cnt = atomic64_read(&alg->stats.akcipher.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
sizeof(rakcipher), &rakcipher);
rkpp.stat_setsecret_cnt = atomic64_read(&alg->stats.kpp.setsecret_cnt);
rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->stats.kpp.generate_public_key_cnt);
rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->stats.kpp.compute_shared_secret_cnt);
- rkpp.stat_kpp_err_cnt = atomic64_read(&alg->stats.kpp.kpp_err_cnt);
+ rkpp.stat_err_cnt = atomic64_read(&alg->stats.kpp.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
}
rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
- rhash.stat_hash_err_cnt = atomic64_read(&alg->stats.hash.hash_err_cnt);
+ rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
}
rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
- rhash.stat_hash_err_cnt = atomic64_read(&alg->stats.hash.hash_err_cnt);
+ rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
}
rrng.stat_generate_cnt = atomic64_read(&alg->stats.rng.generate_cnt);
rrng.stat_generate_tlen = atomic64_read(&alg->stats.rng.generate_tlen);
rrng.stat_seed_cnt = atomic64_read(&alg->stats.rng.seed_cnt);
- rrng.stat_rng_err_cnt = atomic64_read(&alg->stats.rng.rng_err_cnt);
+ rrng.stat_err_cnt = atomic64_read(&alg->stats.rng.err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
}
* @encrypt_tlen: total data size handled by encrypt requests
* @decrypt_cnt: number of decrypt requests
* @decrypt_tlen: total data size handled by decrypt requests
- * @aead_err_cnt: number of error for AEAD requests
+ * @err_cnt: number of errors for AEAD requests
*/
struct crypto_istat_aead {
atomic64_t encrypt_cnt;
atomic64_t encrypt_tlen;
atomic64_t decrypt_cnt;
atomic64_t decrypt_tlen;
- atomic64_t aead_err_cnt;
+ atomic64_t err_cnt;
};
/*
* @decrypt_tlen: total data size handled by decrypt requests
* @verify_cnt: number of verify operation
* @sign_cnt: number of sign requests
- * @akcipher_err_cnt: number of error for akcipher requests
+ * @err_cnt: number of errors for akcipher requests
*/
struct crypto_istat_akcipher {
atomic64_t encrypt_cnt;
atomic64_t decrypt_tlen;
atomic64_t verify_cnt;
atomic64_t sign_cnt;
- atomic64_t akcipher_err_cnt;
+ atomic64_t err_cnt;
};
/*
* @encrypt_tlen: total data size handled by encrypt requests
* @decrypt_cnt: number of decrypt requests
* @decrypt_tlen: total data size handled by decrypt requests
- * @cipher_err_cnt: number of error for cipher requests
+ * @err_cnt: number of errors for cipher requests
*/
struct crypto_istat_cipher {
atomic64_t encrypt_cnt;
atomic64_t encrypt_tlen;
atomic64_t decrypt_cnt;
atomic64_t decrypt_tlen;
- atomic64_t cipher_err_cnt;
+ atomic64_t err_cnt;
};
/*
* @compress_tlen: total data size handled by compress requests
* @decompress_cnt: number of decompress requests
* @decompress_tlen: total data size handled by decompress requests
- * @compress_err_cnt: number of error for compress requests
+ * @err_cnt: number of errors for compress requests
*/
struct crypto_istat_compress {
atomic64_t compress_cnt;
atomic64_t compress_tlen;
atomic64_t decompress_cnt;
atomic64_t decompress_tlen;
- atomic64_t compress_err_cnt;
+ atomic64_t err_cnt;
};
/*
 * struct crypto_istat_hash - statistics for hash algorithm
* @hash_cnt: number of hash requests
* @hash_tlen: total data size hashed
- * @hash_err_cnt: number of error for hash requests
+ * @err_cnt: number of errors for hash requests
*/
struct crypto_istat_hash {
atomic64_t hash_cnt;
atomic64_t hash_tlen;
- atomic64_t hash_err_cnt;
+ atomic64_t err_cnt;
};
/*
 * @setsecret_cnt: number of setsecret operations
* @generate_public_key_cnt: number of generate_public_key operation
* @compute_shared_secret_cnt: number of compute_shared_secret operation
- * @kpp_err_cnt: number of error for KPP requests
+ * @err_cnt: number of errors for KPP requests
*/
struct crypto_istat_kpp {
atomic64_t setsecret_cnt;
atomic64_t generate_public_key_cnt;
atomic64_t compute_shared_secret_cnt;
- atomic64_t kpp_err_cnt;
+ atomic64_t err_cnt;
};
/*
* @generate_cnt: number of RNG generate requests
* @generate_tlen: total data size of generated data by the RNG
* @seed_cnt: number of times the RNG was seeded
- * @rng_err_cnt: number of error for RNG requests
+ * @err_cnt: number of errors for RNG requests
*/
struct crypto_istat_rng {
atomic64_t generate_cnt;
atomic64_t generate_tlen;
atomic64_t seed_cnt;
- atomic64_t rng_err_cnt;
+ atomic64_t err_cnt;
};
#endif /* CONFIG_CRYPTO_STATS */
__u64 stat_encrypt_tlen;
__u64 stat_decrypt_cnt;
__u64 stat_decrypt_tlen;
- __u64 stat_aead_err_cnt;
+ __u64 stat_err_cnt;
};
struct crypto_stat_akcipher {
__u64 stat_decrypt_tlen;
__u64 stat_verify_cnt;
__u64 stat_sign_cnt;
- __u64 stat_akcipher_err_cnt;
+ __u64 stat_err_cnt;
};
struct crypto_stat_cipher {
__u64 stat_encrypt_tlen;
__u64 stat_decrypt_cnt;
__u64 stat_decrypt_tlen;
- __u64 stat_cipher_err_cnt;
+ __u64 stat_err_cnt;
};
struct crypto_stat_compress {
__u64 stat_compress_tlen;
__u64 stat_decompress_cnt;
__u64 stat_decompress_tlen;
- __u64 stat_compress_err_cnt;
+ __u64 stat_err_cnt;
};
struct crypto_stat_hash {
char type[CRYPTO_MAX_NAME];
__u64 stat_hash_cnt;
__u64 stat_hash_tlen;
- __u64 stat_hash_err_cnt;
+ __u64 stat_err_cnt;
};
struct crypto_stat_kpp {
__u64 stat_setsecret_cnt;
__u64 stat_generate_public_key_cnt;
__u64 stat_compute_shared_secret_cnt;
- __u64 stat_kpp_err_cnt;
+ __u64 stat_err_cnt;
};
struct crypto_stat_rng {
__u64 stat_generate_cnt;
__u64 stat_generate_tlen;
__u64 stat_seed_cnt;
- __u64 stat_rng_err_cnt;
+ __u64 stat_err_cnt;
};
struct crypto_stat_larval {
printf("%s\tHash\n\tHash: %llu bytes: %llu\n\tErrors: %llu\n",
drivername,
rhash->stat_hash_cnt, rhash->stat_hash_tlen,
- rhash->stat_hash_err_cnt);
+ rhash->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_COMPRESS]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_COMPRESS];
struct crypto_stat_compress *rblk =
drivername,
rblk->stat_compress_cnt, rblk->stat_compress_tlen,
rblk->stat_decompress_cnt, rblk->stat_decompress_tlen,
- rblk->stat_compress_err_cnt);
+ rblk->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_ACOMP]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_ACOMP];
struct crypto_stat_compress *rcomp =
drivername,
rcomp->stat_compress_cnt, rcomp->stat_compress_tlen,
rcomp->stat_decompress_cnt, rcomp->stat_decompress_tlen,
- rcomp->stat_compress_err_cnt);
+ rcomp->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_AEAD]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_AEAD];
struct crypto_stat_aead *raead =
drivername,
raead->stat_encrypt_cnt, raead->stat_encrypt_tlen,
raead->stat_decrypt_cnt, raead->stat_decrypt_tlen,
- raead->stat_aead_err_cnt);
+ raead->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_BLKCIPHER]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_BLKCIPHER];
struct crypto_stat_cipher *rblk =
drivername,
rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
- rblk->stat_cipher_err_cnt);
+ rblk->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_AKCIPHER]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_AKCIPHER];
struct crypto_stat_akcipher *rblk =
rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
rblk->stat_sign_cnt, rblk->stat_verify_cnt,
- rblk->stat_akcipher_err_cnt);
+ rblk->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_CIPHER]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_CIPHER];
struct crypto_stat_cipher *rblk =
drivername,
rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
- rblk->stat_cipher_err_cnt);
+ rblk->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_RNG]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_RNG];
struct crypto_stat_rng *rrng =
drivername,
rrng->stat_seed_cnt,
rrng->stat_generate_cnt, rrng->stat_generate_tlen,
- rrng->stat_rng_err_cnt);
+ rrng->stat_err_cnt);
} else if (tb[CRYPTOCFGA_STAT_KPP]) {
struct rtattr *rta = tb[CRYPTOCFGA_STAT_KPP];
struct crypto_stat_kpp *rkpp =
rkpp->stat_setsecret_cnt,
rkpp->stat_generate_public_key_cnt,
rkpp->stat_compute_shared_secret_cnt,
- rkpp->stat_kpp_err_cnt);
+ rkpp->stat_err_cnt);
} else {
fprintf(stderr, "%s is of an unknown algorithm\n", drivername);
}