struct cfs_hash_ops {
/** return hashed value from @key */
- unsigned (*hs_hash)(struct cfs_hash *hs, const void *key,
- unsigned mask);
+ unsigned int (*hs_hash)(struct cfs_hash *hs, const void *key,
+ unsigned int mask);
/** return key address of @hnode */
void * (*hs_key)(struct hlist_node *hnode);
/** copy key from @hnode to @key */
}
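For context, a minimal sketch (not part of this patch) of how a caller might wire up the two callbacks shown above for string keys; the remaining cfs_hash_ops callbacks (key compare, object get/put, etc.) are omitted, and the object layout is purely illustrative.

/* hypothetical object stored in a cfs_hash */
struct my_obj {
	struct hlist_node	 mo_hnode;	/* linkage owned by cfs_hash */
	char			*mo_name;	/* lookup key */
};

static unsigned int
my_obj_hash(struct cfs_hash *hs, const void *key, unsigned int mask)
{
	/* hash the name string down to a bucket index */
	return cfs_hash_djb2_hash(key, strlen((const char *)key), mask);
}

static void *
my_obj_key(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct my_obj, mo_hnode)->mo_name;
}

static struct cfs_hash_ops my_obj_hash_ops = {
	.hs_hash	= my_obj_hash,
	.hs_key		= my_obj_key,
	/* key compare and object get/put callbacks omitted in this sketch */
};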
static inline unsigned
-cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
+cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned int mask)
{
return hs->hs_ops->hs_hash(hs, key, mask);
}
}
static inline void
-cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned index,
+cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned int index,
struct cfs_hash_bd *bd)
{
bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
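The shift above works because a hash slot index encodes both the bucket and the position inside that bucket; an illustration (not from the patch), assuming bkt_bits = 3 so each bucket covers 8 consecutive slots:

/* slot 21 (0b10101) with bkt_bits = 3: bucket 2, offset 5 */
static unsigned int bucket_of(unsigned int index, unsigned int bkt_bits)
{
	return index >> bkt_bits;		/* bucket_of(21, 3) == 2 */
}

static unsigned int offset_in_bucket(unsigned int index, unsigned int bkt_bits)
{
	return index & ((1U << bkt_bits) - 1);	/* offset_in_bucket(21, 3) == 5 */
}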
/* Hash init/cleanup functions */
struct cfs_hash *
-cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
- unsigned bkt_bits, unsigned extra_bytes,
- unsigned min_theta, unsigned max_theta,
- struct cfs_hash_ops *ops, unsigned flags);
+cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
+ unsigned int bkt_bits, unsigned int extra_bytes,
+ unsigned int min_theta, unsigned int max_theta,
+ struct cfs_hash_ops *ops, unsigned int flags);
struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
void cfs_hash_putref(struct cfs_hash *hs);
void
cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data);
void
-cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
+cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
cfs_hash_for_each_cb_t, void *data);
int cfs_hash_is_empty(struct cfs_hash *hs);
__u64 cfs_hash_size_get(struct cfs_hash *hs);
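A hedged sketch of the create/teardown lifecycle for the declarations above; the table sizes, theta bounds and flags passed here are illustrative placeholders, not values taken from the patch (my_obj_hash_ops is the hypothetical ops table sketched earlier):

static struct cfs_hash *my_hash;

static int my_hash_init(void)
{
	my_hash = cfs_hash_create("my_objs",
				  5,	/* cur_bits: initial table size, in bits */
				  10,	/* max_bits: upper bound for rehashing */
				  3,	/* bkt_bits: slots per bucket, in bits */
				  0,	/* extra_bytes */
				  1, 7,	/* min_theta, max_theta (illustrative) */
				  &my_obj_hash_ops,
				  0);	/* flags */
	return my_hash ? 0 : -ENOMEM;
}

static void my_hash_fini(void)
{
	LASSERT(cfs_hash_is_empty(my_hash));
	cfs_hash_putref(my_hash);	/* drops the creation reference */
}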
* Generic djb2 hash algorithm for character arrays.
*/
static inline unsigned
-cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
+cfs_hash_djb2_hash(const void *key, size_t size, unsigned int mask)
{
- unsigned i, hash = 5381;
+ unsigned int i, hash = 5381;
LASSERT(key != NULL);
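The loop body is elided in this hunk; for reference, the classic djb2 recurrence the helper is built around looks like this (a standalone sketch, the in-tree body may differ in detail):

static unsigned int djb2(const char *s, size_t len, unsigned int mask)
{
	unsigned int i, hash = 5381;

	for (i = 0; i < len; i++)
		hash = hash * 33 + s[i];	/* hash = hash * 33 + c */
	return hash & mask;			/* mask down to a bucket index */
}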
* Generic u32 hash algorithm.
*/
static inline unsigned
-cfs_hash_u32_hash(const __u32 key, unsigned mask)
+cfs_hash_u32_hash(const __u32 key, unsigned int mask)
{
return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
}
* Generic u64 hash algorithm.
*/
static inline unsigned
-cfs_hash_u64_hash(const __u64 key, unsigned mask)
+cfs_hash_u64_hash(const __u64 key, unsigned int mask)
{
- return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
+ return ((unsigned int)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
}
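Both multiplicative helpers assume mask is 2^n - 1, i.e. the table size is a power of two, so the final & mask is a cheap modulo; for example (illustrative values):

static unsigned int pick_bucket(__u32 key)
{
	unsigned int mask = 256 - 1;		/* 256 buckets -> mask = 0xff */

	return cfs_hash_u32_hash(key, mask);	/* always < 256 */
}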
/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
char *cfs_trimwhite(char *str);
int cfs_gettok(struct cfs_lstr *next, char delim, struct cfs_lstr *res);
-int cfs_str2num_check(char *str, int nob, unsigned *num,
- unsigned min, unsigned max);
+int cfs_str2num_check(char *str, int nob, unsigned int *num,
+ unsigned int min, unsigned int max);
int cfs_expr_list_match(__u32 value, struct cfs_expr_list *expr_list);
int cfs_expr_list_print(char *buffer, int count,
struct cfs_expr_list *expr_list);
}
void cfs_expr_list_free(struct cfs_expr_list *expr_list);
-int cfs_expr_list_parse(char *str, int len, unsigned min, unsigned max,
+int cfs_expr_list_parse(char *str, int len, unsigned int min, unsigned int max,
struct cfs_expr_list **elpp);
void cfs_expr_list_free_list(struct list_head *list);
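A sketch of how the parse/match/free trio declared above is typically driven; the "0-3,7" range string and the 0..127 bounds are illustrative, not taken from the patch:

static int cpu_is_listed(__u32 cpu)
{
	static char expr[] = "0-3,7";
	struct cfs_expr_list *el;
	int rc;

	rc = cfs_expr_list_parse(expr, sizeof(expr) - 1, 0, 127, &el);
	if (rc)
		return rc;	/* parse failure */

	rc = cfs_expr_list_match(cpu, el);	/* non-zero if cpu is covered */
	cfs_expr_list_free(el);
	return rc;
}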
/* nodes mask for this partition */
nodemask_t *cpt_nodemask;
/* spread rotor for NUMA allocator */
- unsigned cpt_spread_rotor;
+ unsigned int cpt_spread_rotor;
};
/** descriptor for CPU partitions */
struct cfs_cpt_table {
/* version, reserved for hotplug */
- unsigned ctb_version;
+ unsigned int ctb_version;
/* spread rotor for NUMA allocator */
- unsigned ctb_spread_rotor;
+ unsigned int ctb_spread_rotor;
/* # of CPU partitions */
- unsigned ctb_nparts;
+ unsigned int ctb_nparts;
/* partitions tables */
struct cfs_cpu_partition *ctb_parts;
/* shadow HW CPU to CPU partition ID */