struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
-#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
+#define INIT_RHT_NULLS_HEAD(ptr) \
((ptr) = (typeof(ptr)) NULLS_MARKER(0))
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
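For reference, NULLS_MARKER() is the generic marker helper from include/linux/list_nulls.h, and rht_is_a_nulls() tests nothing but the low bit, so the simplified macro plants the same odd, never-a-valid-pointer value in every empty bucket. A sketch of the pieces involved (definitions as in the mainline headers, comments mine):

#define NULLS_MARKER(value) (1UL | (((long)value) << 1))

/* INIT_RHT_NULLS_HEAD(ptr) now stores (typeof(ptr))1UL: an odd value
 * that cannot alias a real struct rhash_head, since real nodes are at
 * least word-aligned. */
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

Because the marker no longer encodes the bucket hash, a single shared value, NULLS_MARKER(0), works for every bucket of every table, which is what lets the ht and hash arguments die.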
static union nested_table *nested_table_alloc(struct rhashtable *ht,
union nested_table __rcu **prev,
- unsigned int shifted,
- unsigned int nhash)
+ unsigned int shifted)
{
union nested_table *ntbl;
int i;
ntbl = rcu_dereference(*prev);
if (ntbl)
return ntbl;
ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
if (ntbl && shifted) {
for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
- INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
- (i << shifted) | nhash);
+ INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
}
rcu_assign_pointer(*prev, ntbl);
return NULL;
if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
- 0, 0)) {
+ 0)) {
kfree(tbl);
return NULL;
}
tbl->hash_rnd = get_random_u32();
for (i = 0; i < nbuckets; i++)
- INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+ INIT_RHT_NULLS_HEAD(tbl->buckets[i]);
return tbl;
}
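Since flat and nested buckets now start out identical, the end-of-chain test during a walk reduces to the low-bit check alone. Roughly what the kernel's rht_for_each() boils down to, ignoring the nested-table indirection (a sketch using rht_dereference_bucket() from include/linux/rhashtable.h):

struct rhash_head *pos;

for (pos = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
     !rht_is_a_nulls(pos);
     pos = rht_dereference_bucket(pos->next, tbl, hash))
	; /* each 'pos' is a live entry in bucket 'hash' */

No per-bucket marker value is consulted anywhere in the walk, so initialising buckets with the argument-free INIT_RHT_NULLS_HEAD() is sufficient.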
- nhash = index;
shifted = tbl->nest;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0, nhash);
+ size <= (1 << shift) ? shifted : 0);
while (ntbl && size > (1 << shift)) {
index = hash & ((1 << shift) - 1);
size >>= shift;
hash >>= shift;
- nhash |= index << shifted;
shifted += shift;
ntbl = nested_table_alloc(ht, &ntbl[index].table,
- size <= (1 << shift) ? shifted : 0,
- nhash);
+ size <= (1 << shift) ? shifted : 0);
}
if (!ntbl)
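What remains of nested_table_alloc()'s third argument is a single job: it is non-zero exactly when the level being allocated is a leaf, and only leaf slots get the nulls treatment. The union the allocator fills (from lib/rhashtable.c, comments mine) shows why the two cases must be initialised differently:

union nested_table {
	union nested_table __rcu *table;	/* interior: next level, must start NULL */
	struct rhash_head __rcu *bucket;	/* leaf: chain head, must start as a nulls marker */
};

Interior slots must stay NULL so the insert path can see which levels still need allocating, while leaf slots must carry a marker so chain walks terminate. nhash existed purely to make that marker hash-specific; with the generic NULLS_MARKER(0) the whole hash-reassembly dance (nhash = index, then nhash |= index << shifted per level) is dead weight, which is exactly what this hunk deletes.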