* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
* @hash_rnd: Random seed to fold into hash
+ * @shift: Log2 of table size; the table holds (1 << shift) buckets
* @locks_mask: Mask to apply before accessing locks[]
* @locks: Array of spinlocks protecting individual buckets
* @buckets: size * hash buckets
struct bucket_table {
size_t size;
u32 hash_rnd;
+ u32 shift;
unsigned int locks_mask;
spinlock_t *locks;
* @tbl: Bucket table
* @future_tbl: Table under construction during expansion/shrinking
* @nelems: Number of elements in table
- * @shift: Current size (1 << shift)
* @p: Configuration parameters
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
struct bucket_table __rcu *tbl;
struct bucket_table __rcu *future_tbl;
atomic_t nelems;
- atomic_t shift;
+ bool being_destroyed;
struct rhashtable_params p;
struct work_struct run_work;
struct mutex mutex;
struct list_head walkers;
- bool being_destroyed;
};
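To see what the move buys, here is a minimal reader-side sketch (illustration only, not part of the patch; example_bucket_index and the hand-rolled mask are mine, while rht_dereference_rcu() and ht->p.hashfn are the rhashtable.h accessors of this era): once a reader dereferences one table pointer under RCU, the size, shift and hash_rnd it observes are mutually consistent, even if a resize swaps ht->tbl underneath it.

	/* Illustration only: with @hash_rnd and @shift living in the
	 * bucket table itself, one RCU dereference yields a coherent
	 * (size, shift, seed) triple.
	 */
	static u32 example_bucket_index(struct rhashtable *ht, const void *key)
	{
		const struct bucket_table *tbl;
		u32 hash;

		rcu_read_lock();
		tbl = rht_dereference_rcu(ht->tbl, ht);
		/* seed and mask come from the same table: no torn view */
		hash = ht->p.hashfn(key, ht->p.key_len, tbl->hash_rnd) &
		       (tbl->size - 1);
		rcu_read_unlock();
		return hash;
	}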
/**
}
static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
- size_t nbuckets)
+ size_t nbuckets, u32 hash_rnd)
{
struct bucket_table *tbl = NULL;
size_t size;
return NULL;
tbl->size = nbuckets;
+ tbl->shift = ilog2(nbuckets);
+ tbl->hash_rnd = hash_rnd;
if (alloc_bucket_locks(ht, tbl) < 0) {
bucket_table_free(tbl);
/**
* rht_grow_above_75 - returns true if nelems > 0.75 * table-size
* @ht: hash table
- * @new_size: new table size
+ * @tbl: current table
*/
-static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static bool rht_grow_above_75(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
{
/* Expand table when exceeding 75% load */
- return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
- (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
+ return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
+ (!ht->p.max_shift || tbl->shift < ht->p.max_shift);
}
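Worked numbers, for illustration: with tbl->size == 1024 the threshold is 1024 / 4 * 3 == 768, so the predicate first fires on the 769th element, and only while tbl->shift is still below max_shift (a max_shift of 0 leaves growth uncapped).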
/**
* rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
* @ht: hash table
- * @new_size: new table size
+ * @tbl: current table
*/
-static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+static bool rht_shrink_below_30(const struct rhashtable *ht,
+ const struct bucket_table *tbl)
{
/* Shrink table beneath 30% load */
- return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
- (atomic_read(&ht->shift) > ht->p.min_shift);
+ return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
+ tbl->shift > ht->p.min_shift;
}
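By the same arithmetic, a 1024-bucket table shrinks once it holds fewer than 1024 * 3 / 10 == 307 elements, and never below min_shift. The 75%/30% pair also buys hysteresis: halving a 30%-loaded table leaves its successor at roughly 60% load, safely under the grow trigger, so the two deferred resize paths cannot ping-pong.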
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
ASSERT_RHT_MUTEX(ht);
- new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
+ new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, old_tbl->hash_rnd);
if (new_tbl == NULL)
return -ENOMEM;
- new_tbl->hash_rnd = old_tbl->hash_rnd;
-
- atomic_inc(&ht->shift);
-
rhashtable_rehash(ht, new_tbl);
-
return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
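The two deleted statements are not an accident: since bucket_table_alloc() now derives shift from ilog2(nbuckets), allocating at old_tbl->size * 2 yields a table whose shift is exactly one higher, and passing old_tbl->hash_rnd through keeps the seed stable across the rehash, so no hashtable-wide counter needs touching.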
*/
int rhashtable_shrink(struct rhashtable *ht)
{
- struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
+ struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
ASSERT_RHT_MUTEX(ht);
- new_tbl = bucket_table_alloc(ht, tbl->size / 2);
+ new_tbl = bucket_table_alloc(ht, old_tbl->size / 2, old_tbl->hash_rnd);
if (new_tbl == NULL)
return -ENOMEM;
- new_tbl->hash_rnd = tbl->hash_rnd;
-
- atomic_dec(&ht->shift);
-
rhashtable_rehash(ht, new_tbl);
-
return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
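Shrinking is symmetric: ilog2(old_tbl->size / 2) comes out one lower than the old shift, which is precisely what the removed atomic_dec(&ht->shift) used to maintain by hand.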
list_for_each_entry(walker, &ht->walkers, list)
walker->resize = true;
- if (rht_grow_above_75(ht, tbl->size))
+ if (rht_grow_above_75(ht, tbl))
rhashtable_expand(ht);
- else if (rht_shrink_below_30(ht, tbl->size))
+ else if (rht_shrink_below_30(ht, tbl))
rhashtable_shrink(ht);
unlock:
mutex_unlock(&ht->mutex);
rcu_assign_pointer(tbl->buckets[hash], obj);
atomic_inc(&ht->nelems);
- if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+ if (no_resize_running && rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
exit:
bool no_resize_running = tbl == old_tbl;
atomic_dec(&ht->nelems);
- if (no_resize_running && rht_shrink_below_30(ht, tbl->size))
+ if (no_resize_running && rht_shrink_below_30(ht, tbl))
schedule_work(&ht->run_work);
}
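Note that both fast paths only schedule_work() when tbl == old_tbl, i.e. when no resize is already in flight; the actual expand or shrink happens in the deferred worker shown earlier, under ht->mutex, where the predicates are re-evaluated against the then-current table.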
{
struct bucket_table *tbl;
size_t size;
+ u32 hash_rnd;
size = HASH_DEFAULT_SIZE;
else
ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
- tbl = bucket_table_alloc(ht, size);
+ get_random_bytes(&hash_rnd, sizeof(hash_rnd));
+
+ tbl = bucket_table_alloc(ht, size, hash_rnd);
if (tbl == NULL)
return -ENOMEM;
- get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
-
atomic_set(&ht->nelems, 0);
- atomic_set(&ht->shift, ilog2(tbl->size));
+
RCU_INIT_POINTER(ht->tbl, tbl);
RCU_INIT_POINTER(ht->future_tbl, tbl);
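Finally, a hypothetical usage sketch of the init-time change (struct my_obj and its fields are invented for illustration; jhash matches the rht_hashfn_t signature of this era): the caller never sees the seed, which is drawn once in rhashtable_init() and threaded through every later bucket_table_alloc() call.

	/* illustration only: my_obj is a made-up embedding type */
	struct my_obj {
		u32 key;
		struct rhash_head node;
	};

	struct rhashtable ht;
	struct rhashtable_params params = {
		.head_offset = offsetof(struct my_obj, node),
		.key_offset  = offsetof(struct my_obj, key),
		.key_len     = sizeof(u32),
		.hashfn      = jhash,
	};

	if (rhashtable_init(&ht, &params))
		return -ENOMEM;	/* or -EINVAL for bad params */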