rhashtable: allow rht_bucket_var to return NULL.
author     NeilBrown <neilb@suse.com>
           Mon, 1 Apr 2019 23:07:45 +0000 (10:07 +1100)
committer  David S. Miller <davem@davemloft.net>
           Mon, 8 Apr 2019 02:12:12 +0000 (19:12 -0700)
Rather than returning a pointer to a static nulls, rht_bucket_var()
now returns NULL if the bucket doesn't exist.
This will make the next patch, which stores a bitlock in the
bucket pointer, somewhat cleaner.

This change involves introducing __rht_bucket_nested() which is
like rht_bucket_nested(), but doesn't provide the static nulls,
and changing rht_bucket_nested() to call this and possibly
provide the static nulls - as is still needed for the non-var case.
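
For illustration only (not part of this patch): with this change, a
writer-side caller of rht_bucket_var() must treat a NULL return as
"this bucket does not exist" rather than dereferencing it. A minimal
sketch of that pattern (hypothetical helper name, usual bucket lock
assumed to be held by the caller):

  #include <linux/rhashtable.h>

  /* Hypothetical caller sketch, not from the patch: rht_bucket_var()
   * may now return NULL when the nested bucket page was never
   * allocated, which simply means nothing can hash to this bucket.
   */
  static void example_bucket_walk(struct bucket_table *tbl,
                                  unsigned int hash)
  {
          struct rhash_head __rcu **pprev;
          struct rhash_head *he;

          pprev = rht_bucket_var(tbl, hash);
          if (!pprev)             /* bucket absent: nothing to walk */
                  return;

          rht_for_each_from(he, *pprev, tbl, hash) {
                  /* inspect or unlink 'he' here */
          }
  }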

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/rhashtable.h
lib/rhashtable.c

index 86dfa417848d36367e1d42f8f92a2e62b46c2f7b..0c9175aeab8ad24fd277e6c6e99af05bdb0691eb 100644
@@ -265,6 +265,8 @@ void rhashtable_destroy(struct rhashtable *ht);
 
 struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
                                            unsigned int hash);
+struct rhash_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
+                                             unsigned int hash);
 struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
                                                   struct bucket_table *tbl,
                                                   unsigned int hash);
@@ -294,7 +296,7 @@ static inline struct rhash_head __rcu *const *rht_bucket(
 static inline struct rhash_head __rcu **rht_bucket_var(
        struct bucket_table *tbl, unsigned int hash)
 {
-       return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+       return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
                                     &tbl->buckets[hash];
 }
 
@@ -890,6 +892,8 @@ static inline int __rhashtable_remove_fast_one(
        spin_lock_bh(lock);
 
        pprev = rht_bucket_var(tbl, hash);
+       if (!pprev)
+               goto out;
        rht_for_each_from(he, *pprev, tbl, hash) {
                struct rhlist_head *list;
 
@@ -934,6 +938,7 @@ static inline int __rhashtable_remove_fast_one(
                break;
        }
 
+out:
        spin_unlock_bh(lock);
 
        if (err > 0) {
@@ -1042,6 +1047,8 @@ static inline int __rhashtable_replace_fast(
        spin_lock_bh(lock);
 
        pprev = rht_bucket_var(tbl, hash);
+       if (!pprev)
+               goto out;
        rht_for_each_from(he, *pprev, tbl, hash) {
                if (he != obj_old) {
                        pprev = &he->next;
@@ -1053,7 +1060,7 @@ static inline int __rhashtable_replace_fast(
                err = 0;
                break;
        }
-
+out:
        spin_unlock_bh(lock);
 
        return err;
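
For context only (not part of this patch): the read-side rht_bucket()
path is unchanged - it still ends up in rht_bucket_nested(), which
falls back to the static nulls marker, so RCU readers never see NULL
and need no extra check. A hedged sketch of such a reader (assuming
the caller holds rcu_read_lock()):

  #include <linux/rhashtable.h>

  /* Illustrative reader, not from the patch: rht_for_each_rcu() walks
   * the bucket via rht_bucket(), which still returns a dereferenceable
   * pointer (possibly the static nulls), so no NULL check is needed.
   */
  static bool example_bucket_empty(const struct bucket_table *tbl,
                                   unsigned int hash)
  {
          struct rhash_head *he;

          rht_for_each_rcu(he, tbl, hash)
                  return false;   /* at least one entry present */
          return true;
  }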
index 6c4f5c8e9baa6ef85bd7e690f0b30c9ea7ad0f4b..b28fdd560ea915b0447f095a0f32f100475a920c 100644
@@ -237,8 +237,10 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
                goto out;
 
        err = -ENOENT;
+       if (!pprev)
+               goto out;
 
-       rht_for_each(entry, old_tbl, old_hash) {
+       rht_for_each_from(entry, *pprev, old_tbl, old_hash) {
                err = 0;
                next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
 
@@ -496,6 +498,8 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 
        elasticity = RHT_ELASTICITY;
        pprev = rht_bucket_var(tbl, hash);
+       if (!pprev)
+               return ERR_PTR(-ENOENT);
        rht_for_each_from(head, *pprev, tbl, hash) {
                struct rhlist_head *list;
                struct rhlist_head *plist;
@@ -1161,11 +1165,10 @@ void rhashtable_destroy(struct rhashtable *ht)
 }
 EXPORT_SYMBOL_GPL(rhashtable_destroy);
 
-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
-                                           unsigned int hash)
+struct rhash_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
+                                             unsigned int hash)
 {
        const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
-       static struct rhash_head __rcu *rhnull;
        unsigned int index = hash & ((1 << tbl->nest) - 1);
        unsigned int size = tbl->size >> tbl->nest;
        unsigned int subhash = hash;
@@ -1183,15 +1186,23 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
                subhash >>= shift;
        }
 
-       if (!ntbl) {
-               if (!rhnull)
-                       INIT_RHT_NULLS_HEAD(rhnull);
-               return &rhnull;
-       }
+       if (!ntbl)
+               return NULL;
 
        return &ntbl[subhash].bucket;
 
 }
+EXPORT_SYMBOL_GPL(__rht_bucket_nested);
+
+struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+                                           unsigned int hash)
+{
+       static struct rhash_head __rcu *rhnull;
+
+       if (!rhnull)
+               INIT_RHT_NULLS_HEAD(rhnull);
+       return __rht_bucket_nested(tbl, hash) ?: &rhnull;
+}
 EXPORT_SYMBOL_GPL(rht_bucket_nested);
 
 struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,