staging: lustre: make struct lu_site_bkt_data private
author: NeilBrown <neilb@suse.com>
Mon, 7 May 2018 00:54:48 +0000 (10:54 +1000)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 8 May 2018 11:09:19 +0000 (13:09 +0200)
This data structure only needs to be public so that
various modules can access a wait queue to wait for object
destruction.
If we provide a function to get the wait queue, rather than the
whole bucket, the structure can be made private.

Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/lustre/lustre/include/lu_object.h
drivers/staging/lustre/lustre/llite/lcommon_cl.c
drivers/staging/lustre/lustre/lov/lov_object.c
drivers/staging/lustre/lustre/obdclass/lu_object.c

index c3b0ed518819144d82f01ac9da2e17e77e64c9b1..f29bbca5af656d5a605d4504487548d9bb6eca1f 100644 (file)
@@ -549,31 +549,7 @@ struct lu_object_header {
 };
 
 struct fld;
-
-struct lu_site_bkt_data {
-       /**
-        * number of object in this bucket on the lsb_lru list.
-        */
-       long                    lsb_lru_len;
-       /**
-        * LRU list, updated on each access to object. Protected by
-        * bucket lock of lu_site::ls_obj_hash.
-        *
-        * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
-        * moved to the lu_site::ls_lru.prev (this is due to the non-existence
-        * of list_for_each_entry_safe_reverse()).
-        */
-       struct list_head                lsb_lru;
-       /**
-        * Wait-queue signaled when an object in this site is ultimately
-        * destroyed (lu_object_free()). It is used by lu_object_find() to
-        * wait before re-trying when object in the process of destruction is
-        * found in the hash table.
-        *
-        * \see htable_lookup().
-        */
-       wait_queue_head_t              lsb_marche_funebre;
-};
+struct lu_site_bkt_data;
 
 enum {
        LU_SS_CREATED    = 0,
@@ -642,14 +618,8 @@ struct lu_site {
        struct percpu_counter    ls_lru_len_counter;
 };
 
-static inline struct lu_site_bkt_data *
-lu_site_bkt_from_fid(struct lu_site *site, struct lu_fid *fid)
-{
-       struct cfs_hash_bd bd;
-
-       cfs_hash_bd_get(site->ls_obj_hash, fid, &bd);
-       return cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
-}
+wait_queue_head_t *
+lu_site_wq_from_fid(struct lu_site *site, struct lu_fid *fid);
 
 static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
 {
index df5c0c0ae703e5baf6ea153f3c0b22c0ab2d9f40..d5b42fb1d601c9646bc70ef748843f929ccfe417 100644 (file)
@@ -211,12 +211,12 @@ static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
 
        if (unlikely(atomic_read(&header->loh_ref) != 1)) {
                struct lu_site *site = obj->co_lu.lo_dev->ld_site;
-               struct lu_site_bkt_data *bkt;
+               wait_queue_head_t *wq;
 
-               bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
+               wq = lu_site_wq_from_fid(site, &header->loh_fid);
 
                init_waitqueue_entry(&waiter, current);
-               add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+               add_wait_queue(wq, &waiter);
 
                while (1) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
@@ -226,7 +226,7 @@ static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
                }
 
                set_current_state(TASK_RUNNING);
-               remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+               remove_wait_queue(wq, &waiter);
        }
 
        cl_object_put(env, obj);
index f7c69680cb7d3848d541c31ea1e7a9d7bd12956a..adc90f310fd7a9a2d99be6ba209c594f1a1981f2 100644 (file)
@@ -370,7 +370,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
        struct cl_object        *sub;
        struct lov_layout_raid0 *r0;
        struct lu_site    *site;
-       struct lu_site_bkt_data *bkt;
+       wait_queue_head_t *wq;
        wait_queue_entry_t        *waiter;
 
        r0  = &lov->u.raid0;
@@ -378,7 +378,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
 
        sub  = lovsub2cl(los);
        site = sub->co_lu.lo_dev->ld_site;
-       bkt  = lu_site_bkt_from_fid(site, &sub->co_lu.lo_header->loh_fid);
+       wq   = lu_site_wq_from_fid(site, &sub->co_lu.lo_header->loh_fid);
 
        cl_object_kill(env, sub);
        /* release a reference to the sub-object and ... */
@@ -391,7 +391,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
        if (r0->lo_sub[idx] == los) {
                waiter = &lov_env_info(env)->lti_waiter;
                init_waitqueue_entry(waiter, current);
-               add_wait_queue(&bkt->lsb_marche_funebre, waiter);
+               add_wait_queue(wq, waiter);
                set_current_state(TASK_UNINTERRUPTIBLE);
                while (1) {
                        /* this wait-queue is signaled at the end of
@@ -408,7 +408,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
                                break;
                        }
                }
-               remove_wait_queue(&bkt->lsb_marche_funebre, waiter);
+               remove_wait_queue(wq, waiter);
        }
        LASSERT(!r0->lo_sub[idx]);
 }
index be10104f8ba6b114aec02ae3c24566d482aaf135..197dd1eab3456e29bb532e6a4a08ec36d1ef0512 100644 (file)
 #include <lu_ref.h>
 #include <linux/list.h>
 
+struct lu_site_bkt_data {
+       /**
+        * number of object in this bucket on the lsb_lru list.
+        */
+       long                    lsb_lru_len;
+       /**
+        * LRU list, updated on each access to object. Protected by
+        * bucket lock of lu_site::ls_obj_hash.
+        *
+        * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
+        * moved to the lu_site::ls_lru.prev (this is due to the non-existence
+        * of list_for_each_entry_safe_reverse()).
+        */
+       struct list_head                lsb_lru;
+       /**
+        * Wait-queue signaled when an object in this site is ultimately
+        * destroyed (lu_object_free()). It is used by lu_object_find() to
+        * wait before re-trying when object in the process of destruction is
+        * found in the hash table.
+        *
+        * \see htable_lookup().
+        */
+       wait_queue_head_t              lsb_marche_funebre;
+};
+
 enum {
        LU_CACHE_PERCENT_MAX     = 50,
        LU_CACHE_PERCENT_DEFAULT = 20
@@ -88,6 +113,18 @@ MODULE_PARM_DESC(lu_cache_nr, "Maximum number of objects in lu_object cache");
 static void lu_object_free(const struct lu_env *env, struct lu_object *o);
 static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx);
 
+wait_queue_head_t *
+lu_site_wq_from_fid(struct lu_site *site, struct lu_fid *fid)
+{
+       struct cfs_hash_bd bd;
+       struct lu_site_bkt_data *bkt;
+
+       cfs_hash_bd_get(site->ls_obj_hash, fid, &bd);
+       bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
+       return &bkt->lsb_marche_funebre;
+}
+EXPORT_SYMBOL(lu_site_wq_from_fid);
+
 /**
  * Decrease reference counter on object. If last reference is freed, return
  * object to the cache, unless lu_object_is_dying(o) holds. In the latter
@@ -288,7 +325,7 @@ next:
  */
 static void lu_object_free(const struct lu_env *env, struct lu_object *o)
 {
-       struct lu_site_bkt_data *bkt;
+       wait_queue_head_t *wq;
        struct lu_site    *site;
        struct lu_object        *scan;
        struct list_head              *layers;
@@ -296,7 +333,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o)
 
        site   = o->lo_dev->ld_site;
        layers = &o->lo_header->loh_layers;
-       bkt    = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
+       wq     = lu_site_wq_from_fid(site, &o->lo_header->loh_fid);
        /*
         * First call ->loo_object_delete() method to release all resources.
         */
@@ -324,8 +361,8 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o)
                o->lo_ops->loo_object_free(env, o);
        }
 
-       if (waitqueue_active(&bkt->lsb_marche_funebre))
-               wake_up_all(&bkt->lsb_marche_funebre);
+       if (waitqueue_active(wq))
+               wake_up_all(wq);
 }
 
 /**
@@ -749,7 +786,7 @@ struct lu_object *lu_object_find_at(const struct lu_env *env,
                                    const struct lu_fid *f,
                                    const struct lu_object_conf *conf)
 {
-       struct lu_site_bkt_data *bkt;
+       wait_queue_head_t       *wq;
        struct lu_object        *obj;
        wait_queue_entry_t         wait;
 
@@ -762,8 +799,8 @@ struct lu_object *lu_object_find_at(const struct lu_env *env,
                 * wait queue.
                 */
                schedule();
-               bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
-               remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
+               wq = lu_site_wq_from_fid(dev->ld_site, (void *)f);
+               remove_wait_queue(wq, &wait);
        }
 }
 EXPORT_SYMBOL(lu_object_find_at);