bcache: update bucket_in_use in real time
authorTang Junhui <tang.junhui@zte.com.cn>
Mon, 30 Oct 2017 21:46:33 +0000 (14:46 -0700)
committerJens Axboe <axboe@kernel.dk>
Mon, 30 Oct 2017 21:57:54 +0000 (15:57 -0600)
bucket_in_use is updated in the gc thread, which is triggered by invalidating
or writing sectors_to_gc dirty data. That is a long interval, so when we use
it to compare with the threshold, it is often not timely, which leads to
inaccurate judgment and often results in bucket depletion.

We sent a patch before that updated bucket_in_use periodically in the gc
thread, but Coly thought that would lead to high latency. In this patch, we
add avail_nbuckets to record the count of available buckets, and we
calculate bucket_in_use in real time when allocating or freeing a bucket.

[edited by ML: eliminated some whitespace errors]

Signed-off-by: Tang Junhui <tang.junhui@zte.com.cn>
Signed-off-by: Michael Lyle <mlyle@lyle.org>
Reviewed-by: Michael Lyle <mlyle@lyle.org>
Reviewed-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/md/bcache/alloc.c
drivers/md/bcache/bcache.h
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h

index 4c40870e99f5a8e3ecbd283f186eac4bf1e5a74c..8c5a626343d4da98f26ae503fcf4af9bd7777d61 100644 (file)
@@ -442,6 +442,11 @@ out:
                b->prio = INITIAL_PRIO;
        }
 
+       if (ca->set->avail_nbuckets > 0) {
+               ca->set->avail_nbuckets--;
+               bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
+       }
+
        return r;
 }
 
@@ -449,6 +454,11 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
 {
        SET_GC_MARK(b, 0);
        SET_GC_SECTORS_USED(b, 0);
+
+       if (ca->set->avail_nbuckets < ca->set->nbuckets) {
+               ca->set->avail_nbuckets++;
+               bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
+       }
 }
 
 void bch_bucket_free(struct cache_set *c, struct bkey *k)
index 363ea6256b39316a41f6ab996b5e7c1c8d4fb442..e274082330dcd8b1e564576c2bedc581c830d650 100644 (file)
@@ -581,6 +581,7 @@ struct cache_set {
        uint8_t                 need_gc;
        struct gc_stat          gc_stats;
        size_t                  nbuckets;
+       size_t                  avail_nbuckets;
 
        struct task_struct      *gc_thread;
        /* Where in the btree gc currently is */
index 866dcf78ff8e691e051dace4506ae0ac760b2901..d8865e6ead37046b6d9a0b9c56baa1306a9ee089 100644 (file)
@@ -1240,6 +1240,11 @@ void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
        __bch_btree_mark_key(c, level, k);
 }
 
+void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
+{
+       stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
+}
+
 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
 {
        uint8_t stale = 0;
@@ -1651,9 +1656,8 @@ static void btree_gc_start(struct cache_set *c)
        mutex_unlock(&c->bucket_lock);
 }
 
-static size_t bch_btree_gc_finish(struct cache_set *c)
+static void bch_btree_gc_finish(struct cache_set *c)
 {
-       size_t available = 0;
        struct bucket *b;
        struct cache *ca;
        unsigned i;
@@ -1690,6 +1694,7 @@ static size_t bch_btree_gc_finish(struct cache_set *c)
        }
        rcu_read_unlock();
 
+       c->avail_nbuckets = 0;
        for_each_cache(ca, c, i) {
                uint64_t *i;
 
@@ -1711,18 +1716,16 @@ static size_t bch_btree_gc_finish(struct cache_set *c)
                        BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
 
                        if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
-                               available++;
+                               c->avail_nbuckets++;
                }
        }
 
        mutex_unlock(&c->bucket_lock);
-       return available;
 }
 
 static void bch_btree_gc(struct cache_set *c)
 {
        int ret;
-       unsigned long available;
        struct gc_stat stats;
        struct closure writes;
        struct btree_op op;
@@ -1745,14 +1748,14 @@ static void bch_btree_gc(struct cache_set *c)
                        pr_warn("gc failed!");
        } while (ret);
 
-       available = bch_btree_gc_finish(c);
+       bch_btree_gc_finish(c);
        wake_up_allocators(c);
 
        bch_time_stats_update(&c->btree_gc_time, start_time);
 
        stats.key_bytes *= sizeof(uint64_t);
        stats.data      <<= 9;
-       stats.in_use    = (c->nbuckets - available) * 100 / c->nbuckets;
+       bch_update_bucket_in_use(c, &stats);
        memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
 
        trace_bcache_gc_end(c);
index 73da1f5626cb85880f41c1e3cf8fcd9e5584001f..4073aca09a4982af8647dc5bae9cabf24f82e540 100644 (file)
@@ -305,5 +305,5 @@ void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
 struct keybuf_key *bch_keybuf_next(struct keybuf *);
 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *,
                                          struct bkey *, keybuf_pred_fn *);
-
+void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
 #endif