Btrfs: delayed-refs: use rb_first_cached for ref_tree
author Liu Bo <bo.liu@linux.alibaba.com>
Wed, 22 Aug 2018 19:51:50 +0000 (03:51 +0800)
committer David Sterba <dsterba@suse.com>
Mon, 15 Oct 2018 15:23:33 +0000 (17:23 +0200)
rb_first_cached() trades an extra "leftmost" pointer for doing the same
job as rb_first(), but in O(1) rather than O(log n).

Functions manipulating href->ref_tree need to get the first entry; convert
href->ref_tree to an rb_root_cached so that rb_first_cached() can be used.

For more details about the optimization, see the patch "Btrfs:
delayed-refs: use rb_first_cached for href_root".
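
For reference, the cached flavour of the rbtree API being adopted looks
like this (abridged from include/linux/rbtree.h; shown only to
illustrate the commit, not part of the change):

	/* Pairs the plain root with a pointer to the leftmost node. */
	struct rb_root_cached {
		struct rb_root rb_root;
		struct rb_node *rb_leftmost;
	};

	#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }

	/* Same job as rb_first(), but O(1): return the cached pointer. */
	#define rb_first_cached(root) (root)->rb_leftmost

	/*
	 * Insert takes a hint saying whether the new node became the
	 * leftmost; erase fixes up the cache when the leftmost node
	 * is removed.
	 */
	extern void rb_insert_color_cached(struct rb_node *,
					   struct rb_root_cached *, bool);
	extern void rb_erase_cached(struct rb_node *node,
				    struct rb_root_cached *);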

Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Signed-off-by: Liu Bo <bo.liu@linux.alibaba.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/backref.c
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c

index 84006e3dd105acab368ac899e0579627a371048b..1854835e082bb486405add66526d5900ad0bac0b 100644
@@ -769,7 +769,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
                btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
 
        spin_lock(&head->lock);
-       for (n = rb_first(&head->ref_tree); n; n = rb_next(n)) {
+       for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
                node = rb_entry(n, struct btrfs_delayed_ref_node,
                                ref_node);
                if (node->seq > seq)
index f07952e16a3bf07a07670e8d32200ac7f64cb39c..7f567c944fece4c861386382bb491eec04bc8884 100644
@@ -133,13 +133,14 @@ static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
        return NULL;
 }
 
-static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
+static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
                struct btrfs_delayed_ref_node *ins)
 {
-       struct rb_node **p = &root->rb_node;
+       struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *node = &ins->ref_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
+       bool leftmost = true;
 
        while (*p) {
                int comp;
@@ -148,16 +149,18 @@ static struct btrfs_delayed_ref_node* tree_insert(struct rb_root *root,
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 ref_node);
                comp = comp_refs(ins, entry, true);
-               if (comp < 0)
+               if (comp < 0) {
                        p = &(*p)->rb_left;
-               else if (comp > 0)
+               } else if (comp > 0) {
                        p = &(*p)->rb_right;
-               else
+                       leftmost = false;
+               } else {
                        return entry;
+               }
        }
 
        rb_link_node(node, parent_node, p);
-       rb_insert_color(node, root);
+       rb_insert_color_cached(node, root, leftmost);
        return NULL;
 }
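
The "leftmost" bookkeeping in tree_insert() above follows the generic
pattern for rb_root_cached insertion: the new node can only become the
tree's first entry if the descent never takes a right branch. A
hypothetical, minimal sketch of that pattern (demo_node and demo_insert
are illustrative names, not part of this patch):

	struct demo_node {
		struct rb_node node;
		u64 key;
	};

	static void demo_insert(struct rb_root_cached *root,
				struct demo_node *new)
	{
		struct rb_node **p = &root->rb_root.rb_node;
		struct rb_node *parent = NULL;
		bool leftmost = true;

		while (*p) {
			struct demo_node *entry;

			parent = *p;
			entry = rb_entry(parent, struct demo_node, node);
			if (new->key < entry->key) {
				p = &(*p)->rb_left;
			} else {
				/* Something sorts before 'new'. */
				p = &(*p)->rb_right;
				leftmost = false;
			}
		}

		rb_link_node(&new->node, parent, p);
		/* Updates root->rb_leftmost when 'leftmost' is true. */
		rb_insert_color_cached(&new->node, root, leftmost);
	}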
 
@@ -231,7 +234,7 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
                                    struct btrfs_delayed_ref_node *ref)
 {
        lockdep_assert_held(&head->lock);
-       rb_erase(&ref->ref_node, &head->ref_tree);
+       rb_erase_cached(&ref->ref_node, &head->ref_tree);
        RB_CLEAR_NODE(&ref->ref_node);
        if (!list_empty(&ref->add_list))
                list_del(&ref->add_list);
@@ -300,7 +303,7 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
 
        lockdep_assert_held(&head->lock);
 
-       if (RB_EMPTY_ROOT(&head->ref_tree))
+       if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
                return;
 
        /* We don't have too many refs to merge for data. */
@@ -318,7 +321,8 @@ void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
        spin_unlock(&fs_info->tree_mod_seq_lock);
 
 again:
-       for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+       for (node = rb_first_cached(&head->ref_tree); node;
+            node = rb_next(node)) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                if (seq && ref->seq >= seq)
                        continue;
@@ -573,7 +577,7 @@ static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        head_ref->is_system = is_system;
-       head_ref->ref_tree = RB_ROOT;
+       head_ref->ref_tree = RB_ROOT_CACHED;
        INIT_LIST_HEAD(&head_ref->ref_add_list);
        RB_CLEAR_NODE(&head_ref->href_node);
        head_ref->processing = 0;
index 88438b6cee451cac710e72282c7b70e7ee6e7228..c3e3486a126c234f5ffa4f91ddcde7775d715318 100644
@@ -79,7 +79,7 @@ struct btrfs_delayed_ref_head {
        struct mutex mutex;
 
        spinlock_t lock;
-       struct rb_root ref_tree;
+       struct rb_root_cached ref_tree;
        /* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
        struct list_head ref_add_list;
 
index a21a9678681384e7d3d32f60b0a76dd7f306c24d..517484ecad968dc1d83cf0a5ecc1871398c50fac 100644
@@ -4221,11 +4221,11 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                        continue;
                }
                spin_lock(&head->lock);
-               while ((n = rb_first(&head->ref_tree)) != NULL) {
+               while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
                        ref = rb_entry(n, struct btrfs_delayed_ref_node,
                                       ref_node);
                        ref->in_tree = 0;
-                       rb_erase(&ref->ref_node, &head->ref_tree);
+                       rb_erase_cached(&ref->ref_node, &head->ref_tree);
                        RB_CLEAR_NODE(&ref->ref_node);
                        if (!list_empty(&ref->add_list))
                                list_del(&ref->add_list);
index 30b3d8561768e4ea29036fe33ca62d8677533320..26fb9cbf380792d45aab9740c1957992bcf5357d 100644
@@ -2374,7 +2374,7 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
 {
        struct btrfs_delayed_ref_node *ref;
 
-       if (RB_EMPTY_ROOT(&head->ref_tree))
+       if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
                return NULL;
 
        /*
@@ -2387,7 +2387,7 @@ select_delayed_ref(struct btrfs_delayed_ref_head *head)
                return list_first_entry(&head->ref_add_list,
                                struct btrfs_delayed_ref_node, add_list);
 
-       ref = rb_entry(rb_first(&head->ref_tree),
+       ref = rb_entry(rb_first_cached(&head->ref_tree),
                       struct btrfs_delayed_ref_node, ref_node);
        ASSERT(list_empty(&ref->add_list));
        return ref;
@@ -2448,7 +2448,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
        spin_unlock(&head->lock);
        spin_lock(&delayed_refs->lock);
        spin_lock(&head->lock);
-       if (!RB_EMPTY_ROOT(&head->ref_tree) || head->extent_op) {
+       if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) {
                spin_unlock(&head->lock);
                spin_unlock(&delayed_refs->lock);
                return 1;
@@ -2597,7 +2597,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 
                actual_count++;
                ref->in_tree = 0;
-               rb_erase(&ref->ref_node, &locked_ref->ref_tree);
+               rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
                RB_CLEAR_NODE(&ref->ref_node);
                if (!list_empty(&ref->add_list))
                        list_del(&ref->add_list);
@@ -3040,7 +3040,8 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
         * XXX: We should replace this with a proper search function in the
         * future.
         */
-       for (node = rb_first(&head->ref_tree); node; node = rb_next(node)) {
+       for (node = rb_first_cached(&head->ref_tree); node;
+            node = rb_next(node)) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                /* If it's a shared ref we know a cross reference exists */
                if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
@@ -6908,7 +6909,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
                goto out_delayed_unlock;
 
        spin_lock(&head->lock);
-       if (!RB_EMPTY_ROOT(&head->ref_tree))
+       if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root))
                goto out;
 
        if (head->extent_op) {