btrfs: add btrfs_delete_ref_head helper
author     Josef Bacik <jbacik@fb.com>        Mon, 3 Dec 2018 15:20:29 +0000 (10:20 -0500)
committer  David Sterba <dsterba@suse.com>    Mon, 17 Dec 2018 13:51:46 +0000 (14:51 +0100)
We do the same ref head removal and accounting dance in cleanup_ref_head()
and check_ref_cleanup(); unify it into a helper and clean up the calling
functions.
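
For reference, a condensed, commented sketch of the pattern this introduces
(kernel context assumed; drawn from the diff below rather than being a
standalone-buildable unit):

  void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                             struct btrfs_delayed_ref_head *head)
  {
          /* Callers must already hold both locks. */
          lockdep_assert_held(&delayed_refs->lock);
          lockdep_assert_held(&head->lock);

          /* Unlink the head from the rbtree and fix up the accounting. */
          rb_erase_cached(&head->href_node, &delayed_refs->href_root);
          RB_CLEAR_NODE(&head->href_node);
          atomic_dec(&delayed_refs->num_entries);
          delayed_refs->num_heads--;
          if (head->processing == 0)
                  delayed_refs->num_heads_ready--;
  }

  /* e.g. in cleanup_ref_head(), with both locks held: */
  btrfs_delete_ref_head(delayed_refs, head);
  spin_unlock(&head->lock);
  spin_unlock(&delayed_refs->lock);

Both call sites keep their existing locking (delayed_refs->lock, then
head->lock) and simply call the helper before dropping the locks.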

Reviewed-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/extent-tree.c

diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 9301b3ad921705c48659b86dd9098f97f42af3e6..b3e4c9fcb664a7040d687e7e012195b28cbaff06 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -400,6 +400,20 @@ again:
        return head;
 }
 
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+                          struct btrfs_delayed_ref_head *head)
+{
+       lockdep_assert_held(&delayed_refs->lock);
+       lockdep_assert_held(&head->lock);
+
+       rb_erase_cached(&head->href_node, &delayed_refs->href_root);
+       RB_CLEAR_NODE(&head->href_node);
+       atomic_dec(&delayed_refs->num_entries);
+       delayed_refs->num_heads--;
+       if (head->processing == 0)
+               delayed_refs->num_heads_ready--;
+}
+
 /*
  * Helper to insert the ref_node to the tail or merge with tail.
  *
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 8e20c5cb5404dbbea77edab1c6ef16ead2348ed4..d2af974f68a1ac2b00c157f8297c33e451fe8d14 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -261,7 +261,8 @@ static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
 {
        mutex_unlock(&head->mutex);
 }
-
+void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
+                          struct btrfs_delayed_ref_head *head);
 
 struct btrfs_delayed_ref_head *btrfs_select_ref_head(
                struct btrfs_delayed_ref_root *delayed_refs);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b32ccdbcc0bc9e812d21110c6710ddedfb4f8302..6883636b024387db1aad5d33f0c060d05165a084 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2474,12 +2474,9 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
                spin_unlock(&delayed_refs->lock);
                return 1;
        }
-       delayed_refs->num_heads--;
-       rb_erase_cached(&head->href_node, &delayed_refs->href_root);
-       RB_CLEAR_NODE(&head->href_node);
+       btrfs_delete_ref_head(delayed_refs, head);
        spin_unlock(&head->lock);
        spin_unlock(&delayed_refs->lock);
-       atomic_dec(&delayed_refs->num_entries);
 
        trace_run_delayed_ref_head(fs_info, head, 0);
 
@@ -6987,22 +6984,9 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
        if (!mutex_trylock(&head->mutex))
                goto out;
 
-       /*
-        * at this point we have a head with no other entries.  Go
-        * ahead and process it.
-        */
-       rb_erase_cached(&head->href_node, &delayed_refs->href_root);
-       RB_CLEAR_NODE(&head->href_node);
-       atomic_dec(&delayed_refs->num_entries);
-
-       /*
-        * we don't take a ref on the node because we're removing it from the
-        * tree, so we just steal the ref the tree was holding.
-        */
-       delayed_refs->num_heads--;
-       if (head->processing == 0)
-               delayed_refs->num_heads_ready--;
+       btrfs_delete_ref_head(delayed_refs, head);
        head->processing = 0;
+
        spin_unlock(&head->lock);
        spin_unlock(&delayed_refs->lock);