btrfs: add cleanup_ref_head_accounting helper
author	Josef Bacik <jbacik@fb.com>
	Mon, 3 Dec 2018 15:20:30 +0000 (10:20 -0500)
committer	David Sterba <dsterba@suse.com>
	Mon, 17 Dec 2018 13:51:46 +0000 (14:51 +0100)
We were missing some quota cleanups in check_ref_cleanup, so break the
ref head accounting cleanup into a helper and call that from both
check_ref_cleanup and cleanup_ref_head.  This will hopefully ensure that
we don't screw up accounting in the future for other things that we add.
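To illustrate the shape of the fix outside the btrfs code, here is a minimal,
self-contained C sketch (hypothetical names, not kernel code) of the same
pattern: two teardown paths that must perform identical accounting,
consolidated into one shared helper so neither path can silently miss a step.

#include <stdio.h>

/* Hypothetical stand-in for a delayed ref head's accounting state. */
struct head {
	long total_ref_mod;	/* net ref count change while queued */
	long num_bytes;		/* bytes this head accounts for */
	long qgroup_reserved;	/* reserved quota to release */
};

static long total_bytes_pinned;
static long qgroup_outstanding;

/*
 * Shared accounting cleanup, analogous to cleanup_ref_head_accounting():
 * both teardown paths call it, so the quota release can no longer be
 * forgotten on one of them.
 */
static void cleanup_head_accounting(struct head *h)
{
	if (h->total_ref_mod < 0)
		total_bytes_pinned -= h->num_bytes;
	qgroup_outstanding -= h->qgroup_reserved; /* the step that was missed */
}

/* Normal path: the head is processed when delayed refs are run. */
static void run_head(struct head *h)
{
	cleanup_head_accounting(h);
	/* ... insert or free the extent here ... */
}

/* Early path: the last reference went away before the head ever ran. */
static void drop_head_early(struct head *h)
{
	cleanup_head_accounting(h);	/* previously skipped on this path */
	/* ... unlink the head and free it here ... */
}

int main(void)
{
	struct head a = { .total_ref_mod = -1, .num_bytes = 4096,
			  .qgroup_reserved = 4096 };
	struct head b = { .total_ref_mod = -1, .num_bytes = 8192,
			  .qgroup_reserved = 8192 };

	total_bytes_pinned = a.num_bytes + b.num_bytes;
	qgroup_outstanding = a.qgroup_reserved + b.qgroup_reserved;

	run_head(&a);		/* normal processing */
	drop_head_early(&b);	/* early drop, same accounting */

	/* Both counters drain to zero regardless of which path ran. */
	printf("pinned=%ld qgroup=%ld\n", total_bytes_pinned, qgroup_outstanding);
	return 0;
}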

Reviewed-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Liu Bo <bo.liu@linux.alibaba.com>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/extent-tree.c

index 6883636b024387db1aad5d33f0c060d05165a084..ea1a85ee2c5d69aa03a1996d9f650090a648b08b 100644
@@ -2443,6 +2443,41 @@ static int cleanup_extent_op(struct btrfs_trans_handle *trans,
        return ret ? ret : 1;
 }
 
+static void cleanup_ref_head_accounting(struct btrfs_trans_handle *trans,
+                                       struct btrfs_delayed_ref_head *head)
+{
+       struct btrfs_fs_info *fs_info = trans->fs_info;
+       struct btrfs_delayed_ref_root *delayed_refs =
+               &trans->transaction->delayed_refs;
+
+       if (head->total_ref_mod < 0) {
+               struct btrfs_space_info *space_info;
+               u64 flags;
+
+               if (head->is_data)
+                       flags = BTRFS_BLOCK_GROUP_DATA;
+               else if (head->is_system)
+                       flags = BTRFS_BLOCK_GROUP_SYSTEM;
+               else
+                       flags = BTRFS_BLOCK_GROUP_METADATA;
+               space_info = __find_space_info(fs_info, flags);
+               ASSERT(space_info);
+               percpu_counter_add_batch(&space_info->total_bytes_pinned,
+                                  -head->num_bytes,
+                                  BTRFS_TOTAL_BYTES_PINNED_BATCH);
+
+               if (head->is_data) {
+                       spin_lock(&delayed_refs->lock);
+                       delayed_refs->pending_csums -= head->num_bytes;
+                       spin_unlock(&delayed_refs->lock);
+               }
+       }
+
+       /* Also free its reserved qgroup space */
+       btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
+                                     head->qgroup_reserved);
+}
+
 static int cleanup_ref_head(struct btrfs_trans_handle *trans,
                            struct btrfs_delayed_ref_head *head)
 {
@@ -2478,31 +2513,6 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
        spin_unlock(&head->lock);
        spin_unlock(&delayed_refs->lock);
 
-       trace_run_delayed_ref_head(fs_info, head, 0);
-
-       if (head->total_ref_mod < 0) {
-               struct btrfs_space_info *space_info;
-               u64 flags;
-
-               if (head->is_data)
-                       flags = BTRFS_BLOCK_GROUP_DATA;
-               else if (head->is_system)
-                       flags = BTRFS_BLOCK_GROUP_SYSTEM;
-               else
-                       flags = BTRFS_BLOCK_GROUP_METADATA;
-               space_info = __find_space_info(fs_info, flags);
-               ASSERT(space_info);
-               percpu_counter_add_batch(&space_info->total_bytes_pinned,
-                                  -head->num_bytes,
-                                  BTRFS_TOTAL_BYTES_PINNED_BATCH);
-
-               if (head->is_data) {
-                       spin_lock(&delayed_refs->lock);
-                       delayed_refs->pending_csums -= head->num_bytes;
-                       spin_unlock(&delayed_refs->lock);
-               }
-       }
-
        if (head->must_insert_reserved) {
                btrfs_pin_extent(fs_info, head->bytenr,
                                 head->num_bytes, 1);
@@ -2512,9 +2522,9 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans,
                }
        }
 
-       /* Also free its reserved qgroup space */
-       btrfs_qgroup_free_delayed_ref(fs_info, head->qgroup_ref_root,
-                                     head->qgroup_reserved);
+       cleanup_ref_head_accounting(trans, head);
+
+       trace_run_delayed_ref_head(fs_info, head, 0);
        btrfs_delayed_ref_unlock(head);
        btrfs_put_delayed_ref_head(head);
        return 0;
@@ -6994,6 +7004,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
        if (head->must_insert_reserved)
                ret = 1;
 
+       cleanup_ref_head_accounting(trans, head);
        mutex_unlock(&head->mutex);
        btrfs_put_delayed_ref_head(head);
        return ret;