btrfs: rework btrfs_check_space_for_delayed_refs
authorJosef Bacik <josef@toxicpanda.com>
Mon, 3 Dec 2018 15:20:36 +0000 (10:20 -0500)
committerDavid Sterba <dsterba@suse.com>
Mon, 17 Dec 2018 13:51:47 +0000 (14:51 +0100)
With the delayed_refs_rsv we can now know exactly how much pending
delayed refs space we need.  This means we can drastically simplify
btrfs_check_space_for_delayed_refs by simply checking how much space we
have reserved for the global rsv (which acts as a spill over buffer) and
the delayed refs rsv.  If our total size is beyond that amount then we
know it's time to commit the transaction and stop any more delayed refs
from being generated.

With the introduction of the delayed_refs_rsv infrastructure, namely
btrfs_update_delayed_refs_rsv, we now know exactly how much pending
delayed refs space is required.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/transaction.c

index d07a23f15e22732252ea792bff4dcd025b605a3c..6520e8e70b09e8e2f65cf45872a1db1160c3a561 100644 (file)
@@ -2651,7 +2651,7 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
 }
 
 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
-int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans);
+bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
                                         const u64 start);
 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
index 1b10c2fe7c7d805520e935479660bde3f8747236..3abb03b5ab228ddc28271a7a1e60b73d2b4059b3 100644 (file)
@@ -2839,40 +2839,28 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes)
        return num_csums;
 }
 
-int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans)
+bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
 {
-       struct btrfs_fs_info *fs_info = trans->fs_info;
-       struct btrfs_block_rsv *global_rsv;
-       u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
-       u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
-       unsigned int num_dirty_bgs = trans->transaction->num_dirty_bgs;
-       u64 num_bytes, num_dirty_bgs_bytes;
-       int ret = 0;
+       struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+       struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+       bool ret = false;
+       u64 reserved;
 
-       num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
-       num_heads = heads_to_leaves(fs_info, num_heads);
-       if (num_heads > 1)
-               num_bytes += (num_heads - 1) * fs_info->nodesize;
-       num_bytes <<= 1;
-       num_bytes += btrfs_csum_bytes_to_leaves(fs_info, csum_bytes) *
-                                                       fs_info->nodesize;
-       num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(fs_info,
-                                                            num_dirty_bgs);
-       global_rsv = &fs_info->global_block_rsv;
+       spin_lock(&global_rsv->lock);
+       reserved = global_rsv->reserved;
+       spin_unlock(&global_rsv->lock);
 
        /*
-        * If we can't allocate any more chunks lets make sure we have _lots_ of
-        * wiggle room since running delayed refs can create more delayed refs.
+        * Since the global reserve is just kind of magic we don't really want
+        * to rely on it to save our bacon, so if our size is more than the
+        * delayed_refs_rsv and the global rsv then it's time to think about
+        * bailing.
         */
-       if (global_rsv->space_info->full) {
-               num_dirty_bgs_bytes <<= 1;
-               num_bytes <<= 1;
-       }
-
-       spin_lock(&global_rsv->lock);
-       if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
-               ret = 1;
-       spin_unlock(&global_rsv->lock);
+       spin_lock(&delayed_refs_rsv->lock);
+       reserved += delayed_refs_rsv->reserved;
+       if (delayed_refs_rsv->size >= reserved)
+               ret = true;
+       spin_unlock(&delayed_refs_rsv->lock);
        return ret;
 }
 
@@ -2891,7 +2879,7 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
        if (val >= NSEC_PER_SEC / 2)
                return 2;
 
-       return btrfs_check_space_for_delayed_refs(trans);
+       return btrfs_check_space_for_delayed_refs(trans->fs_info);
 }
 
 struct async_delayed_refs {
index 3c71fcd402d004315737fb07aaa8afc019c6af0d..b6c6d8862b80072c12ff24959f0e33c5972844dd 100644 (file)
@@ -5331,8 +5331,8 @@ static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
                 * Try to steal from the global reserve if there is space for
                 * it.
                 */
-               if (!btrfs_check_space_for_delayed_refs(trans) &&
-                   !btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, false))
+               if (!btrfs_check_space_for_delayed_refs(fs_info) &&
+                   !btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0))
                        return trans;
 
                /* If not, commit and try again. */
index e18eb75e6fa36ca3dba738d116c38e84dd3a8dae..8154b64cc4de4a8de093e213d153326acdbf4f42 100644 (file)
@@ -789,7 +789,7 @@ static int should_end_transaction(struct btrfs_trans_handle *trans)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
 
-       if (btrfs_check_space_for_delayed_refs(trans))
+       if (btrfs_check_space_for_delayed_refs(fs_info))
                return 1;
 
        return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);