btrfs: move basic block_group definitions to their own header
author     Josef Bacik <josef@toxicpanda.com>
           Thu, 20 Jun 2019 19:37:44 +0000 (15:37 -0400)
committer  David Sterba <dsterba@suse.com>
           Mon, 9 Sep 2019 12:59:03 +0000 (14:59 +0200)
This is prep work for moving all of the block group cache code into its
own file.
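
The split follows the common pattern of keeping only a forward declaration in
ctree.h and including the new header wherever the full definition of
struct btrfs_block_group_cache is needed. A minimal sketch of the resulting
usage (the file and function names below are made up for illustration and are
not part of this patch):

    /* ctree.h: the struct is only referenced by pointer, so declare it */
    struct btrfs_block_group_cache;

    /* example.c: dereferences the struct, so pull in the full definition */
    #include "block-group.h"

    static u64 example_bg_flags(struct btrfs_block_group_cache *cache)
    {
            return cache->flags;
    }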

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ minor comment updates ]
Signed-off-by: David Sterba <dsterba@suse.com>
21 files changed:
fs/btrfs/block-group.h [new file with mode: 0644]
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/free-space-cache.c
fs/btrfs/free-space-tree.c
fs/btrfs/free-space-tree.h
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/reada.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/space-info.c
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/tests/btrfs-tests.c
fs/btrfs/tests/free-space-tests.c
fs/btrfs/tests/free-space-tree-tests.c
fs/btrfs/transaction.c
fs/btrfs/volumes.c

diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
new file mode 100644 (file)
index 0000000..0547450
--- /dev/null
+++ b/fs/btrfs/block-group.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef BTRFS_BLOCK_GROUP_H
+#define BTRFS_BLOCK_GROUP_H
+
+enum btrfs_disk_cache_state {
+       BTRFS_DC_WRITTEN,
+       BTRFS_DC_ERROR,
+       BTRFS_DC_CLEAR,
+       BTRFS_DC_SETUP,
+};
+
+struct btrfs_caching_control {
+       struct list_head list;
+       struct mutex mutex;
+       wait_queue_head_t wait;
+       struct btrfs_work work;
+       struct btrfs_block_group_cache *block_group;
+       u64 progress;
+       refcount_t count;
+};
+
+/* Once caching_thread() finds this much free space, it will wake up waiters. */
+#define CACHING_CTL_WAKE_UP SZ_2M
+
+struct btrfs_block_group_cache {
+       struct btrfs_key key;
+       struct btrfs_block_group_item item;
+       struct btrfs_fs_info *fs_info;
+       struct inode *inode;
+       spinlock_t lock;
+       u64 pinned;
+       u64 reserved;
+       u64 delalloc_bytes;
+       u64 bytes_super;
+       u64 flags;
+       u64 cache_generation;
+
+       /*
+        * If the free space extent count exceeds this number, convert the block
+        * group to bitmaps.
+        */
+       u32 bitmap_high_thresh;
+
+       /*
+        * If the free space extent count drops below this number, convert the
+        * block group back to extents.
+        */
+       u32 bitmap_low_thresh;
+
+       /*
+        * Only used for the delayed data space allocation because only the
+        * data space allocation and the related metadata update can be done
+        * across transactions.
+        */
+       struct rw_semaphore data_rwsem;
+
+       /* For raid56, this is a full stripe, without parity */
+       unsigned long full_stripe_len;
+
+       unsigned int ro;
+       unsigned int iref:1;
+       unsigned int has_caching_ctl:1;
+       unsigned int removed:1;
+
+       int disk_cache_state;
+
+       /* Cache tracking stuff */
+       int cached;
+       struct btrfs_caching_control *caching_ctl;
+       u64 last_byte_to_unpin;
+
+       struct btrfs_space_info *space_info;
+
+       /* Free space cache stuff */
+       struct btrfs_free_space_ctl *free_space_ctl;
+
+       /* Block group cache stuff */
+       struct rb_node cache_node;
+
+       /* For block groups in the same raid type */
+       struct list_head list;
+
+       /* Usage count */
+       atomic_t count;
+
+       /*
+        * List of struct btrfs_free_clusters for this block group.
+        * Today it will only have one thing on it, but that may change
+        */
+       struct list_head cluster_list;
+
+       /* For delayed block group creation or deletion of empty block groups */
+       struct list_head bg_list;
+
+       /* For read-only block groups */
+       struct list_head ro_list;
+
+       atomic_t trimming;
+
+       /* For dirty block groups */
+       struct list_head dirty_list;
+       struct list_head io_list;
+
+       struct btrfs_io_ctl io_ctl;
+
+       /*
+        * Incremented when doing extent allocations and holding a read lock
+        * on the space_info's groups_sem semaphore.
+        * Decremented when an ordered extent that represents an IO against this
+        * block group's range is created (after it's added to its inode's
+        * root's list of ordered extents) or immediately after the allocation
+        * if it's a metadata extent or fallocate extent (for these cases we
+        * don't create ordered extents).
+        */
+       atomic_t reservations;
+
+       /*
+        * Incremented while holding the spinlock *lock* by a task checking if
+        * it can perform a nocow write (incremented if the value for the *ro*
+        * field is 0). Decremented by such tasks once they create an ordered
+        * extent or before that if some error happens before reaching that step.
+        * This is to prevent races between block group relocation and nocow
+        * writes through direct IO.
+        */
+       atomic_t nocow_writers;
+
+       /* Lock for free space tree operations. */
+       struct mutex free_space_lock;
+
+       /*
+        * Does the block group need to be added to the free space tree?
+        * Protected by free_space_lock.
+        */
+       int needs_free_space;
+
+       /* Record locked full stripes for RAID5/6 block group */
+       struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
+};
+
+#ifdef CONFIG_BTRFS_DEBUG
+static inline int btrfs_should_fragment_free_space(
+               struct btrfs_block_group_cache *block_group)
+{
+       struct btrfs_fs_info *fs_info = block_group->fs_info;
+
+       return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
+               block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+              (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
+               block_group->flags &  BTRFS_BLOCK_GROUP_DATA);
+}
+#endif
+
+#endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 88042497dbeccefb7d0e2c7a419070ae516847ad..e95fdd1d9dd219fbf49da41f841c92cc45aa9fdd 100644 (file)
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -39,6 +39,7 @@ struct btrfs_transaction;
 struct btrfs_pending_snapshot;
 struct btrfs_delayed_ref_root;
 struct btrfs_space_info;
+struct btrfs_block_group_cache;
 extern struct kmem_cache *btrfs_trans_handle_cachep;
 extern struct kmem_cache *btrfs_bit_radix_cachep;
 extern struct kmem_cache *btrfs_path_cachep;
@@ -439,26 +440,6 @@ enum btrfs_caching_type {
        BTRFS_CACHE_ERROR,
 };
 
-enum btrfs_disk_cache_state {
-       BTRFS_DC_WRITTEN,
-       BTRFS_DC_ERROR,
-       BTRFS_DC_CLEAR,
-       BTRFS_DC_SETUP,
-};
-
-struct btrfs_caching_control {
-       struct list_head list;
-       struct mutex mutex;
-       wait_queue_head_t wait;
-       struct btrfs_work work;
-       struct btrfs_block_group_cache *block_group;
-       u64 progress;
-       refcount_t count;
-};
-
-/* Once caching_thread() finds this much free space, it will wake up waiters. */
-#define CACHING_CTL_WAKE_UP SZ_2M
-
 struct btrfs_io_ctl {
        void *cur, *orig;
        struct page *page;
@@ -481,120 +462,6 @@ struct btrfs_full_stripe_locks_tree {
        struct mutex lock;
 };
 
-struct btrfs_block_group_cache {
-       struct btrfs_key key;
-       struct btrfs_block_group_item item;
-       struct btrfs_fs_info *fs_info;
-       struct inode *inode;
-       spinlock_t lock;
-       u64 pinned;
-       u64 reserved;
-       u64 delalloc_bytes;
-       u64 bytes_super;
-       u64 flags;
-       u64 cache_generation;
-
-       /*
-        * If the free space extent count exceeds this number, convert the block
-        * group to bitmaps.
-        */
-       u32 bitmap_high_thresh;
-
-       /*
-        * If the free space extent count drops below this number, convert the
-        * block group back to extents.
-        */
-       u32 bitmap_low_thresh;
-
-       /*
-        * It is just used for the delayed data space allocation because
-        * only the data space allocation and the relative metadata update
-        * can be done cross the transaction.
-        */
-       struct rw_semaphore data_rwsem;
-
-       /* for raid56, this is a full stripe, without parity */
-       unsigned long full_stripe_len;
-
-       unsigned int ro;
-       unsigned int iref:1;
-       unsigned int has_caching_ctl:1;
-       unsigned int removed:1;
-
-       int disk_cache_state;
-
-       /* cache tracking stuff */
-       int cached;
-       struct btrfs_caching_control *caching_ctl;
-       u64 last_byte_to_unpin;
-
-       struct btrfs_space_info *space_info;
-
-       /* free space cache stuff */
-       struct btrfs_free_space_ctl *free_space_ctl;
-
-       /* block group cache stuff */
-       struct rb_node cache_node;
-
-       /* for block groups in the same raid type */
-       struct list_head list;
-
-       /* usage count */
-       atomic_t count;
-
-       /* List of struct btrfs_free_clusters for this block group.
-        * Today it will only have one thing on it, but that may change
-        */
-       struct list_head cluster_list;
-
-       /* For delayed block group creation or deletion of empty block groups */
-       struct list_head bg_list;
-
-       /* For read-only block groups */
-       struct list_head ro_list;
-
-       atomic_t trimming;
-
-       /* For dirty block groups */
-       struct list_head dirty_list;
-       struct list_head io_list;
-
-       struct btrfs_io_ctl io_ctl;
-
-       /*
-        * Incremented when doing extent allocations and holding a read lock
-        * on the space_info's groups_sem semaphore.
-        * Decremented when an ordered extent that represents an IO against this
-        * block group's range is created (after it's added to its inode's
-        * root's list of ordered extents) or immediately after the allocation
-        * if it's a metadata extent or fallocate extent (for these cases we
-        * don't create ordered extents).
-        */
-       atomic_t reservations;
-
-       /*
-        * Incremented while holding the spinlock *lock* by a task checking if
-        * it can perform a nocow write (incremented if the value for the *ro*
-        * field is 0). Decremented by such tasks once they create an ordered
-        * extent or before that if some error happens before reaching that step.
-        * This is to prevent races between block group relocation and nocow
-        * writes through direct IO.
-        */
-       atomic_t nocow_writers;
-
-       /* Lock for free space tree operations. */
-       struct mutex free_space_lock;
-
-       /*
-        * Does the block group need to be added to the free space tree?
-        * Protected by free_space_lock.
-        */
-       int needs_free_space;
-
-       /* Record locked full stripes for RAID5/6 block group */
-       struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
-};
-
 /* delayed seq elem */
 struct seq_list {
        struct list_head list;
@@ -1387,19 +1254,6 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
        btrfs_clear_opt(fs_info->mount_opt, opt);                       \
 }
 
-#ifdef CONFIG_BTRFS_DEBUG
-static inline int
-btrfs_should_fragment_free_space(struct btrfs_block_group_cache *block_group)
-{
-       struct btrfs_fs_info *fs_info = block_group->fs_info;
-
-       return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
-               block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
-              (btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
-               block_group->flags &  BTRFS_BLOCK_GROUP_DATA);
-}
-#endif
-
 /*
  * Requests for changes that need to be done during transaction commit.
  *
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 97beb351a10cdc0117bc41b5dade4d01eb916093..589405eeb80fe563d6ed3428904b314bcffb041d 100644 (file)
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -40,6 +40,7 @@
 #include "compression.h"
 #include "tree-checker.h"
 #include "ref-verify.h"
+#include "block-group.h"
 
 #define BTRFS_SUPER_FLAG_SUPP  (BTRFS_HEADER_FLAG_WRITTEN |\
                                 BTRFS_HEADER_FLAG_RELOC |\
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0bb095bda01b72c4080e4897133b5169d04acd86..f28697131f221b4740779f32a2de5df18d7fb3fd 100644 (file)
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -32,6 +32,7 @@
 #include "space-info.h"
 #include "block-rsv.h"
 #include "delalloc-space.h"
+#include "block-group.h"
 
 #undef SCRAMBLE_DELAYED_REFS
 
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 92cb06dd94d39f4506ea1e3cd8d0ef2e01ac6ab3..faaf57a7c289a51a7cc3495e1f08577ecb807795 100644 (file)
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -20,6 +20,7 @@
 #include "volumes.h"
 #include "space-info.h"
 #include "delalloc-space.h"
+#include "block-group.h"
 
 #define BITS_PER_BITMAP                (PAGE_SIZE * 8UL)
 #define MAX_CACHE_BYTES_PER_GIG        SZ_32K
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index f5dc115ebba069f70b74131b7627daeeee989f9a..48a03f5240f59bccd6a45d40023f904cf948e7ac 100644 (file)
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -10,6 +10,7 @@
 #include "locking.h"
 #include "free-space-tree.h"
 #include "transaction.h"
+#include "block-group.h"
 
 static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
                                        struct btrfs_block_group_cache *block_group,
diff --git a/fs/btrfs/free-space-tree.h b/fs/btrfs/free-space-tree.h
index 22b7602bde253f6d5ec6182d765d3d581c3ee520..360d50e1cdeabe3520b261c8b64fba4b1e8a13c5 100644 (file)
--- a/fs/btrfs/free-space-tree.h
+++ b/fs/btrfs/free-space-tree.h
@@ -6,6 +6,8 @@
 #ifndef BTRFS_FREE_SPACE_TREE_H
 #define BTRFS_FREE_SPACE_TREE_H
 
+struct btrfs_caching_control;
+
 /*
  * The default size for new free space bitmap items. The last bitmap in a block
  * group may be truncated, and none of the free space tree code assumes that
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 20963b6567aef26c9ee010f62a9198e6ca2c6fc0..612c25aac15cd2da7a83e99d9ad6453466f440a5 100644 (file)
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -47,6 +47,7 @@
 #include "props.h"
 #include "qgroup.h"
 #include "delalloc-space.h"
+#include "block-group.h"
 
 struct btrfs_iget_args {
        struct btrfs_key *location;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index ccac62d40dd2dd4d975e34055f2af44813900f0f..b431f7877e8861a3c3a4865b77afbb705afc1519 100644 (file)
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -45,6 +45,7 @@
 #include "compression.h"
 #include "space-info.h"
 #include "delalloc-space.h"
+#include "block-group.h"
 
 #ifdef CONFIG_64BIT
 /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index f8a3c1b0a15a81d5cb3a9ea5cdd9c6205fe32d96..a960e33525ba780c08a07c073172675488cab0ae 100644 (file)
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -21,7 +21,7 @@
 #include "backref.h"
 #include "extent_io.h"
 #include "qgroup.h"
-
+#include "block-group.h"
 
 /* TODO XXX FIXME
  *  - subvol delete -> delete when ref goes to 0? delete limits also?
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index bb5bd49573b4cc116274d92d0bef180b0c1ec19f..0b034c4943556bb2b4e1d27542afdf475b925049 100644 (file)
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -14,6 +14,7 @@
 #include "disk-io.h"
 #include "transaction.h"
 #include "dev-replace.h"
+#include "block-group.h"
 
 #undef DEBUG
 
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 7ec632d4d9607800aaa189e2fa340b517ac57c0c..2f0e25afa48605f1ff0938271c18f3f49cc95ce2 100644 (file)
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -21,6 +21,7 @@
 #include "qgroup.h"
 #include "print-tree.h"
 #include "delalloc-space.h"
+#include "block-group.h"
 
 /*
  * backref_node, mapping_node and tree_block start with this
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 0c99cf9fb595c25e8954223ef27ccc2da33f3777..f7d4e03f4c5d5ba78b5fd8e01b8ed3be3ea274a1 100644 (file)
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -18,6 +18,7 @@
 #include "check-integrity.h"
 #include "rcu-string.h"
 #include "raid56.h"
+#include "block-group.h"
 
 /*
  * This is only the first step towards a full-features scrub. It reads all
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index ab7b9ec4c240a6ea0b32d08a6027d28078b85363..9dbb9c5f82b11a22ee3e692b2db39fb1d1fc0b5a 100644 (file)
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -8,6 +8,7 @@
 #include "ordered-data.h"
 #include "transaction.h"
 #include "math.h"
+#include "block-group.h"
 
 u64 btrfs_space_info_used(struct btrfs_space_info *s_info,
                          bool may_use_included)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 10bc7e6cca750e51baff4c8733eb8c428d371f1c..16c7af333d3ac46b9549af87982fe8cdb7329cfd 100644 (file)
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -44,6 +44,7 @@
 #include "backref.h"
 #include "space-info.h"
 #include "tests/btrfs-tests.h"
+#include "block-group.h"
 
 #include "qgroup.h"
 #define CREATE_TRACE_POINTS
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 9539f8143b7a5c2d433184536a049948fc8bcdb4..271e7e714920ba8f27f55f0a29fa45f17bfe81a7 100644 (file)
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -17,6 +17,7 @@
 #include "sysfs.h"
 #include "volumes.h"
 #include "space-info.h"
+#include "block-group.h"
 
 static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj);
 static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj);
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 1e3ba4949399536929b2d6a951d04aec69357fb5..b5e80563efaa4fd616e6a1927dd35592e1bac118 100644 (file)
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -15,6 +15,7 @@
 #include "../volumes.h"
 #include "../disk-io.h"
 #include "../qgroup.h"
+#include "../block-group.h"
 
 static struct vfsmount *test_mnt = NULL;
 
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index af89f66f9e6365663b1c2a8453241e6010f8630f..43ec7060fcd2982f7a49aec2ae97306fa1d6e663 100644 (file)
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -8,6 +8,7 @@
 #include "../ctree.h"
 #include "../disk-io.h"
 #include "../free-space-cache.h"
+#include "../block-group.h"
 
 #define BITS_PER_BITMAP                (PAGE_SIZE * 8UL)
 
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index a90dad166971bdcfa619e2c39308bf10963abaf0..bc92df9776308d0e309f61747b0343ac9b5c3985 100644 (file)
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -9,6 +9,7 @@
 #include "../disk-io.h"
 #include "../free-space-tree.h"
 #include "../transaction.h"
+#include "../block-group.h"
 
 struct free_space_extent {
        u64 start;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index e3adb714c04b364869e9da5c36e2f9fa0157b7fb..2e3f6778bfa34cac40296e1202acbef5be23863b 100644 (file)
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -19,6 +19,7 @@
 #include "volumes.h"
 #include "dev-replace.h"
 #include "qgroup.h"
+#include "block-group.h"
 
 #define BTRFS_ROOT_TRANS_TAG 0
 
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index ef3e5b4f88bef174f04630f20667063e28bf3e96..cb9dcdffe43444682b31208a003204a7d32e8951 100644 (file)
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -29,6 +29,7 @@
 #include "sysfs.h"
 #include "tree-checker.h"
 #include "space-info.h"
+#include "block-group.h"
 
 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
        [BTRFS_RAID_RAID10] = {