btrfs: Optimize unallocated chunks discard
authorNikolay Borisov <nborisov@suse.com>
Wed, 27 Mar 2019 12:24:16 +0000 (14:24 +0200)
committerDavid Sterba <dsterba@suse.com>
Mon, 29 Apr 2019 17:02:38 +0000 (19:02 +0200)
Currently, unallocated chunks are always trimmed. For example, two
consecutive trims on a large storage device would trim the free space
twice, irrespective of whether any of that space was actually allocated
between those trims.

Optimize this behavior by exploiting the newly introduced alloc_state
tree of btrfs_device. A new CHUNK_TRIMMED bit is used to mark
those unallocated chunks which have been trimmed and have not been
allocated afterwards. On chunk allocation, the respective underlying
device's physical space will have its CHUNK_TRIMMED flag cleared. This
avoids submitting discards for space which hasn't changed since the
last time a discard was issued.

This applies to the single mount period of the filesystem as the
information is not stored permanently.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.h
fs/btrfs/extent_map.c

index 7500728bcdd390b3d2a226f69da2d0121576049a..1ebac1982a9cda2e504cfeb0745ba81c673f5486 100644 (file)
@@ -11249,6 +11249,54 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
        return unpin_extent_range(fs_info, start, end, false);
 }
 
+static bool should_skip_trim(struct btrfs_device *device, u64 *start, u64 *len)
+{
+       u64 trimmed_start = 0, trimmed_end = 0;
+       u64 end = *start + *len - 1;
+
+       if (!find_first_extent_bit(&device->alloc_state, *start, &trimmed_start,
+                                  &trimmed_end, CHUNK_TRIMMED, NULL)) {
+               u64 trimmed_len = trimmed_end - trimmed_start + 1;
+
+               if (*start < trimmed_start) {
+                       if (in_range(end, trimmed_start, trimmed_len) ||
+                           end > trimmed_end) {
+                               /*
+                                * start|------|end
+                                *      ts|--|trimmed_len
+                                *      OR
+                                * start|-----|end
+                                *      ts|-----|trimmed_len
+                                */
+                               *len = trimmed_start - *start;
+                               return false;
+                       } else if (end < trimmed_start) {
+                               /*
+                                * start|------|end
+                                *             ts|--|trimmed_len
+                                */
+                               return false;
+                       }
+               } else if (in_range(*start, trimmed_start, trimmed_len)) {
+                       if (in_range(end, trimmed_start, trimmed_len)) {
+                               /*
+                                * start|------|end
+                                *  ts|----------|trimmed_len
+                                */
+                               return true;
+                       } else {
+                               /*
+                                * start|-----------|end
+                                *  ts|----------|trimmed_len
+                                */
+                               *start = trimmed_end + 1;
+                               *len = end - *start + 1;
+                               return false;
+                       }
+               }
+       }
+       return false;
+}
 /*
  * It used to be that old block groups would be left around forever.
  * Iterating over them would be enough to trim unused space.  Since we
@@ -11319,7 +11367,14 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
                start = max(range->start, start);
                len = min(range->len, len);
 
-               ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
+               if (!should_skip_trim(device, &start, &len)) {
+                       ret = btrfs_issue_discard(device->bdev, start, len,
+                                                 &bytes);
+                       if (!ret)
+                               set_extent_bits(&device->alloc_state, start,
+                                               start + bytes - 1,
+                                               CHUNK_TRIMMED);
+               }
                mutex_unlock(&fs_info->chunk_mutex);
 
                if (ret)
index 6435c2818ec385326327f05ff0a4970d27992045..1680832d2c88ecefbc87eb71970b4886941184b6 100644 (file)
                                 EXTENT_CLEAR_DATA_RESV)
 #define EXTENT_CTLBITS         (EXTENT_DO_ACCOUNTING)
 
-/* Redefined bits above which are used only in the device allocation tree */
+/*
+ * Redefined bits above which are used only in the device allocation tree,
+ * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
+ * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
+ * manipulation functions
+ */
 #define CHUNK_ALLOCATED EXTENT_DIRTY
+#define CHUNK_TRIMMED   EXTENT_DEFRAG
 
 /*
  * flags for bio submission. The high bits indicate the compression
index 5a79a656dfa6481ac4579ae3832dc898b168f15d..9558d79faf1e4c3d1d9931e5f600f3732463fed3 100644 (file)
@@ -389,8 +389,10 @@ int add_extent_mapping(struct extent_map_tree *tree,
                goto out;
 
        setup_extent_mapping(tree, em, modified);
-       if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
+       if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags)) {
                extent_map_device_set_bits(em, CHUNK_ALLOCATED);
+               extent_map_device_clear_bits(em, CHUNK_TRIMMED);
+       }
 out:
        return ret;
 }