Btrfs: remove nr_async_submits and async_submit_draining
authorLiu Bo <bo.li.liu@oracle.com>
Thu, 7 Sep 2017 17:22:22 +0000 (11:22 -0600)
committerDavid Sterba <dsterba@suse.com>
Mon, 30 Oct 2017 11:27:59 +0000 (12:27 +0100)
Now that we have the combo of flushing twice, which can make sure I/O
has started (the second flush will wait for the page lock, which won't
be unlocked until page writeback is set and ordered extents are
queued), we don't need %async_submit_draining, %async_delalloc_pages
and %nr_async_submits to tell whether the I/O has actually started.

Moreover, all the flushers in use are followed by functions that wait
for ordered extents to complete, so %nr_async_submits, which tracks
whether a bio's async submit has made progress, doesn't really make
sense.

However, %async_delalloc_pages is still required by shrink_delalloc()
as that function doesn't flush twice in the normal case (it just
issues a writeback with WB_REASON_FS_FREE_SPACE).

Signed-off-by: Liu Bo <bo.li.liu@oracle.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/inode.c

index bc1b6a0337003a95d13866e51fcb046128ab1a6d..7995666af95983e3fa4833caf7ab8d703054d605 100644 (file)
@@ -878,8 +878,6 @@ struct btrfs_fs_info {
        rwlock_t tree_mod_log_lock;
        struct rb_root tree_mod_log;
 
-       atomic_t nr_async_submits;
-       atomic_t async_submit_draining;
        atomic_t async_delalloc_pages;
        atomic_t open_ioctl_trans;
 
index 2b18b0c379c1bdd9689e25693eeaf7e59894fac1..f3e6e8fa19b0665d565a48a6b7e04b813bfea532 100644 (file)
@@ -740,22 +740,9 @@ static void run_one_async_start(struct btrfs_work *work)
 
 static void run_one_async_done(struct btrfs_work *work)
 {
-       struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;
-       int limit;
 
        async = container_of(work, struct  async_submit_bio, work);
-       fs_info = async->fs_info;
-
-       limit = btrfs_async_submit_limit(fs_info);
-       limit = limit * 2 / 3;
-
-       /*
-        * atomic_dec_return implies a barrier for waitqueue_active
-        */
-       if (atomic_dec_return(&fs_info->nr_async_submits) < limit &&
-           waitqueue_active(&fs_info->async_submit_wait))
-               wake_up(&fs_info->async_submit_wait);
 
        /* If an error occurred we just want to clean up the bio and move on */
        if (async->status) {
@@ -803,19 +790,10 @@ blk_status_t btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
 
        async->status = 0;
 
-       atomic_inc(&fs_info->nr_async_submits);
-
        if (op_is_sync(bio->bi_opf))
                btrfs_set_work_high_priority(&async->work);
 
        btrfs_queue_work(fs_info->workers, &async->work);
-
-       while (atomic_read(&fs_info->async_submit_draining) &&
-             atomic_read(&fs_info->nr_async_submits)) {
-               wait_event(fs_info->async_submit_wait,
-                          (atomic_read(&fs_info->nr_async_submits) == 0));
-       }
-
        return 0;
 }
 
@@ -2515,9 +2493,7 @@ int open_ctree(struct super_block *sb,
        btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY);
        btrfs_init_block_rsv(&fs_info->delayed_block_rsv,
                             BTRFS_BLOCK_RSV_DELOPS);
-       atomic_set(&fs_info->nr_async_submits, 0);
        atomic_set(&fs_info->async_delalloc_pages, 0);
-       atomic_set(&fs_info->async_submit_draining, 0);
        atomic_set(&fs_info->defrag_running, 0);
        atomic_set(&fs_info->qgroup_op_seq, 0);
        atomic_set(&fs_info->reada_works_cnt, 0);
index aaf36f78fa078ac48023a3e694b798738a4e41c5..4ddb299af4728c452335f2a2ac3d93bcc86fba98 100644 (file)
@@ -1222,13 +1222,6 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
 
                btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);
 
-               while (atomic_read(&fs_info->async_submit_draining) &&
-                      atomic_read(&fs_info->async_delalloc_pages)) {
-                       wait_event(fs_info->async_submit_wait,
-                                  (atomic_read(&fs_info->async_delalloc_pages) ==
-                                   0));
-               }
-
                *nr_written += nr_pages;
                start = cur_end + 1;
        }
@@ -10332,19 +10325,6 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
        ret = __start_delalloc_inodes(root, delay_iput, -1);
        if (ret > 0)
                ret = 0;
-       /*
-        * the filemap_flush will queue IO into the worker threads, but
-        * we have to make sure the IO is actually started and that
-        * ordered extents get created before we return
-        */
-       atomic_inc(&fs_info->async_submit_draining);
-       while (atomic_read(&fs_info->nr_async_submits) ||
-              atomic_read(&fs_info->async_delalloc_pages)) {
-               wait_event(fs_info->async_submit_wait,
-                          (atomic_read(&fs_info->nr_async_submits) == 0 &&
-                           atomic_read(&fs_info->async_delalloc_pages) == 0));
-       }
-       atomic_dec(&fs_info->async_submit_draining);
        return ret;
 }
 
@@ -10386,14 +10366,6 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
        spin_unlock(&fs_info->delalloc_root_lock);
 
        ret = 0;
-       atomic_inc(&fs_info->async_submit_draining);
-       while (atomic_read(&fs_info->nr_async_submits) ||
-             atomic_read(&fs_info->async_delalloc_pages)) {
-               wait_event(fs_info->async_submit_wait,
-                  (atomic_read(&fs_info->nr_async_submits) == 0 &&
-                   atomic_read(&fs_info->async_delalloc_pages) == 0));
-       }
-       atomic_dec(&fs_info->async_submit_draining);
 out:
        if (!list_empty_careful(&splice)) {
                spin_lock(&fs_info->delalloc_root_lock);