* are written in the same order that the flusher thread sent them
* down.
*/
-static noinline void compress_file_range(struct async_chunk *async_chunk,
-					 int *num_added)
+static noinline int compress_file_range(struct async_chunk *async_chunk)
{
struct inode *inode = async_chunk->inode;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int i;
int will_compress;
int compress_type = fs_info->compress_type;
+ int compressed_extents = 0;
int redirty = 0;
inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
*/
total_in = ALIGN(total_in, PAGE_SIZE);
if (total_compressed + blocksize <= total_in) {
- *num_added += 1;
+ compressed_extents++;
/*
* The async work queues will take care of doing actual
cond_resched();
goto again;
}
- return;
+ return compressed_extents;
}
}
if (pages) {
extent_range_redirty_for_io(inode, start, end);
add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
BTRFS_COMPRESS_NONE);
- *num_added += 1;
+ compressed_extents++;
- return;
+ return compressed_extents;
free_pages_out:
for (i = 0; i < nr_pages; i++) {
put_page(pages[i]);
}
kfree(pages);
+
+ return 0;
}
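
The hunks above amount to the standard out-parameter-to-return-value conversion: each `*num_added += 1` becomes an increment of the local `compressed_extents`, every early `return` now reports the running count, and the `free_pages_out` error path returns 0 so the caller can see that no async extents were queued. A minimal standalone sketch of the same conversion, with hypothetical names rather than btrfs code:

#include <stdio.h>

/* Before: the result is delivered through an out-parameter that the
 * caller must remember to zero-initialize. */
static void count_even_old(const int *vals, int n, int *num_even)
{
	for (int i = 0; i < n; i++)
		if (vals[i] % 2 == 0)
			*num_even += 1;
}

/* After: the count lives in a local and is returned directly,
 * mirroring the compressed_extents conversion above. */
static int count_even_new(const int *vals, int n)
{
	int num_even = 0;

	for (int i = 0; i < n; i++)
		if (vals[i] % 2 == 0)
			num_even++;

	return num_even;
}

int main(void)
{
	int vals[] = { 1, 2, 3, 4 };
	int old_count = 0;

	count_even_old(vals, 4, &old_count);
	printf("old: %d new: %d\n", old_count, count_even_new(vals, 4));
	return 0;
}

Returning the count directly also removes the need for callers to pre-initialize a counter before the call, which is the class of mistake this kind of conversion tends to guard against.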
static void free_async_extent_pages(struct async_extent *async_extent)
static noinline void async_cow_start(struct btrfs_work *work)
{
struct async_chunk *async_chunk;
- int num_added = 0;
+ int compressed_extents;
async_chunk = container_of(work, struct async_chunk, work);
- compress_file_range(async_chunk, &num_added);
- if (num_added == 0) {
+ compressed_extents = compress_file_range(async_chunk);
+ if (compressed_extents == 0) {
btrfs_add_delayed_iput(async_chunk->inode);
async_chunk->inode = NULL;
}
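
For reference, `async_cow_start` recovers its `async_chunk` from the embedded `btrfs_work` via `container_of`, which is what lets the zero-extent check above run right where the work item executes. A minimal standalone illustration of that idiom, using hypothetical struct names rather than the btrfs definitions:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of, equivalent in effect to the kernel macro:
 * given a pointer to a member, recover the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work {
	int id;
};

/* Hypothetical stand-in for async_chunk with its embedded work item. */
struct chunk {
	const char *name;
	struct work work;
};

static void start(struct work *w)
{
	/* Walk back from the embedded member to the enclosing chunk. */
	struct chunk *c = container_of(w, struct chunk, work);

	printf("work %d runs for chunk \"%s\"\n", w->id, c->name);
}

int main(void)
{
	struct chunk c = { .name = "demo", .work = { .id = 1 } };

	/* Only the embedded work pointer is handed to the worker. */
	start(&c.work);
	return 0;
}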