unsigned long cluster = max_cluster;
u64 new_align = ~((u64)SZ_128K - 1);
struct page **pages = NULL;
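+ /* cache the BTRFS_DEFRAG_RANGE_COMPRESS check; the same flag is tested at several sites below */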
+ bool do_compress = range->flags & BTRFS_DEFRAG_RANGE_COMPRESS;
if (isize == 0)
return 0;
if (range->start >= isize)
return -EINVAL;
- if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
+ if (do_compress) {
if (range->compress_type > BTRFS_COMPRESS_TYPES)
return -EINVAL;
if (range->compress_type)
	compress_type = range->compress_type;
}
if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT,
extent_thresh, &last_len, &skip,
- &defrag_end, range->flags &
- BTRFS_DEFRAG_RANGE_COMPRESS)) {
+ &defrag_end, do_compress)) {
unsigned long next;
/*
 * the should_defrag function tells us how much to skip
 * bump our counter by the suggested amount
 */
next = DIV_ROUND_UP(skip, PAGE_SIZE);
i = max(i + 1, next);
continue;
}
inode_lock(inode);
- if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
+ if (do_compress)
BTRFS_I(inode)->defrag_compress = compress_type;
ret = cluster_pages_for_defrag(inode, pages, i, cluster);
if (ret < 0) {
filemap_flush(inode->i_mapping);
}
- if ((range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
+ if (do_compress) {
/* the filemap_flush will queue IO into the worker threads, but
 * we have to make sure the IO is actually started and that
 * ordered extents get created before we return
 */
ret = defrag_count;
out_ra:
- if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
+ if (do_compress) {
inode_lock(inode);
BTRFS_I(inode)->defrag_compress = BTRFS_COMPRESS_NONE;
inode_unlock(inode);
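
The change above is purely mechanical: a flag test that appears at several
sites is evaluated once into a local boolean, so every branch reads the same
way and a later rename of the flag touches a single line. Below is a minimal
standalone sketch of the same refactor; the names (defrag_range,
DEFRAG_COMPRESS, range_args) are made up for illustration and this is not
btrfs code:

#include <stdbool.h>
#include <stdio.h>

#define DEFRAG_COMPRESS 0x1	/* stand-in for BTRFS_DEFRAG_RANGE_COMPRESS */

struct range_args {
	unsigned int flags;
	int compress_type;
};

static int defrag_range(const struct range_args *range)
{
	/* evaluate the flag once, reuse the local at every site */
	const bool do_compress = range->flags & DEFRAG_COMPRESS;

	if (do_compress && range->compress_type < 0)
		return -1;	/* mirrors the patch's -EINVAL validation */

	for (int pass = 0; pass < 3; pass++) {
		if (do_compress)	/* one test per site instead of re-masking flags */
			printf("pass %d: compress type %d\n", pass,
			       range->compress_type);
	}
	return 0;
}

int main(void)
{
	struct range_args r = { .flags = DEFRAG_COMPRESS, .compress_type = 1 };
	return defrag_range(&r);
}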