return 0;
}
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, affs_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, affs_get_block);
if (ret < 0 && (rw & WRITE))
affs_write_failed(mapping, offset + count);
return ret;
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- return __blockdev_direct_IO(rw, iocb, inode, I_BDEV(inode), iter,
- offset, blkdev_get_block,
- NULL, NULL, 0);
+ return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset,
+ blkdev_get_block, NULL, NULL, 0);
}
int __sync_blockdev(struct block_device *bdev, int wait)
wakeup = false;
}
- ret = __blockdev_direct_IO(rw, iocb, inode,
- BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
- iter, offset, btrfs_get_blocks_direct, NULL,
- btrfs_submit_direct, flags);
+ ret = __blockdev_direct_IO(iocb, inode,
+ BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
+ iter, offset, btrfs_get_blocks_direct, NULL,
+ btrfs_submit_direct, flags);
if (rw & WRITE) {
current->journal_info = NULL;
if (ret < 0 && ret != -EIOCBQUEUED)
* for the whole file.
*/
static inline ssize_t
-do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, struct iov_iter *iter, loff_t offset,
- get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags)
+do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ struct block_device *bdev, struct iov_iter *iter,
+ loff_t offset, get_block_t get_block, dio_iodone_t end_io,
+ dio_submit_t submit_io, int flags)
{
unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits);
unsigned blkbits = i_blkbits;
struct blk_plug plug;
unsigned long align = offset | iov_iter_alignment(iter);
- if (rw & WRITE)
- rw = WRITE_ODIRECT;
-
/*
* Avoid references to bdev if not absolutely needed to give
* the early prefetch in the caller enough time.
}
/* watch out for a 0 len io from a tricksy fs */
- if (rw == READ && !iov_iter_count(iter))
+ if (iov_iter_rw(iter) == READ && !iov_iter_count(iter))
return 0;
dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
dio->flags = flags;
if (dio->flags & DIO_LOCKING) {
- if (rw == READ) {
+ if (iov_iter_rw(iter) == READ) {
struct address_space *mapping =
iocb->ki_filp->f_mapping;
if (is_sync_kiocb(iocb))
dio->is_async = false;
else if (!(dio->flags & DIO_ASYNC_EXTEND) &&
- (rw & WRITE) && end > i_size_read(inode))
+ iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
dio->is_async = false;
else
dio->is_async = true;
dio->inode = inode;
- dio->rw = rw;
+ dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
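+ /*
+  * The WRITE -> WRITE_ODIRECT promotion that used to be done at the top
+  * of this function is now folded into the assignment above.
+  */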
/*
* For AIO O_(D)SYNC writes we need to defer completions to a workqueue
* so that we can call ->fsync.
*/
- if (dio->is_async && (rw & WRITE) &&
+ if (dio->is_async && iov_iter_rw(iter) == WRITE &&
((iocb->ki_filp->f_flags & O_DSYNC) ||
IS_SYNC(iocb->ki_filp->f_mapping->host))) {
retval = dio_set_defer_completion(dio);
* we can let i_mutex go now that it's achieved its purpose
* of protecting us from looking up uninitialized blocks.
*/
- if (rw == READ && (dio->flags & DIO_LOCKING))
+ if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
mutex_unlock(&dio->inode->i_mutex);
/*
*/
BUG_ON(retval == -EIOCBQUEUED);
if (dio->is_async && retval == 0 && dio->result &&
- (rw == READ || dio->result == count))
+ (iov_iter_rw(iter) == READ || dio->result == count))
retval = -EIOCBQUEUED;
else
dio_await_completion(dio);
return retval;
}
-ssize_t
-__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, struct iov_iter *iter, loff_t offset,
- get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags)
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ struct block_device *bdev, struct iov_iter *iter,
+ loff_t offset, get_block_t get_block,
+ dio_iodone_t end_io, dio_submit_t submit_io,
+ int flags)
{
/*
* The block device state is needed in the end to finally
prefetch(bdev->bd_queue);
prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES);
- return do_blockdev_direct_IO(rw, iocb, inode, bdev, iter, offset,
- get_block, end_io, submit_io, flags);
+ return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block,
+ end_io, submit_io, flags);
}
EXPORT_SYMBOL(__blockdev_direct_IO);
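
For reference, the I/O direction now travels with the iov_iter itself and is
read back with iov_iter_rw(). A minimal sketch of the idea, assuming
iter->type carries the READ/WRITE bit alongside the iterator kind (the
in-tree helper is a macro in include/linux/uio.h; this standalone function is
illustrative only):

	/* illustrative sketch only: mask the direction bit out of iter->type */
	static inline int iov_iter_direction(const struct iov_iter *iter)
	{
		return iter->type & (READ | WRITE);	/* READ == 0, WRITE == 1 */
	}

Every "int rw" threaded through the direct I/O path thus becomes redundant,
which is what the hunks above and below remove.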
ret = dax_do_io(rw, iocb, inode, iter, offset, ext2_get_block,
NULL, DIO_LOCKING);
else
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
+ ret = blockdev_direct_IO(iocb, inode, iter, offset,
ext2_get_block);
if (ret < 0 && (rw & WRITE))
ext2_write_failed(mapping, offset + count);
}
retry:
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, ext3_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, ext3_get_block);
/*
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
ret = dax_do_io(rw, iocb, inode, iter, offset,
ext4_get_block, NULL, 0);
else
- ret = __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev, iter, offset,
- ext4_get_block, NULL, NULL, 0);
+ ret = __blockdev_direct_IO(iocb, inode,
+ inode->i_sb->s_bdev, iter,
+ offset, ext4_get_block, NULL,
+ NULL, 0);
inode_dio_done(inode);
} else {
locked:
ret = dax_do_io(rw, iocb, inode, iter, offset,
ext4_get_block, NULL, DIO_LOCKING);
else
- ret = blockdev_direct_IO(rw, iocb, inode, iter,
- offset, ext4_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset,
+ ext4_get_block);
if (unlikely((rw & WRITE) && ret < 0)) {
loff_t isize = i_size_read(inode);
ret = dax_do_io(rw, iocb, inode, iter, offset, get_block_func,
ext4_end_io_dio, dio_flags);
else
- ret = __blockdev_direct_IO(rw, iocb, inode,
+ ret = __blockdev_direct_IO(iocb, inode,
inode->i_sb->s_bdev, iter, offset,
get_block_func,
ext4_end_io_dio, NULL, dio_flags);
if (rw & WRITE)
__allocate_data_blocks(inode, offset, count);
- err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
+ err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
if (err < 0 && (rw & WRITE))
f2fs_write_failed(mapping, offset + count);
* FAT needs to use DIO_LOCKING to avoid the race between
* fat_get_block() and ->truncate().
*/
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, fat_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, fat_get_block);
if (ret < 0 && (rw & WRITE))
fat_write_failed(mapping, offset + count);
truncate_inode_pages_range(mapping, lstart, end);
}
- rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
- iter, offset,
- gfs2_get_block_direct, NULL, NULL, 0);
+ rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+ offset, gfs2_get_block_direct, NULL, NULL, 0);
out:
gfs2_glock_dq(&gh);
gfs2_holder_uninit(&gh);
size_t count = iov_iter_count(iter);
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, hfs_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, hfs_get_block);
/*
* In case of error extending write may have instantiated a few
size_t count = iov_iter_count(iter);
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
- hfsplus_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, hfsplus_get_block);
/*
* In case of error extending write may have instantiated a few
size_t count = iov_iter_count(iter);
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, jfs_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, jfs_get_block);
/*
* In case of error extending write may have instantiated a few
return 0;
/* Needs synchronization with the cleaner */
- size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
- nilfs_get_block);
+ size = blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);
/*
* In case of error extending write may have instantiated a few
di_bh = NULL;
}
- written = __blockdev_direct_IO(WRITE, iocb, inode, inode->i_sb->s_bdev,
- iter, offset,
- ocfs2_direct_IO_get_blocks,
- ocfs2_dio_end_io, NULL, 0);
+ written = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
+ offset, ocfs2_direct_IO_get_blocks,
+ ocfs2_dio_end_io, NULL, 0);
if (unlikely(written < 0)) {
loff_t i_size = i_size_read(inode);
return 0;
if (rw == READ)
- return __blockdev_direct_IO(rw, iocb, inode,
- inode->i_sb->s_bdev,
- iter, offset,
- ocfs2_direct_IO_get_blocks,
- ocfs2_dio_end_io, NULL, 0);
+ return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
+ iter, offset,
+ ocfs2_direct_IO_get_blocks,
+ ocfs2_dio_end_io, NULL, 0);
else
return ocfs2_direct_IO_write(iocb, iter, offset);
}
size_t count = iov_iter_count(iter);
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset,
+ ret = blockdev_direct_IO(iocb, inode, iter, offset,
reiserfs_get_blocks_direct_io);
/*
size_t count = iov_iter_count(iter);
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
+ ret = blockdev_direct_IO(iocb, inode, iter, offset, udf_get_block);
if (unlikely(ret < 0 && (rw & WRITE)))
udf_write_failed(mapping, offset + count);
return ret;
struct block_device *bdev = xfs_find_bdev_for_inode(inode);
if (rw & WRITE) {
- return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
- offset, xfs_get_blocks_direct,
+ return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
+ xfs_get_blocks_direct,
xfs_end_io_direct_write, NULL,
DIO_ASYNC_EXTEND);
}
- return __blockdev_direct_IO(rw, iocb, inode, bdev, iter,
- offset, xfs_get_blocks_direct,
- NULL, NULL, 0);
+ return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
+ xfs_get_blocks_direct, NULL, NULL, 0);
}
/*
void dio_end_io(struct bio *bio, int error);
-ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
- struct block_device *bdev, struct iov_iter *iter, loff_t offset,
- get_block_t get_block, dio_iodone_t end_io,
- dio_submit_t submit_io, int flags);
-
-static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
- struct inode *inode, struct iov_iter *iter, loff_t offset,
- get_block_t get_block)
-{
- return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iter,
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ struct block_device *bdev, struct iov_iter *iter,
+ loff_t offset, get_block_t get_block,
+ dio_iodone_t end_io, dio_submit_t submit_io,
+ int flags);
+
+static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
+ struct inode *inode,
+ struct iov_iter *iter, loff_t offset,
+ get_block_t get_block)
+{
+ return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
offset, get_block, NULL, NULL,
DIO_LOCKING | DIO_SKIP_HOLES);
}
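
As a usage sketch, a ->direct_IO method after this change no longer forwards
rw; the direction is implicit in the iterator. The names foo_get_block and
foo_write_failed below are hypothetical stand-ins for a real filesystem's
helpers:

	static ssize_t foo_direct_IO(int rw, struct kiocb *iocb,
				     struct iov_iter *iter, loff_t offset)
	{
		struct inode *inode = iocb->ki_filp->f_mapping->host;
		size_t count = iov_iter_count(iter);	/* sample before iter advances */
		ssize_t ret;

		ret = blockdev_direct_IO(iocb, inode, iter, offset, foo_get_block);
		/* a failed write may have instantiated blocks past i_size */
		if (ret < 0 && iov_iter_rw(iter) == WRITE)
			foo_write_failed(inode->i_mapping, offset + count);
		return ret;
	}

The unused rw parameter remains only because a_ops->direct_IO still takes it
at this point in the series.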