* Inherit FAILFAST from bio (for read-ahead, and explicit
* FAILFAST). FAILFAST flags are identical for req and bio.
*/
- if (bio_rw_ahead(bio))
+ if (bio_rw_flagged(bio, BIO_RW_AHEAD))
req->cmd_flags |= REQ_FAILFAST_MASK;
else
req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
- if (unlikely(bio_discard(bio))) {
+ if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
req->cmd_flags |= REQ_DISCARD;
- if (bio_barrier(bio))
+ if (bio_rw_flagged(bio, BIO_RW_BARRIER))
req->cmd_flags |= REQ_SOFTBARRIER;
req->q->prepare_discard_fn(req->q, req);
- } else if (unlikely(bio_barrier(bio)))
+ } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)))
req->cmd_flags |= REQ_HARDBARRIER;
- if (bio_sync(bio))
+ if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
req->cmd_flags |= REQ_RW_SYNC;
- if (bio_rw_meta(bio))
+ if (bio_rw_flagged(bio, BIO_RW_META))
req->cmd_flags |= REQ_RW_META;
- if (bio_noidle(bio))
+ if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
req->cmd_flags |= REQ_NOIDLE;
req->errors = 0;
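
The hunk above inherits the failfast policy from the bio: a read-ahead bio gets every failfast bit set on the request, while any other bio only passes along whatever failfast bits it already carries in bi_rw. Below is a minimal standalone userspace sketch of that masking pattern; the EX_* names and bit values are invented for the example and do not claim to match the kernel's real flag numbering.

#include <stdio.h>

#define EX_RW_AHEAD		(1u << 4)	/* stand-in for BIO_RW_AHEAD */
#define EX_FAILFAST_MASK	(0x7u << 1)	/* stand-in for REQ_FAILFAST_MASK (three bits) */

static unsigned int inherit_failfast(unsigned int bi_rw)
{
	unsigned int cmd_flags = 0;

	if (bi_rw & EX_RW_AHEAD)	/* read-ahead: fail fast on every layer */
		cmd_flags |= EX_FAILFAST_MASK;
	else				/* otherwise copy only the failfast bits the bio carried */
		cmd_flags |= bi_rw & EX_FAILFAST_MASK;
	return cmd_flags;
}

int main(void)
{
	printf("%#x\n", inherit_failfast(EX_RW_AHEAD));	/* 0xe: all failfast bits */
	printf("%#x\n", inherit_failfast(1u << 1));		/* 0x2: only the bit the bio had */
	return 0;
}
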
int el_ret;
unsigned int bytes = bio->bi_size;
const unsigned short prio = bio_prio(bio);
- const int sync = bio_sync(bio);
- const int unplug = bio_unplug(bio);
+ const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+ const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
int rw_flags;
- if (bio_barrier(bio) && bio_has_data(bio) &&
+ if (bio_rw_flagged(bio, BIO_RW_BARRIER) && bio_has_data(bio) &&
(q->next_ordered == QUEUE_ORDERED_NONE)) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
spin_lock_irq(q->queue_lock);
- if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
+ if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
goto get_rq;
el_ret = elv_merge(q, &req, bio);
if (bio_check_eod(bio, nr_sectors))
goto end_io;
- if (bio_discard(bio) && !q->prepare_discard_fn) {
+ if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+ !q->prepare_discard_fn) {
err = -EOPNOTSUPP;
goto end_io;
}
*/
static inline int cfq_bio_sync(struct bio *bio)
{
- if (bio_data_dir(bio) == READ || bio_sync(bio))
+ if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
return 1;
return 0;
/*
* Don't merge file system requests and discard requests
*/
- if (bio_discard(bio) != bio_discard(rq->bio))
+ if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
+ bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
return 0;
/*
pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
if (bio_rw(bio) == WRITE) {
- int barrier = bio_barrier(bio);
+ bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
struct file *file = lo->lo_backing_file;
if (barrier) {
if (error == -EOPNOTSUPP)
goto out;
- if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+ if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
goto out;
if (unlikely(error)) {
if (!error)
return 0; /* I/O complete */
- if ((error == -EWOULDBLOCK) && bio_rw_ahead(bio))
+ if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
return error;
if (error == -EOPNOTSUPP)
*/
spin_lock_irqsave(&md->deferred_lock, flags);
if (__noflush_suspending(md)) {
- if (!bio_barrier(io->bio))
+ if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
bio_list_add_head(&md->deferred,
io->bio);
} else
io_error = io->error;
bio = io->bio;
- if (bio_barrier(bio)) {
+ if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
/*
* There can be just one barrier request so we use
* a per-device variable for error reporting.
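
The comment above depends on at most one barrier request being in flight per device, so its completion status can live in a single per-device field rather than in per-bio state. A tiny standalone sketch of that bookkeeping follows, with invented names (example_dev, example_record_barrier_error); only the keep-the-first-error pattern mirrors the !md->barrier_error test that appears later in this patch.

#include <assert.h>

struct example_dev {
	int barrier_error;	/* status of the single in-flight barrier, 0 = no error yet */
};

/* Record only the first error reported for the in-flight barrier. */
static void example_record_barrier_error(struct example_dev *dev, int error)
{
	if (!dev->barrier_error)
		dev->barrier_error = error;
}

int main(void)
{
	struct example_dev dev = { 0 };

	example_record_barrier_error(&dev, -5);		/* first error wins */
	example_record_barrier_error(&dev, -95);	/* later error is dropped */
	assert(dev.barrier_error == -5);
	return 0;
}
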
ci.map = dm_get_table(md);
if (unlikely(!ci.map)) {
- if (!bio_barrier(bio))
+ if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
bio_io_error(bio);
else
if (!md->barrier_error)
* we have to queue this io for later.
*/
if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
- unlikely(bio_barrier(bio))) {
+ unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
up_read(&md->io_lock);
if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
{
struct mapped_device *md = q->queuedata;
- if (unlikely(bio_barrier(bio))) {
+ if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
if (dm_request_based(md))
generic_make_request(c);
else {
- if (bio_barrier(c))
+ if (bio_rw_flagged(c, BIO_RW_BARRIER))
process_barrier(md, c);
else
__split_and_process_bio(md, c);
sector_t start_sector;
int cpu;
- if (unlikely(bio_barrier(bio))) {
+ if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
if (uptodate)
multipath_end_bh_io(mp_bh, 0);
- else if (!bio_rw_ahead(bio)) {
+ else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
/*
* oops, IO error:
*/
const int rw = bio_data_dir(bio);
int cpu;
- if (unlikely(bio_barrier(bio))) {
+ if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
const int rw = bio_data_dir(bio);
int cpu;
- if (unlikely(bio_barrier(bio))) {
+ if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
struct bio_list bl;
struct page **behind_pages = NULL;
const int rw = bio_data_dir(bio);
- const int do_sync = bio_sync(bio);
- int cpu, do_barriers;
+ const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+ int cpu;
+ bool do_barriers;
mdk_rdev_t *blocked_rdev;
/*
md_write_start(mddev, bio); /* wait on superblock update early */
- if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
+ if (unlikely(!mddev->barriers_work &&
+ bio_rw_flagged(bio, BIO_RW_BARRIER))) {
if (rw == WRITE)
md_write_end(mddev);
bio_endio(bio, -EOPNOTSUPP);
atomic_set(&r1_bio->remaining, 0);
atomic_set(&r1_bio->behind_remaining, 0);
- do_barriers = bio_barrier(bio);
+ do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
if (do_barriers)
set_bit(R1BIO_Barrier, &r1_bio->state);
* We already have a nr_pending reference on these rdevs.
*/
int i;
- const int do_sync = bio_sync(r1_bio->master_bio);
+ const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
clear_bit(R1BIO_Barrier, &r1_bio->state);
for (i=0; i < conf->raid_disks; i++)
(unsigned long long)r1_bio->sector);
raid_end_bio_io(r1_bio);
} else {
- const int do_sync = bio_sync(r1_bio->master_bio);
+ const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
r1_bio->bios[r1_bio->read_disk] =
mddev->ro ? IO_BLOCKED : NULL;
r1_bio->read_disk = disk;
int i;
int chunk_sects = conf->chunk_mask + 1;
const int rw = bio_data_dir(bio);
- const int do_sync = bio_sync(bio);
+ const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
struct bio_list bl;
unsigned long flags;
mdk_rdev_t *blocked_rdev;
- if (unlikely(bio_barrier(bio))) {
+ if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
bio_endio(bio, -EOPNOTSUPP);
return 0;
}
raid_end_bio_io(r10_bio);
bio_put(bio);
} else {
- const int do_sync = bio_sync(r10_bio->master_bio);
+ const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
bio_put(bio);
rdev = conf->mirrors[mirror].rdev;
if (printk_ratelimit())
const int rw = bio_data_dir(bi);
int cpu, remaining;
- if (unlikely(bio_barrier(bi))) {
+ if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
bio_endio(bi, -EOPNOTSUPP);
return 0;
}
* I worked with.
*
* Empty barriers are not allowed anyway, see 51fd77bd9f512
- * for example, although later it was changed to bio_discard()
- * only, which does not work in this case.
+ * for example, although later it was changed to
+ * bio_rw_flagged(bio, BIO_RW_DISCARD) only, which does not
+ * work in this case.
*/
//err = -EOPNOTSUPP;
err = 0;
num_run++;
batch_run++;
- if (bio_sync(cur))
+ if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
num_sync_run++;
if (need_resched()) {
bio->bi_rw |= rw;
spin_lock(&device->io_lock);
- if (bio_sync(bio))
+ if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
pending_bios = &device->pending_sync_bios;
else
pending_bios = &device->pending_bios;
BIO_RW_NOIDLE,
};
+/*
+ * First four bits must match between bio->bi_rw and rq->cmd_flags, make
+ * that explicit here.
+ */
+#define BIO_RW_RQ_MASK 0xf
+
static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
{
return (bio->bi_rw & (1 << flag)) != 0;
}
-/*
- * Old defines, these should eventually be replaced by direct usage of
- * bio_rw_flagged()
- */
-#define bio_barrier(bio) bio_rw_flagged(bio, BIO_RW_BARRIER)
-#define bio_sync(bio) bio_rw_flagged(bio, BIO_RW_SYNCIO)
-#define bio_unplug(bio) bio_rw_flagged(bio, BIO_RW_UNPLUG)
-#define bio_failfast_dev(bio) bio_rw_flagged(bio, BIO_RW_FAILFAST_DEV)
-#define bio_failfast_transport(bio) \
- bio_rw_flagged(bio, BIO_RW_FAILFAST_TRANSPORT)
-#define bio_failfast_driver(bio) \
- bio_rw_flagged(bio, BIO_RW_FAILFAST_DRIVER)
-#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD)
-#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META)
-#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD)
-#define bio_noidle(bio) bio_rw_flagged(bio, BIO_RW_NOIDLE)
-
/*
* upper 16 bits of bi_rw define the io priority of this bio
*/
#define bio_offset(bio) bio_iovec((bio))->bv_offset
#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio) ((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
+#define bio_empty_barrier(bio) \
+	(bio_rw_flagged(bio, BIO_RW_BARRIER) && \
+	 !bio_has_data(bio) && \
+	 !bio_rw_flagged(bio, BIO_RW_DISCARD))
static inline unsigned int bio_cur_bytes(struct bio *bio)
{
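
For readers outside the kernel tree, here is a standalone userspace model of the new helper and of how it stands in for the deleted one-flag wrappers such as bio_barrier() and bio_sync(). struct bio is reduced to the single field the helper reads, and the enum ordering beyond the first four entries is illustrative only; the authoritative definitions live in include/linux/bio.h.

#include <stdbool.h>
#include <stdio.h>

enum bio_rw_flags {
	BIO_RW,
	BIO_RW_FAILFAST_DEV,
	BIO_RW_FAILFAST_TRANSPORT,
	BIO_RW_FAILFAST_DRIVER,
	/* the four flags above are the ones that must match the REQ_* bits */
	BIO_RW_AHEAD,
	BIO_RW_BARRIER,
	BIO_RW_SYNCIO,
	BIO_RW_UNPLUG,
	BIO_RW_META,
	BIO_RW_DISCARD,
	BIO_RW_NOIDLE,
};

struct bio {
	unsigned long bi_rw;	/* only field needed for this example */
};

static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
{
	return (bio->bi_rw & (1 << flag)) != 0;
}

int main(void)
{
	struct bio bio = { .bi_rw = (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_BARRIER) };

	/* old style: bio_sync(&bio), bio_barrier(&bio), bio_discard(&bio) */
	printf("%d %d %d\n",
	       bio_rw_flagged(&bio, BIO_RW_SYNCIO),
	       bio_rw_flagged(&bio, BIO_RW_BARRIER),
	       bio_rw_flagged(&bio, BIO_RW_DISCARD));	/* prints: 1 1 0 */
	return 0;
}
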
};
/*
- * request type modified bits. first two bits match BIO_RW* bits, important
+ * request type modified bits. first four bits match BIO_RW* bits, important
*/
enum rq_flag_bits {
__REQ_RW, /* not set, read. set, write */
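
A small standalone check of what the new BIO_RW_RQ_MASK comment promises: if the first four bio flag bits (RW plus the three FAILFAST bits) are laid out identically in bi_rw and cmd_flags, those bits can be copied across with a single 0xf mask and no per-flag translation. The EX_* enums below are stand-ins that merely reproduce that assumed layout.

#include <assert.h>

/* bits 0-3 of bi_rw, mirroring the start of enum bio_rw_flags */
enum { EX_BIO_RW, EX_BIO_RW_FAILFAST_DEV,
       EX_BIO_RW_FAILFAST_TRANSPORT, EX_BIO_RW_FAILFAST_DRIVER };
/* bits 0-3 of cmd_flags, mirroring the start of enum rq_flag_bits */
enum { EX_REQ_RW, EX_REQ_FAILFAST_DEV,
       EX_REQ_FAILFAST_TRANSPORT, EX_REQ_FAILFAST_DRIVER };

#define EX_BIO_RW_RQ_MASK	0xf	/* the four shared low bits */

int main(void)
{
	unsigned long bi_rw = (1UL << EX_BIO_RW) | (1UL << EX_BIO_RW_FAILFAST_TRANSPORT);
	unsigned int cmd_flags = bi_rw & EX_BIO_RW_RQ_MASK;

	/* the write bit and the failfast bit arrive in the matching positions */
	assert(cmd_flags & (1 << EX_REQ_RW));
	assert(cmd_flags & (1 << EX_REQ_FAILFAST_TRANSPORT));
	/* and nothing outside the shared low four bits leaks through */
	assert((cmd_flags & ~EX_BIO_RW_RQ_MASK) == 0);
	return 0;
}
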