#ifdef CONFIG_BLK_DEV_ZONED
int null_zone_init(struct nullb_device *dev);
void null_zone_exit(struct nullb_device *dev);
-blk_status_t null_zone_report(struct nullb *nullb,
-			      struct nullb_cmd *cmd);
-void null_zone_write(struct nullb_cmd *cmd);
-void null_zone_reset(struct nullb_cmd *cmd);
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+		     unsigned int nr_sectors);
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
#else
static inline int null_zone_init(struct nullb_device *dev)
{
	return -EINVAL;
}
static inline void null_zone_exit(struct nullb_device *dev) {}
static inline blk_status_t null_zone_report(struct nullb *nullb,
-					    struct nullb_cmd *cmd)
+					    struct bio *bio)
{
	return BLK_STS_NOTSUPP;
}
-static inline void null_zone_write(struct nullb_cmd *cmd) {}
-static inline void null_zone_reset(struct nullb_cmd *cmd) {}
+static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+				   unsigned int nr_sectors)
+{
+}
+static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {}
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __NULL_BLK_H */
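+/*
+ * REQ_OP_ZONE_REPORT lives in cmd->bio for bio-based queues (NULL_Q_BIO)
+ * and in cmd->rq otherwise, so check whichever one applies.
+ */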
+static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
+{
+	struct nullb_device *dev = cmd->nq->dev;
+
+	if (dev->queue_mode == NULL_Q_BIO) {
+		if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
+			cmd->error = null_zone_report(nullb, cmd->bio);
+			return true;
+		}
+	} else {
+		if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
+			cmd->error = null_zone_report(nullb, cmd->rq->bio);
+			return true;
+		}
+	}
+
+	return false;
+}
+
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;
-	if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
-		cmd->error = null_zone_report(nullb, cmd);
+	if (cmd_report_zone(nullb, cmd))
		goto out;
-	}
	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;
	cmd->error = errno_to_blk_status(err);
	if (!cmd->error && dev->zoned) {
-		if (req_op(cmd->rq) == REQ_OP_WRITE)
-			null_zone_write(cmd);
-		else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET)
-			null_zone_reset(cmd);
+		sector_t sector;
+		unsigned int nr_sectors;
+		int op;
+
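+		/*
+		 * The op code, start sector and transfer length come from
+		 * the bio for bio-based queues and from the request otherwise.
+		 */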
+		if (dev->queue_mode == NULL_Q_BIO) {
+			op = bio_op(cmd->bio);
+			sector = cmd->bio->bi_iter.bi_sector;
+			nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
+		} else {
+			op = req_op(cmd->rq);
+			sector = blk_rq_pos(cmd->rq);
+			nr_sectors = blk_rq_sectors(cmd->rq);
+		}
+
+		if (op == REQ_OP_WRITE)
+			null_zone_write(cmd, sector, nr_sectors);
+		else if (op == REQ_OP_ZONE_RESET)
+			null_zone_reset(cmd, sector);
	}
out:
	/* Complete IO by inline, softirq or timer */
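+/*
+ * Copy the zone report header and as many zone descriptors as fit into
+ * the pages of the report bio.
+ */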
-static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
-			      unsigned int zno, unsigned int nr_zones)
+static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
+			       unsigned int zno, unsigned int nr_zones)
{
	struct blk_zone_report_hdr *hdr = NULL;
	struct bio_vec bvec;
	void *addr;
	unsigned int zones_to_cpy;
-	bio_for_each_segment(bvec, rq->bio, iter) {
+	bio_for_each_segment(bvec, bio, iter) {
		addr = kmap_atomic(bvec.bv_page);
		zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
	}
}
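+/*
+ * Handle REQ_OP_ZONE_REPORT: clamp the zone count to what fits in the
+ * bio payload (minus one slot for the report header) and fill the bio.
+ */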
-blk_status_t null_zone_report(struct nullb *nullb,
-			      struct nullb_cmd *cmd)
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
{
	struct nullb_device *dev = nullb->dev;
-	struct request *rq = cmd->rq;
-	unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+	unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
	unsigned int nr_zones = dev->nr_zones - zno;
-	unsigned int max_zones = (blk_rq_bytes(rq) /
-				  sizeof(struct blk_zone)) - 1;
+	unsigned int max_zones;
+	max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
	nr_zones = min_t(unsigned int, nr_zones, max_zones);
-
-	null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
+	null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
	return BLK_STS_OK;
}
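+/*
+ * Sequential writes must land on the zone's write pointer; advance it by
+ * the number of sectors written.
+ */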
-void null_zone_write(struct nullb_cmd *cmd)
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+		     unsigned int nr_sectors)
{
	struct nullb_device *dev = cmd->nq->dev;
-	struct request *rq = cmd->rq;
-	sector_t sector = blk_rq_pos(rq);
-	unsigned int rq_sectors = blk_rq_sectors(rq);
	unsigned int zno = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zno];
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_IMP_OPEN:
		/* Writes must be at the write pointer position */
-		if (blk_rq_pos(rq) != zone->wp) {
+		if (sector != zone->wp) {
			cmd->error = BLK_STS_IOERR;
			break;
		}
		if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;
-		zone->wp += rq_sectors;
+		zone->wp += nr_sectors;
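+		/* A write that reaches the end of the zone makes it FULL */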
		if (zone->wp == zone->start + zone->len)
			zone->cond = BLK_ZONE_COND_FULL;
		break;
	}
}
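+/* REQ_OP_ZONE_RESET: return the zone containing @sector to the empty state */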
-void null_zone_reset(struct nullb_cmd *cmd)
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
-	struct request *rq = cmd->rq;
-	unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+	unsigned int zno = null_zone_no(dev, sector);
	struct blk_zone *zone = &dev->zones[zno];
	zone->cond = BLK_ZONE_COND_EMPTY;