block: remove REQ_ATOM_POLL_SLEPT
authorJens Axboe <axboe@kernel.dk>
Wed, 10 Jan 2018 18:30:56 +0000 (11:30 -0700)
committerJens Axboe <axboe@kernel.dk>
Wed, 10 Jan 2018 18:47:43 +0000 (11:47 -0700)
We don't need this to be an atomic flag; it can be a regular
flag. Either we end up on the same CPU for the polling, in which
case the state is sane, or we did the sleep, which would imply
the needed barrier to ensure we see the right state.

Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-debugfs.c
block/blk-mq.c
block/blk.h
include/linux/blkdev.h

index 25d41151073d445d50d758dddef8881ac2a31f22..dd890d5e0fbdc626b4a6c7e27064f41408caf093 100644 (file)
@@ -290,13 +290,13 @@ static const char *const rqf_name[] = {
        RQF_NAME(SPECIAL_PAYLOAD),
        RQF_NAME(ZONE_WRITE_LOCKED),
        RQF_NAME(MQ_TIMEOUT_EXPIRED),
+       RQF_NAME(MQ_POLL_SLEPT),
 };
 #undef RQF_NAME
 
 #define RQAF_NAME(name) [REQ_ATOM_##name] = #name
 static const char *const rqaf_name[] = {
        RQAF_NAME(COMPLETE),
-       RQAF_NAME(POLL_SLEPT),
 };
 #undef RQAF_NAME
 
index 266fc4f6b046f29cc6c1f45d476232880fc0ecfe..3239ca9e199f52a0fc01e1be6af133782b406401 100644 (file)
@@ -483,7 +483,6 @@ void blk_mq_free_request(struct request *rq)
                blk_put_rl(blk_rq_rl(rq));
 
        blk_mq_rq_update_state(rq, MQ_RQ_IDLE);
-       clear_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
        if (rq->tag != -1)
                blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
        if (sched_tag != -1)
@@ -2976,7 +2975,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
        unsigned int nsecs;
        ktime_t kt;
 
-       if (test_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags))
+       if (rq->rq_flags & RQF_MQ_POLL_SLEPT)
                return false;
 
        /*
@@ -2996,7 +2995,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
        if (!nsecs)
                return false;
 
-       set_bit(REQ_ATOM_POLL_SLEPT, &rq->atomic_flags);
+       rq->rq_flags |= RQF_MQ_POLL_SLEPT;
 
        /*
         * This will be replaced with the stats tracking code, using
index a68dbe312ea36377064af0da0628437edb60fab8..eb306c52121ed70b8a3c84f394a8970e9ba834e5 100644 (file)
@@ -124,8 +124,6 @@ void blk_account_io_done(struct request *req);
  */
 enum rq_atomic_flags {
        REQ_ATOM_COMPLETE = 0,
-
-       REQ_ATOM_POLL_SLEPT,
 };
 
 /*
index 007a7cf1f262157181043a71dfdcf64e6a688dbe..ba31674d8581de32d5bb1e7f7c298822de529b8a 100644 (file)
@@ -127,6 +127,8 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_ZONE_WRITE_LOCKED  ((__force req_flags_t)(1 << 19))
 /* timeout is expired */
 #define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20))
+/* already slept for hybrid poll */
+#define RQF_MQ_POLL_SLEPT      ((__force req_flags_t)(1 << 21))
 
 /* flags that prevent us from merging requests: */
 #define RQF_NOMERGE_FLAGS \