Merge commit '8e8320c9315c' into for-4.13/block
author Jens Axboe <axboe@kernel.dk>
Fri, 23 Jun 2017 03:55:24 +0000 (21:55 -0600)
committer Jens Axboe <axboe@kernel.dk>
Fri, 23 Jun 2017 03:55:24 +0000 (21:55 -0600)
Pull in the fix for shared tags, as it conflicts with the pending
changes in for-4.13/block. We already pulled in v4.12-rc5 to resolve
other conflicts and pick up fixes that went into 4.12, so there are
not a lot of changes in this merge.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
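
For context on "the fix for shared tags": the merged commit (8e8320c9315c) has each
hardware queue set a BLK_MQ_S_SCHED_RESTART flag when it needs a restart and, for
queues that share a tag set, additionally keeps a per-request-queue counter
(q->shared_hctx_restart) of how many hardware queues are currently marked, as the
blk-mq-sched.c hunk below shows. The sketch that follows is not kernel code; it is a
minimal, self-contained illustration of that accounting pattern using C11 atomics, and
every name in it (req_queue, hw_queue, mark_restart, clear_restart, F_TAG_SHARED,
S_SCHED_RESTART) is a made-up stand-in rather than the kernel's API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define F_TAG_SHARED    (1u << 0)   /* hw queue shares its tag set with others */
#define S_SCHED_RESTART (1u << 1)   /* hw queue is marked as needing a restart */

struct req_queue {
        atomic_uint shared_hctx_restart; /* # of shared hw queues marked for restart */
};

struct hw_queue {
        struct req_queue *q;
        unsigned int flags;
        atomic_uint state;
};

/* Mark a hardware queue as needing a restart; for shared-tag queues, bump
 * the per-request-queue counter exactly once per 0 -> 1 transition. */
static void mark_restart(struct hw_queue *hctx)
{
        unsigned int old = atomic_fetch_or(&hctx->state, S_SCHED_RESTART);

        if ((hctx->flags & F_TAG_SHARED) && !(old & S_SCHED_RESTART))
                atomic_fetch_add(&hctx->q->shared_hctx_restart, 1);
}

/* Clear the restart mark; returns true if this call actually cleared it. */
static bool clear_restart(struct hw_queue *hctx)
{
        unsigned int old = atomic_fetch_and(&hctx->state, ~S_SCHED_RESTART);

        if (!(old & S_SCHED_RESTART))
                return false;
        if (hctx->flags & F_TAG_SHARED)
                atomic_fetch_sub(&hctx->q->shared_hctx_restart, 1);
        return true;
}

int main(void)
{
        struct req_queue q = { 0 };
        struct hw_queue hctx = { .q = &q, .flags = F_TAG_SHARED };

        mark_restart(&hctx);
        mark_restart(&hctx);    /* second mark does not bump the counter again */
        printf("marked shared hctxs: %u\n", atomic_load(&q.shared_hctx_restart));

        /* Fast path: when the counter reads zero there is nothing to restart,
         * so a completion does not have to walk every hardware queue. */
        if (atomic_load(&q.shared_hctx_restart))
                clear_restart(&hctx);
        printf("after restart: %u\n", atomic_load(&q.shared_hctx_restart));
        return 0;
}

The benefit of the counter is that a restart pass can bail out early when it reads
zero instead of scanning every hardware queue sharing the tag set; the demo's final
check imitates that early exit.
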
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq.c
drivers/block/xen-blkback/blkback.c
include/linux/blkdev.h

diff --cc block/blk-mq-sched.c
index 191bf82d185e82aa2cc7bedd51925de978ed42a4,0ded5e846335667406d58ce08e8439360baeb312..7f0dc48ffb40895a499208474536e300f2efab34
@@@ -46,10 -47,131 +46,49 @@@ void blk_mq_sched_assign_ioc(struct req
                if (!icq)
                        return;
        }
 -
 +      get_io_context(icq->ioc);
        rq->elv.icq = icq;
 -      if (!blk_mq_sched_get_rq_priv(q, rq, bio)) {
 -              rq->rq_flags |= RQF_ELVPRIV;
 -              get_io_context(icq->ioc);
 -              return;
 -      }
 -
 -      rq->elv.icq = NULL;
 -}
 -
 -static void blk_mq_sched_assign_ioc(struct request_queue *q,
 -                                  struct request *rq, struct bio *bio)
 -{
 -      struct io_context *ioc;
 -
 -      ioc = rq_ioc(bio);
 -      if (ioc)
 -              __blk_mq_sched_assign_ioc(q, rq, bio, ioc);
  }
  
 -struct request *blk_mq_sched_get_request(struct request_queue *q,
 -                                       struct bio *bio,
 -                                       unsigned int op,
 -                                       struct blk_mq_alloc_data *data)
 -{
 -      struct elevator_queue *e = q->elevator;
 -      struct request *rq;
 -
 -      blk_queue_enter_live(q);
 -      data->q = q;
 -      if (likely(!data->ctx))
 -              data->ctx = blk_mq_get_ctx(q);
 -      if (likely(!data->hctx))
 -              data->hctx = blk_mq_map_queue(q, data->ctx->cpu);
 -
 -      if (e) {
 -              data->flags |= BLK_MQ_REQ_INTERNAL;
 -
 -              /*
 -               * Flush requests are special and go directly to the
 -               * dispatch list.
 -               */
 -              if (!op_is_flush(op) && e->type->ops.mq.get_request) {
 -                      rq = e->type->ops.mq.get_request(q, op, data);
 -                      if (rq)
 -                              rq->rq_flags |= RQF_QUEUED;
 -              } else
 -                      rq = __blk_mq_alloc_request(data, op);
 -      } else {
 -              rq = __blk_mq_alloc_request(data, op);
 -      }
 -
 -      if (rq) {
 -              if (!op_is_flush(op)) {
 -                      rq->elv.icq = NULL;
 -                      if (e && e->type->icq_cache)
 -                              blk_mq_sched_assign_ioc(q, rq, bio);
 -              }
 -              data->hctx->queued++;
 -              return rq;
 -      }
 -
 -      blk_queue_exit(q);
 -      return NULL;
 -}
 -
 -void blk_mq_sched_put_request(struct request *rq)
 -{
 -      struct request_queue *q = rq->q;
 -      struct elevator_queue *e = q->elevator;
 -
 -      if (rq->rq_flags & RQF_ELVPRIV) {
 -              blk_mq_sched_put_rq_priv(rq->q, rq);
 -              if (rq->elv.icq) {
 -                      put_io_context(rq->elv.icq->ioc);
 -                      rq->elv.icq = NULL;
 -              }
 -      }
 -
 -      if ((rq->rq_flags & RQF_QUEUED) && e && e->type->ops.mq.put_request)
 -              e->type->ops.mq.put_request(rq);
 -      else
 -              blk_mq_finish_request(rq);
 -}
 -
+ /*
+  * Mark a hardware queue as needing a restart. For shared queues, maintain
+  * a count of how many hardware queues are marked for restart.
+  */
+ static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
+ {
+       if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+               return;
+
+       if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+               struct request_queue *q = hctx->queue;
+
+               if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                       atomic_inc(&q->shared_hctx_restart);
+       } else
+               set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+ }
+
+ static bool blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+ {
+       if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+               return false;
+
+       if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
+               struct request_queue *q = hctx->queue;
+
+               if (test_and_clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+                       atomic_dec(&q->shared_hctx_restart);
+       } else
+               clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+
+       if (blk_mq_hctx_has_pending(hctx)) {
+               blk_mq_run_hw_queue(hctx, true);
+               return true;
+       }
+
+       return false;
+ }

  void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
  {
        struct request_queue *q = hctx->queue;
diff --cc block/blk-mq-sched.h
Simple merge
diff --cc block/blk-mq.c
Simple merge
diff --cc drivers/block/xen-blkback/blkback.c
Simple merge
diff --cc include/linux/blkdev.h
Simple merge