block: remove the unused lock argument to rq_qos_throttle
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Wed, 14 Nov 2018 16:02:09 +0000 (17:02 +0100)
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: Thu, 15 Nov 2018 19:13:22 +0000 (12:13 -0700)
The lock argument is unused now that the legacy request path is gone.
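
With the argument gone, a ->throttle callback takes only the rq_qos
instance and the bio. For reference, a minimal no-op policy written
against the new signature might look like the sketch below (hypothetical,
not part of this commit; the "example_" names are made up):

  /*
   * Hypothetical policy illustrating the two-argument ->throttle hook.
   * rq_qos_throttle() no longer forwards a spinlock for the callback to
   * drop around sleeps, so the callback may simply block: it is invoked
   * from process context in blk_mq_make_request().
   */
  static void example_throttle(struct rq_qos *rqos, struct bio *bio)
  {
          /* may sleep directly, e.g. via io_schedule() */
  }

  static struct rq_qos_ops example_rqos_ops = {
          .throttle = example_throttle,
  };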

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-iolatency.c
block/blk-mq.c
block/blk-rq-qos.c
block/blk-rq-qos.h
block/blk-wbt.c

diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 38c35c32aff2dcf3fc0e9ac294a649f0be4a1cb1..8edf1b353ad1f9797f1174e6f09069f8b96eaa71 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -276,10 +276,8 @@ static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
 
 static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                                       struct iolatency_grp *iolat,
-                                      spinlock_t *lock, bool issue_as_root,
+                                      bool issue_as_root,
                                       bool use_memdelay)
-       __releases(lock)
-       __acquires(lock)
 {
        struct rq_wait *rqw = &iolat->rq_wait;
        unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
@@ -311,14 +309,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
                if (iolatency_may_queue(iolat, &wait, first_block))
                        break;
                first_block = false;
-
-               if (lock) {
-                       spin_unlock_irq(lock);
-                       io_schedule();
-                       spin_lock_irq(lock);
-               } else {
-                       io_schedule();
-               }
+               io_schedule();
        } while (1);
 
        finish_wait(&rqw->wait, &wait);
@@ -478,8 +469,7 @@ static void check_scale_change(struct iolatency_grp *iolat)
        scale_change(iolat, direction > 0);
 }
 
-static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
-                                    spinlock_t *lock)
+static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
 {
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
        struct blkcg *blkcg;
@@ -495,13 +485,11 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
        bio_associate_blkcg(bio, &blkcg->css);
        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg)) {
-               if (!lock)
-                       spin_lock_irq(q->queue_lock);
+               spin_lock_irq(q->queue_lock);
                blkg = blkg_lookup_create(blkcg, q);
                if (IS_ERR(blkg))
                        blkg = NULL;
-               if (!lock)
-                       spin_unlock_irq(q->queue_lock);
+               spin_unlock_irq(q->queue_lock);
        }
        if (!blkg)
                goto out;
@@ -518,7 +506,7 @@ out:
                }
 
                check_scale_change(iolat);
-               __blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
+               __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
                                     (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
                blkg = blkg->parent;
        }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e2717e843727c34f0de92e06bd0509ed7026aa52..a3f057fdd0457687948dafbd7a190fba826695e1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1886,7 +1886,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        if (blk_mq_sched_bio_merge(q, bio))
                return BLK_QC_T_NONE;
 
-       rq_qos_throttle(q, bio, NULL);
+       rq_qos_throttle(q, bio);
 
        rq = blk_mq_get_request(q, bio, &data);
        if (unlikely(!rq)) {
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 0005dfd568dd5baaf3804106b25ef6a37b2a79ee..f8a4d3fbb98cdafac5179ea395625a3455a0f78b 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -67,14 +67,13 @@ void rq_qos_requeue(struct request_queue *q, struct request *rq)
        }
 }
 
-void rq_qos_throttle(struct request_queue *q, struct bio *bio,
-                    spinlock_t *lock)
+void rq_qos_throttle(struct request_queue *q, struct bio *bio)
 {
        struct rq_qos *rqos;
 
        for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
                if (rqos->ops->throttle)
-                       rqos->ops->throttle(rqos, bio, lock);
+                       rqos->ops->throttle(rqos, bio);
        }
 }
 
diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
index 32b02efbfa66dda638a02d07aa00c63df48a5dab..b6b11d4960078d2e3a0f3a91339573b63d3de308 100644
--- a/block/blk-rq-qos.h
+++ b/block/blk-rq-qos.h
@@ -25,7 +25,7 @@ struct rq_qos {
 };
 
 struct rq_qos_ops {
-       void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
+       void (*throttle)(struct rq_qos *, struct bio *);
        void (*track)(struct rq_qos *, struct request *, struct bio *);
        void (*issue)(struct rq_qos *, struct request *);
        void (*requeue)(struct rq_qos *, struct request *);
@@ -103,7 +103,7 @@ void rq_qos_done(struct request_queue *, struct request *);
 void rq_qos_issue(struct request_queue *, struct request *);
 void rq_qos_requeue(struct request_queue *, struct request *);
 void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
-void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
+void rq_qos_throttle(struct request_queue *, struct bio *);
 void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
 void rq_qos_exit(struct request_queue *);
 #endif
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 0fc222d4194b53e87b2fc95cb65d560b0277b59e..e5a66c574683848fc08f4108200637a0eb7c5c34 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -521,9 +521,7 @@ static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
  * the timer to kick off queuing again.
  */
 static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
-                      unsigned long rw, spinlock_t *lock)
-       __releases(lock)
-       __acquires(lock)
+                      unsigned long rw)
 {
        struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
        struct wbt_wait_data data = {
@@ -561,13 +559,7 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                        break;
                }
 
-               if (lock) {
-                       spin_unlock_irq(lock);
-                       io_schedule();
-                       spin_lock_irq(lock);
-               } else
-                       io_schedule();
-
+               io_schedule();
                has_sleeper = false;
        } while (1);
 
@@ -624,7 +616,7 @@ static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)
  * in an irq held spinlock, if it holds one when calling this function.
  * If we do sleep, we'll release and re-grab it.
  */
-static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
+static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
 {
        struct rq_wb *rwb = RQWB(rqos);
        enum wbt_flags flags;
@@ -636,7 +628,7 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
                return;
        }
 
-       __wbt_wait(rwb, flags, bio->bi_opf, lock);
+       __wbt_wait(rwb, flags, bio->bi_opf);
 
        if (!blk_stat_is_active(rwb->cb))
                rwb_arm_timer(rwb);
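
Both wait loops now reduce to the bare prepare_to_wait/io_schedule idiom.
A condensed sketch of that pattern, with a hypothetical may_queue()
predicate standing in for the policy-specific checks (illustrative only,
not code from this commit):

  /*
   * Sketch of the unconditional sleep loop wbt and iolatency share after
   * this change: there is no caller-held lock to release, so io_schedule()
   * is called directly until may_queue() succeeds.
   */
  static void example_wait(struct rq_wait *rqw,
                           bool (*may_queue)(struct rq_wait *))
  {
          DEFINE_WAIT(wait);

          do {
                  prepare_to_wait_exclusive(&rqw->wait, &wait,
                                            TASK_UNINTERRUPTIBLE);
                  if (may_queue(rqw))
                          break;
                  io_schedule();
          } while (1);

          finish_wait(&rqw->wait, &wait);
  }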