The spinlock argument threaded through rq_qos_throttle() and the ->throttle callbacks is unused now that the legacy request path is gone: the remaining blk-mq caller always passed NULL. Drop it.
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
struct iolatency_grp *iolat,
- spinlock_t *lock, bool issue_as_root,
+ bool issue_as_root,
bool use_memdelay)
- __releases(lock)
- __acquires(lock)
{
struct rq_wait *rqw = &iolat->rq_wait;
unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
if (iolatency_may_queue(iolat, &wait, first_block))
break;
first_block = false;
-
- if (lock) {
- spin_unlock_irq(lock);
- io_schedule();
- spin_lock_irq(lock);
- } else {
- io_schedule();
- }
+ io_schedule();
} while (1);
finish_wait(&rqw->wait, &wait);
scale_change(iolat, direction > 0);
}
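
With no caller-held lock to drop, the slow path reduces to a plain prepare_to_wait/io_schedule/finish_wait loop. A minimal sketch of that shape, assuming the struct rq_wait from blk-rq-qos.h; the helper name and may_queue callback are hypothetical, not the upstream code:

static void example_wait_for_slot(struct rq_wait *rqw,
                                  bool (*may_queue)(struct rq_wait *rqw))
{
        DEFINE_WAIT(wait);

        do {
                prepare_to_wait_exclusive(&rqw->wait, &wait,
                                          TASK_UNINTERRUPTIBLE);
                if (may_queue(rqw))
                        break;
                /* nothing to unlock any more: just go to sleep */
                io_schedule();
        } while (1);

        finish_wait(&rqw->wait, &wait);
}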
-static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
- spinlock_t *lock)
+static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
struct blkcg *blkcg;
bio_associate_blkcg(bio, &blkcg->css);
blkg = blkg_lookup(blkcg, q);
if (unlikely(!blkg)) {
- if (!lock)
- spin_lock_irq(q->queue_lock);
+ spin_lock_irq(q->queue_lock);
blkg = blkg_lookup_create(blkcg, q);
if (IS_ERR(blkg))
blkg = NULL;
- if (!lock)
- spin_unlock_irq(q->queue_lock);
+ spin_unlock_irq(q->queue_lock);
}
if (!blkg)
goto out;
}
check_scale_change(iolat);
- __blkcg_iolatency_throttle(rqos, iolat, lock, issue_as_root,
+ __blkcg_iolatency_throttle(rqos, iolat, issue_as_root,
(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
blkg = blkg->parent;
}
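
Correspondingly, the blkg lookup-or-create step now always takes q->queue_lock itself rather than relying on a lock handed in from the legacy path. A hedged sketch of that step with a hypothetical helper name, assuming the caller holds rcu_read_lock() as blkcg_iolatency_throttle does:

static struct blkcg_gq *example_blkg_lookup_create(struct blkcg *blkcg,
                                                   struct request_queue *q)
{
        struct blkcg_gq *blkg;

        /* fast path: the blkg usually already exists */
        blkg = blkg_lookup(blkcg, q);
        if (likely(blkg))
                return blkg;

        /* slow path: create it under the queue lock we now take ourselves */
        spin_lock_irq(q->queue_lock);
        blkg = blkg_lookup_create(blkcg, q);
        spin_unlock_irq(q->queue_lock);

        return IS_ERR(blkg) ? NULL : blkg;
}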
if (blk_mq_sched_bio_merge(q, bio))
return BLK_QC_T_NONE;
- rq_qos_throttle(q, bio, NULL);
+ rq_qos_throttle(q, bio);
rq = blk_mq_get_request(q, bio, &data);
if (unlikely(!rq)) {
}
}
-void rq_qos_throttle(struct request_queue *q, struct bio *bio,
- spinlock_t *lock)
+void rq_qos_throttle(struct request_queue *q, struct bio *bio)
{
struct rq_qos *rqos;
for(rqos = q->rq_qos; rqos; rqos = rqos->next) {
if (rqos->ops->throttle)
- rqos->ops->throttle(rqos, bio, lock);
+ rqos->ops->throttle(rqos, bio);
}
}
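
For callback authors, the effect is that a ->throttle implementation now takes only the rq_qos and the bio and may sleep freely, since the blk-mq submission path holds no queue lock at this point. A minimal sketch of such a hook; example_throttle is hypothetical, not an upstream policy:

static void example_throttle(struct rq_qos *rqos, struct bio *bio)
{
        /* example policy decision: leave non-sync bios alone */
        if (!(bio->bi_opf & REQ_SYNC))
                return;

        /*
         * Free to block here (e.g. an io_schedule()-based wait loop);
         * the blk-mq submission path holds no queue lock at this point.
         */
}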
};
struct rq_qos_ops {
- void (*throttle)(struct rq_qos *, struct bio *, spinlock_t *);
+ void (*throttle)(struct rq_qos *, struct bio *);
void (*track)(struct rq_qos *, struct request *, struct bio *);
void (*issue)(struct rq_qos *, struct request *);
void (*requeue)(struct rq_qos *, struct request *);
void rq_qos_issue(struct request_queue *, struct request *);
void rq_qos_requeue(struct request_queue *, struct request *);
void rq_qos_done_bio(struct request_queue *q, struct bio *bio);
-void rq_qos_throttle(struct request_queue *, struct bio *, spinlock_t *);
+void rq_qos_throttle(struct request_queue *, struct bio *);
void rq_qos_track(struct request_queue *q, struct request *, struct bio *);
void rq_qos_exit(struct request_queue *);
#endif
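
A hook like the example_throttle sketch above is wired up through the updated rq_qos_ops prototype and linked into the per-queue q->rq_qos chain that rq_qos_throttle() walks. A hedged sketch of that wiring; the names are hypothetical and the open-coded list insertion is an assumption about the registration step, not an exact upstream helper:

static struct rq_qos_ops example_rqos_ops = {
        .throttle       = example_throttle,     /* new two-argument prototype */
};

static void example_rqos_register(struct request_queue *q, struct rq_qos *rqos)
{
        rqos->ops = &example_rqos_ops;
        rqos->q = q;
        /* push onto the per-queue chain walked by rq_qos_throttle() */
        rqos->next = q->rq_qos;
        q->rq_qos = rqos;
}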
* the timer to kick off queuing again.
*/
static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
- unsigned long rw, spinlock_t *lock)
- __releases(lock)
- __acquires(lock)
+ unsigned long rw)
{
struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
struct wbt_wait_data data = {
break;
}
- if (lock) {
- spin_unlock_irq(lock);
- io_schedule();
- spin_lock_irq(lock);
- } else
- io_schedule();
-
+ io_schedule();
has_sleeper = false;
} while (1);
* in an irq held spinlock, if it holds one when calling this function.
* If we do sleep, we'll release and re-grab it.
*/
-static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
+static void wbt_wait(struct rq_qos *rqos, struct bio *bio)
{
struct rq_wb *rwb = RQWB(rqos);
enum wbt_flags flags;
return;
}
- __wbt_wait(rwb, flags, bio->bi_opf, lock);
+ __wbt_wait(rwb, flags, bio->bi_opf);
if (!blk_stat_is_active(rwb->cb))
rwb_arm_timer(rwb);