/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_set(flag, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);
/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- queue_flag_clear(flag, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);
/**
 * blk_queue_flag_test_and_set - atomically test and set a queue flag
 * @flag: flag to be set
 * @q: request queue
 *
 * Returns the previous value of @flag - 0 if the flag was not set and 1 if
 * the flag was set.
 */
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
{
- unsigned long flags;
- bool res;
-
- spin_lock_irqsave(q->queue_lock, flags);
- res = queue_flag_test_and_set(flag, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return res;
+ return test_and_set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
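
For reference, a minimal usage sketch (hypothetical caller, not part of this patch): because the flag word is now manipulated with atomic bitops, the test-and-set variant can guard one-time setup without taking any lock; the return value is simply the previous state of the bit.

/* Hedged sketch: hypothetical caller; QUEUE_FLAG_STATS chosen only for illustration. */
static void example_enable_queue_stats(struct request_queue *q)
{
        /* Only the caller that actually flipped the bit performs the setup. */
        if (!blk_queue_flag_test_and_set(QUEUE_FLAG_STATS, q))
                pr_info("queue stats accounting enabled\n");
}
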
-/**
- * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
- * @flag: flag to be cleared
- * @q: request queue
- *
- * Returns the previous value of @flag - 0 if the flag was not set and 1 if
- * the flag was set.
- */
-bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
-{
- unsigned long flags;
- bool res;
-
- spin_lock_irqsave(q->queue_lock, flags);
- res = queue_flag_test_and_clear(flag, q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- return res;
-}
-EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
-
void blk_rq_init(struct request_queue *q, struct request *rq)
{
memset(rq, 0, sizeof(*rq));
/* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
blk_set_queue_dying(q);
- spin_lock_irq(lock);
- queue_flag_set(QUEUE_FLAG_NOMERGES, q);
- queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- queue_flag_set(QUEUE_FLAG_DYING, q);
- spin_unlock_irq(lock);
+ blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
mutex_unlock(&q->sysfs_lock);
/*
rq_qos_exit(q);
- spin_lock_irq(lock);
- queue_flag_set(QUEUE_FLAG_DEAD, q);
- spin_unlock_irq(lock);
+ blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
/*
* make sure all in-progress dispatch are completed because
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
if (!(set->flags & BLK_MQ_F_SG_MERGE))
- queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+ blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
q->sg_reserved_size = INT_MAX;
/**
 * blk_queue_write_cache - configure queue's write cache
 * @q: the request queue for the device
 * @wc: write back cache on or off
 * @fua: device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
- spin_lock_irq(q->queue_lock);
if (wc)
- queue_flag_set(QUEUE_FLAG_WC, q);
+ blk_queue_flag_set(QUEUE_FLAG_WC, q);
else
- queue_flag_clear(QUEUE_FLAG_WC, q);
+ blk_queue_flag_clear(QUEUE_FLAG_WC, q);
if (fua)
- queue_flag_set(QUEUE_FLAG_FUA, q);
+ blk_queue_flag_set(QUEUE_FLAG_FUA, q);
else
- queue_flag_clear(QUEUE_FLAG_FUA, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
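
As a usage illustration (hypothetical driver probe code, not from this patch), a driver whose device has a volatile write cache and native FUA support would advertise both like this:

/* Hedged sketch: hypothetical probe-time setup. */
static void example_setup_write_cache(struct request_queue *q, bool vwc, bool fua)
{
        /* Tell the flush machinery about the device's volatile cache and FUA support. */
        blk_queue_write_cache(q, vwc, fua);
}
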
if (ret < 0)
return ret;
- spin_lock_irq(q->queue_lock);
- queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
- queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
+ blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
if (nm == 2)
- queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+ blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
else if (nm)
- queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
- spin_unlock_irq(q->queue_lock);
+ blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
return ret;
}
if (ret < 0)
return ret;
- spin_lock_irq(q->queue_lock);
if (val == 2) {
- queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
- queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
} else if (val == 1) {
- queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
- queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+ blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+ blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
} else if (val == 0) {
- queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
- queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+ blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+ blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
}
- spin_unlock_irq(q->queue_lock);
#endif
return ret;
}
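
For context, writing 1 to the rq_affinity attribute sets only QUEUE_FLAG_SAME_COMP (complete on a CPU sharing a cache with the submitter), while 2 additionally sets QUEUE_FLAG_SAME_FORCE (complete on the exact submitting CPU). A hedged, illustrative sketch of a completion-side check built on those flags (not the kernel's actual completion path):

/* Hedged sketch: illustrative only. */
static bool example_complete_locally(struct request_queue *q, int submit_cpu)
{
        if (!test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
                return true;    /* no completion affinity requested */
        if (test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
                return submit_cpu == raw_smp_processor_id();
        return cpus_share_cache(submit_cpu, raw_smp_processor_id());
}
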
WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
"%s is registering an already registered queue\n",
kobject_name(&dev->kobj));
- queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);
+ blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
/*
* SCSI probing may synchronously create and destroy a lot of
 * request_queues for non-existent devices that never get registered.
*/
if (!blk_queue_init_done(q)) {
- queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+ blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
percpu_ref_switch_to_percpu(&q->q_usage_counter);
}
lockdep_assert_held(q->queue_lock);
}
-static inline void queue_flag_set_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
- kref_read(&q->kobj.kref))
- lockdep_assert_held(q->queue_lock);
- __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
- kref_read(&q->kobj.kref))
- lockdep_assert_held(q->queue_lock);
- __clear_bit(flag, &q->queue_flags);
-}
-
-static inline int queue_flag_test_and_clear(unsigned int flag,
- struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
-
- if (test_bit(flag, &q->queue_flags)) {
- __clear_bit(flag, &q->queue_flags);
- return 1;
- }
-
- return 0;
-}
-
-static inline int queue_flag_test_and_set(unsigned int flag,
- struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
-
- if (!test_bit(flag, &q->queue_flags)) {
- __set_bit(flag, &q->queue_flags);
- return 0;
- }
-
- return 1;
-}
-
-static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
- __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
-{
- queue_lockdep_assert_held(q);
- __clear_bit(flag, &q->queue_flags);
-}
-
static inline struct blk_flush_queue *
blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
{
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
-bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
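
With queue_flags now a plain atomic bitmap, readers continue to use the test_bit()-based wrappers above; a minimal sketch of a hypothetical helper combining two of them:

/* Hedged sketch: hypothetical helper built only on the wrappers declared above. */
static inline bool example_queue_accepts_requests(struct request_queue *q)
{
        return !blk_queue_stopped(q) && !blk_queue_dying(q);
}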