if (blk_queue_stopped(q))
return;
- if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
- __set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+ if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
}
}

int blk_remove_plug(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
- if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+ if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
return 0;
- queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
del_timer(&q->unplug_timer);
return 1;
}
/*
 * one level of recursion is ok and is much faster than kicking
* the unplug handling
*/
- if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
- queue_flag_set(QUEUE_FLAG_REENTER, q);
+ if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
q->request_fn(q);
queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
	blk_plug_device(q);
	kblockd_schedule_work(&q->unplug_work);
}
}
/*
 * Only recurse once to avoid overrunning the stack, let the unplug
 * handling reinvoke the handler shortly if we already got there.
*/
if (!elv_queue_empty(q)) {
- if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
- queue_flag_set(QUEUE_FLAG_REENTER, q);
+ if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
q->request_fn(q);
queue_flag_clear(QUEUE_FLAG_REENTER, q);
} else {
	blk_plug_device(q);
	kblockd_schedule_work(&q->unplug_work);
}
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}
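+/*
+ * Non-atomic replacement for test_and_clear_bit(): the queue lock must
+ * be held, which is what makes the unlocked __clear_bit() safe here.
+ * Returns 1 if the flag was set (and is now cleared), 0 if it was clear.
+ */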
+static inline int queue_flag_test_and_clear(unsigned int flag,
+ struct request_queue *q)
+{
+ WARN_ON_ONCE(!queue_is_locked(q));
+
+ if (test_bit(flag, &q->queue_flags)) {
+ __clear_bit(flag, &q->queue_flags);
+ return 1;
+ }
+
+ return 0;
+}
+
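+/*
+ * Non-atomic replacement for test_and_set_bit(), likewise requiring the
+ * queue lock. Returns the old value: 0 if the flag was clear (and is now
+ * set), 1 if it was already set.
+ */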
+static inline int queue_flag_test_and_set(unsigned int flag,
+ struct request_queue *q)
+{
+ WARN_ON_ONCE(!queue_is_locked(q));
+
+ if (!test_bit(flag, &q->queue_flags)) {
+ __set_bit(flag, &q->queue_flags);
+ return 0;
+ }
+
+ return 1;
+}
+
static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
WARN_ON_ONCE(!queue_is_locked(q));
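
Every call-site hunk above applies the same conversion: an atomic test_bit() paired with __set_bit()/queue_flag_clear() collapses into a single test-and-set or test-and-clear helper call made with q->queue_lock held. A minimal sketch of the resulting caller pattern (example_plug_device is a made-up name for illustration; the body mirrors the blk_plug_device hunk at the top of this patch):

/* Illustrative sketch only, not part of the patch. */
static void example_plug_device(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);

	/*
	 * Holding queue_lock is what makes the non-atomic __set_bit()
	 * inside the helper safe; queue_is_locked() warns otherwise.
	 */
	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q))
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);

	spin_unlock_irqrestore(q->queue_lock, flags);
}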