author		Jens Axboe <jens.axboe@oracle.com>	2008-07-03 07:18:54 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2008-07-03 07:21:15 -0400
commit		e48ec69005f02b70b7ecfde1bc39a599086d16ef
tree		1868fc162e00af21332a82cdf348229c6b985d2f
parent		e180f5949327e897bc35a816f4f4010186632df9
block: extend queue_flag bitops
Add queue_flag_test_and_clear() and queue_flag_test_and_set(), complementing the existing queue_flag_set()/queue_flag_clear() helpers.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
 block/blk-core.c       | 12 ++++--------
 include/linux/blkdev.h | 26 ++++++++++++++++++++++++++
 2 files changed, 30 insertions(+), 8 deletions(-)
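The new helpers fold the open-coded "test_bit() then __set_bit()/__clear_bit()" sequence on q->queue_flags into a single call that also reports the old value. A minimal sketch of the before/after calling pattern, modeled on blk_plug_device() below (the wrapper function names here are illustrative, not part of the patch):

#include <linux/blkdev.h>

/* Before this patch: the test and the set were two separate bitops. */
static void plug_device_old(struct request_queue *q)
{
	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
	}
}

/* After: one helper does the test and the set, returning the old state. */
static void plug_device_new(struct request_queue *q)
{
	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q))
		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
}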
diff --git a/block/blk-core.c b/block/blk-core.c
index e0fb0bcc0c17..dbc7f42b5d2b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -205,8 +205,7 @@ void blk_plug_device(struct request_queue *q)
 	if (blk_queue_stopped(q))
 		return;
 
-	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
-		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
@@ -221,10 +220,9 @@ int blk_remove_plug(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
 		return 0;
 
-	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
 	del_timer(&q->unplug_timer);
 	return 1;
 }
@@ -328,8 +326,7 @@ void blk_start_queue(struct request_queue *q)
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
 	 */
-	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-		queue_flag_set(QUEUE_FLAG_REENTER, q);
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
@@ -394,8 +391,7 @@ void __blk_run_queue(struct request_queue *q)
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
 	if (!elv_queue_empty(q)) {
-		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-			queue_flag_set(QUEUE_FLAG_REENTER, q);
+		if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 			q->request_fn(q);
 			queue_flag_clear(QUEUE_FLAG_REENTER, q);
 		} else {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ff9d0bdf2a16..e04c4ac8a7cf 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -428,6 +428,32 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
 	__set_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_flag_test_and_clear(unsigned int flag,
+					    struct request_queue *q)
+{
+	WARN_ON_ONCE(!queue_is_locked(q));
+
+	if (test_bit(flag, &q->queue_flags)) {
+		__clear_bit(flag, &q->queue_flags);
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int queue_flag_test_and_set(unsigned int flag,
+					  struct request_queue *q)
+{
+	WARN_ON_ONCE(!queue_is_locked(q));
+
+	if (!test_bit(flag, &q->queue_flags)) {
+		__set_bit(flag, &q->queue_flags);
+		return 0;
+	}
+
+	return 1;
+}
+
 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
 {
 	WARN_ON_ONCE(!queue_is_locked(q));
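Both helpers assert queue_is_locked(), so they are only valid with q->queue_lock held; that is what makes the non-atomic __set_bit()/__clear_bit() variants safe here. A usage sketch, assuming a caller that takes the lock itself (the function name is hypothetical, not part of this patch):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Hypothetical caller: take the queue lock around the helper, since it
 * uses the non-atomic __set_bit() internally and warns if the lock is
 * not held. Returns the previous state of the flag. */
static int mark_queue_plugged(struct request_queue *q)
{
	int was_plugged;

	spin_lock_irq(q->queue_lock);
	was_plugged = queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q);
	spin_unlock_irq(q->queue_lock);

	return was_plugged;
}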