aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorJens Axboe <jens.axboe@oracle.com>2008-07-03 07:18:54 -0400
committerJens Axboe <jens.axboe@oracle.com>2008-07-03 07:21:15 -0400
commite48ec69005f02b70b7ecfde1bc39a599086d16ef (patch)
tree1868fc162e00af21332a82cdf348229c6b985d2f /block
parente180f5949327e897bc35a816f4f4010186632df9 (diff)
block: extend queue_flag bitops
Add test_and_clear and test_and_set queue_flag bitops.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-core.c12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index e0fb0bcc0c17..dbc7f42b5d2b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -205,8 +205,7 @@ void blk_plug_device(struct request_queue *q)
 	if (blk_queue_stopped(q))
 		return;
 
-	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
-		__set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+	if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
 		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
 	}
@@ -221,10 +220,9 @@ int blk_remove_plug(struct request_queue *q)
 {
 	WARN_ON(!irqs_disabled());
 
-	if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q))
 		return 0;
 
-	queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
 	del_timer(&q->unplug_timer);
 	return 1;
 }
@@ -328,8 +326,7 @@ void blk_start_queue(struct request_queue *q)
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
 	 */
-	if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-		queue_flag_set(QUEUE_FLAG_REENTER, q);
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
@@ -394,8 +391,7 @@ void __blk_run_queue(struct request_queue *q)
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
 	if (!elv_queue_empty(q)) {
-		if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
-			queue_flag_set(QUEUE_FLAG_REENTER, q);
+		if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 			q->request_fn(q);
 			queue_flag_clear(QUEUE_FLAG_REENTER, q);
 		} else {