author		Jens Axboe <jaxboe@fusionio.com>	2011-04-19 07:32:46 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2011-04-19 07:32:46 -0400
commit		c21e6beba8835d09bb80e34961430b13e60381c5 (patch)
tree		cdf6f6d40130b95e641ab5db1de0f6ecc179054a /block
parent		5f45c69589b7d2953584e6cd0b31e35dbe960ad0 (diff)
block: get rid of QUEUE_FLAG_REENTER
We are currently using this flag to check whether it's safe to call into ->request_fn(). If it is set, we punt to kblockd. But we get a lot of false positives and excessive punts to kblockd, which hurts performance.

The only real abuser of this infrastructure is SCSI. So export the async queue run and convert SCSI over to use that. There's room for improvement in that SCSI need not always use the async call, but this fixes our performance issue and they can fix that up in due time.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
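As a caller-side illustration, here is a minimal sketch (not part of this commit) of the conversion the message describes: a driver that previously went through __blk_run_queue() and relied on QUEUE_FLAG_REENTER to catch recursion into ->request_fn() can now punt to kblockd explicitly. example_kick_queue() is a hypothetical name, and the sketch assumes the blk_run_queue_async() declaration moved to <linux/blkdev.h> as part of the export (the diffstat below is limited to 'block', so that hunk is not shown here).

#include <linux/blkdev.h>

/*
 * Hypothetical caller sketching the SCSI-style conversion: rather than
 * calling into ->request_fn() directly and depending on a reentry flag,
 * always defer to kblockd. blk_run_queue_async() schedules q->delay_work
 * with a zero delay, so the queue is run from process context and cannot
 * recurse into the caller's stack.
 */
static void example_kick_queue(struct request_queue *q)
{
	blk_run_queue_async(q);
}

As the message notes, the async call is not always necessary; a later cleanup could use the direct run where the caller knows it is not already inside ->request_fn().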
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	11
-rw-r--r--	block/blk.h		1
2 files changed, 2 insertions, 10 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 580eee5743e5..40725b9091f1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -303,15 +303,7 @@ void __blk_run_queue(struct request_queue *q)
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	/*
-	 * Only recurse once to avoid overrunning the stack, let the unplug
-	 * handling reinvoke the handler shortly if we already got there.
-	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-		q->request_fn(q);
-		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+	q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -328,6 +320,7 @@ void blk_run_queue_async(struct request_queue *q)
 	if (likely(!blk_queue_stopped(q)))
 		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
+EXPORT_SYMBOL(blk_run_queue_async);
 
 /**
  * blk_run_queue - run a single device queue
diff --git a/block/blk.h b/block/blk.h
index c9df8fc3c999..61263463e38e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,7 +22,6 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
-void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling