commit    24ecfbe27f65563909b14492afda2f1c21f7c044
tree      a7e51d903c400d0925f87be5f3069a5a44e0af24  /block/blk-core.c
parent    4521cc4ed5173f92714f6999a69910c3385fed68
author    Christoph Hellwig <hch@infradead.org>  2011-04-18 05:41:33 -0400
committer Jens Axboe <jaxboe@fusionio.com>       2011-04-18 05:41:33 -0400

block: add blk_run_queue_async
Instead of overloading __blk_run_queue to force an offload to kblockd, add a
new blk_run_queue_async helper to do it explicitly.  I've kept the
blk_queue_stopped check for now, but I suspect it's not needed, as the check
we do when the workqueue item runs should be enough.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
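For orientation only (not part of the patch text itself), the interface change
looks like this from a caller's point of view; the sketch below is illustrative
and simply restates the calls touched by the hunks that follow:

	/* Before this patch: one entry point, behaviour picked by a flag. */
	__blk_run_queue(q, false);   /* run q->request_fn directly, queue lock held */
	__blk_run_queue(q, true);    /* punt the queue run to kblockd */

	/* After this patch: the intent is explicit in the function name. */
	__blk_run_queue(q);          /* run directly, queue lock held, irqs off */
	blk_run_queue_async(q);      /* offload to kblockd via q->delay_work */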
Diffstat (limited to 'block/blk-core.c')
 -rw-r--r--  block/blk-core.c  36
 1 file changed, 24 insertions(+), 12 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index e2bacfa46cc3..5fa3dd2705c6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -204,7 +204,7 @@ static void blk_delay_work(struct work_struct *work)
 
 	q = container_of(work, struct request_queue, delay_work.work);
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irq(q->queue_lock);
 }
 
@@ -239,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -296,11 +296,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled. If force_kblockd is true, then it is
- *    safe to call this without holding the queue lock.
- *
+ *    held and interrupts disabled.
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
@@ -309,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else
@@ -318,6 +316,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 EXPORT_SYMBOL(__blk_run_queue);
 
 /**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q:	The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ *    of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+	if (likely(!blk_queue_stopped(q)))
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
+/**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  *
@@ -330,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -979,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_queue_end_tag(q, rq);
 
 	add_acct_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1323,7 +1335,7 @@ get_rq:
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
@@ -2684,9 +2696,9 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 	 */
 	if (from_schedule) {
 		spin_unlock(q->queue_lock);
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 	} else {
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		spin_unlock(q->queue_lock);
 	}
 
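For context, blk_run_queue_async() reuses the queue's existing delayed-work
machinery rather than introducing a new mechanism: queueing q->delay_work with
a zero delay makes kblockd run blk_delay_work(), which takes the queue lock and
calls __blk_run_queue(), as the first hunk above shows. A condensed sketch of
that round trip, assembled from the hunks in this patch (struct definitions and
surrounding code omitted):

	/* Caller side: defer the queue run to kblockd. */
	void blk_run_queue_async(struct request_queue *q)
	{
		if (likely(!blk_queue_stopped(q)))
			/* zero delay: run as soon as kblockd schedules the work */
			queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
	}

	/* Worker side (from the first hunk): kblockd runs the queue with the
	 * queue lock held and interrupts disabled, matching the documented
	 * requirements of __blk_run_queue(). */
	static void blk_delay_work(struct work_struct *work)
	{
		struct request_queue *q;

		q = container_of(work, struct request_queue, delay_work.work);
		spin_lock_irq(q->queue_lock);
		__blk_run_queue(q);
		spin_unlock_irq(q->queue_lock);
	}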