 block/blk-core.c                 | 36 ++++++++++++++++++++++++------------
 block/blk-exec.c                 |  2 +-
 block/blk-flush.c                |  4 ++--
 block/blk.h                      |  1 +
 block/cfq-iosched.c              |  6 +++---
 block/elevator.c                 |  4 ++--
 drivers/scsi/scsi_lib.c          |  2 +-
 drivers/scsi/scsi_transport_fc.c |  2 +-
 include/linux/blkdev.h           |  2 +-
 9 files changed, 36 insertions(+), 23 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index e2bacfa46cc3..5fa3dd2705c6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -204,7 +204,7 @@ static void blk_delay_work(struct work_struct *work)
 
 	q = container_of(work, struct request_queue, delay_work.work);
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irq(q->queue_lock);
 }
 
@@ -239,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -296,11 +296,9 @@ EXPORT_SYMBOL(blk_sync_queue);
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
- *    held and interrupts disabled. If force_kblockd is true, then it is
- *    safe to call this without holding the queue lock.
- *
+ *    held and interrupts disabled.
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
@@ -309,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else
@@ -318,6 +316,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 EXPORT_SYMBOL(__blk_run_queue);
 
 /**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ *    of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+	if (likely(!blk_queue_stopped(q)))
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
+/**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  *
@@ -330,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -979,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_queue_end_tag(q, rq);
 
 	add_acct_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1323,7 +1335,7 @@ get_rq:
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
@@ -2684,9 +2696,9 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 	 */
 	if (from_schedule) {
 		spin_unlock(q->queue_lock);
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 	} else {
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		spin_unlock(q->queue_lock);
 	}
 
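
[editor's note] A minimal usage sketch of the two run-queue entry points after this patch. This is an illustration under the locking rules documented above, not code from the patch, and the example_* helpers are hypothetical:

/* Sketch: a direct run requires queue_lock held with interrupts disabled. */
static void example_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);		/* invokes ->request_fn directly */
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/* Sketch: a deferred run is safe without the queue lock; kblockd runs it. */
static void example_kick_queue_async(struct request_queue *q)
{
	blk_run_queue_async(q);		/* schedules q->delay_work with 0 delay */
}
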
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 7482b7fa863b..81e31819a597 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	/* the queue is stopped so it won't be plugged+unplugged */
 	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index eba4a2790c6c..6c9b5e189e62 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 	 * request_fn may confuse the driver. Always use kblockd.
 	 */
 	if (queued)
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
 	 * the comment in flush_end_io().
 	 */
 	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
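
[editor's note] A hedged sketch of the completion-path pattern the two comments above describe; example_end_io is hypothetical and not part of the patch:

static void example_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/* ... per-request completion handling ... */

	/*
	 * Running the queue synchronously here would re-enter
	 * ->request_fn from the driver's completion path and may
	 * confuse the driver, so defer the run to kblockd.
	 */
	blk_run_queue_async(q);
}
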
diff --git a/block/blk.h b/block/blk.h
index 61263463e38e..c9df8fc3c999 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,6 +22,7 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
+void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3be881ec95ad..46b0a1d1d925 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3368,7 +3368,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		    cfqd->busy_queues > 1) {
 			cfq_del_timer(cfqd, cfqq);
 			cfq_clear_cfqq_wait_request(cfqq);
-			__blk_run_queue(cfqd->queue, false);
+			__blk_run_queue(cfqd->queue);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
 					&cfqq->cfqg->blkg);
@@ -3383,7 +3383,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue, false);
+		__blk_run_queue(cfqd->queue);
 	}
 }
 
@@ -3743,7 +3743,7 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(cfqd->queue, false);
+	__blk_run_queue(cfqd->queue);
 	spin_unlock_irq(q->queue_lock);
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index 0cdb4e7ebab4..6f6abc08bb56 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -695,7 +695,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		 * with anything. There's no point in delaying queue
 		 * processing.
 		 */
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT_MERGE:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 6d5c7ff43f5b..ab55c2fa7ce2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
 					&sdev->request_queue->queue_flags);
 		if (flagset)
 			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
-		__blk_run_queue(sdev->request_queue, false);
+		__blk_run_queue(sdev->request_queue);
 		if (flagset)
 			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
 		spin_unlock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index fdf3fa639056..28c33506e4ad 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
 		!test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
 	if (flagset)
 		queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
-	__blk_run_queue(rport->rqst_q, false);
+	__blk_run_queue(rport->rqst_q);
 	if (flagset)
 		queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
 	spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3448d89297e8..cbbfd98ad4a3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -697,7 +697,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
+extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
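
[editor's note] In summary, the old boolean convention maps onto the new pair of helpers as follows (an editorial recap of the patch, not code from it):

	/* before this patch */
	__blk_run_queue(q, false);	/* run ->request_fn directly, lock held */
	__blk_run_queue(q, true);	/* always punt the run to kblockd */

	/* after this patch */
	__blk_run_queue(q);		/* run ->request_fn directly, lock held */
	blk_run_queue_async(q);		/* always punt the run to kblockd */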