Diffstat (limited to 'block'):

 block/blk-core.c    | 169
 block/blk-exec.c    |   2
 block/blk-flush.c   |   4
 block/blk-sysfs.c   |   3
 block/blk.h         |   1
 block/cfq-iosched.c |   6
 block/elevator.c    |   4
 7 files changed, 124 insertions(+), 65 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 90f22cc30799..5fa3dd2705c6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -198,26 +198,13 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-/*
- * Make sure that plugs that were pending when this function was entered,
- * are now complete and requests pushed to the queue.
-*/
-static inline void queue_sync_plugs(struct request_queue *q)
-{
-	/*
-	 * If the current process is plugged and has barriers submitted,
-	 * we will livelock if we don't unplug first.
-	 */
-	blk_flush_plug(current);
-}
-
 static void blk_delay_work(struct work_struct *work)
 {
 	struct request_queue *q;
 
 	q = container_of(work, struct request_queue, delay_work.work);
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irq(q->queue_lock);
 }
 
@@ -233,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-	schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+	queue_delayed_work(kblockd_workqueue, &q->delay_work,
+				msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
@@ -251,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -298,7 +286,6 @@ void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->timeout);
 	cancel_delayed_work_sync(&q->delay_work);
-	queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
@@ -310,9 +297,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
- *
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
@@ -321,7 +307,7 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else
@@ -330,6 +316,20 @@ void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 EXPORT_SYMBOL(__blk_run_queue);
 
 /**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ *    of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+	if (likely(!blk_queue_stopped(q)))
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+}
+
+/**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  *
@@ -342,7 +342,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -991,7 +991,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_queue_end_tag(q, rq);
 
 	add_acct_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1311,7 +1311,15 @@ get_rq:
 
 	plug = current->plug;
 	if (plug) {
-		if (!plug->should_sort && !list_empty(&plug->list)) {
+		/*
+		 * If this is the first request added after a plug, fire
+		 * of a plug trace. If others have been added before, check
+		 * if we have multiple devices in this plug. If so, make a
+		 * note to sort the list before dispatch.
+		 */
+		if (list_empty(&plug->list))
+			trace_block_plug(q);
+		else if (!plug->should_sort) {
 			struct request *__rq;
 
 			__rq = list_entry_rq(plug->list.prev);
@@ -1327,7 +1335,7 @@ get_rq:
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
@@ -2644,6 +2652,7 @@ void blk_start_plug(struct blk_plug *plug)
 
 	plug->magic = PLUG_MAGIC;
 	INIT_LIST_HEAD(&plug->list);
+	INIT_LIST_HEAD(&plug->cb_list);
 	plug->should_sort = 0;
 
 	/*
@@ -2668,33 +2677,93 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return !(rqa->q <= rqb->q);
 }
 
-static void flush_plug_list(struct blk_plug *plug)
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We due this to avoid accidental big
+ * additional stack usage in driver dispatch, in places where the originally
+ * plugger did not intend it.
+ */
+static void queue_unplugged(struct request_queue *q, unsigned int depth,
+			    bool from_schedule)
+	__releases(q->queue_lock)
+{
+	trace_block_unplug(q, depth, !from_schedule);
+
+	/*
+	 * If we are punting this to kblockd, then we can safely drop
+	 * the queue_lock before waking kblockd (which needs to take
+	 * this lock).
+	 */
+	if (from_schedule) {
+		spin_unlock(q->queue_lock);
+		blk_run_queue_async(q);
+	} else {
+		__blk_run_queue(q);
+		spin_unlock(q->queue_lock);
+	}
+
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+	LIST_HEAD(callbacks);
+
+	if (list_empty(&plug->cb_list))
+		return;
+
+	list_splice_init(&plug->cb_list, &callbacks);
+
+	while (!list_empty(&callbacks)) {
+		struct blk_plug_cb *cb = list_first_entry(&callbacks,
+							  struct blk_plug_cb,
+							  list);
+		list_del(&cb->list);
+		cb->callback(cb);
+	}
+}
+
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
 	unsigned long flags;
 	struct request *rq;
+	LIST_HEAD(list);
+	unsigned int depth;
 
 	BUG_ON(plug->magic != PLUG_MAGIC);
 
+	flush_plug_callbacks(plug);
 	if (list_empty(&plug->list))
 		return;
 
-	if (plug->should_sort)
-		list_sort(NULL, &plug->list, plug_rq_cmp);
+	list_splice_init(&plug->list, &list);
+
+	if (plug->should_sort) {
+		list_sort(NULL, &list, plug_rq_cmp);
+		plug->should_sort = 0;
+	}
 
 	q = NULL;
+	depth = 0;
+
+	/*
+	 * Save and disable interrupts here, to avoid doing it for every
+	 * queue lock we have to take.
+	 */
 	local_irq_save(flags);
-	while (!list_empty(&plug->list)) {
-		rq = list_entry_rq(plug->list.next);
+	while (!list_empty(&list)) {
+		rq = list_entry_rq(list.next);
 		list_del_init(&rq->queuelist);
 		BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
-			if (q) {
-				__blk_run_queue(q, false);
-				spin_unlock(q->queue_lock);
-			}
+			/*
+			 * This drops the queue lock
+			 */
+			if (q)
+				queue_unplugged(q, depth, from_schedule);
 			q = rq->q;
+			depth = 0;
 			spin_lock(q->queue_lock);
 		}
 		rq->cmd_flags &= ~REQ_ON_PLUG;
@@ -2706,38 +2775,28 @@ static void flush_plug_list(struct blk_plug *plug)
 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
 		else
 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
-	}
 
-	if (q) {
-		__blk_run_queue(q, false);
-		spin_unlock(q->queue_lock);
+		depth++;
 	}
 
-	BUG_ON(!list_empty(&plug->list));
-	local_irq_restore(flags);
-}
-
-static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
-	flush_plug_list(plug);
+	/*
+	 * This drops the queue lock
+	 */
+	if (q)
+		queue_unplugged(q, depth, from_schedule);
 
-	if (plug == tsk->plug)
-		tsk->plug = NULL;
+	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
-	if (plug)
-		__blk_finish_plug(current, plug);
-}
-EXPORT_SYMBOL(blk_finish_plug);
+	blk_flush_plug_list(plug, false);
 
-void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
-	__blk_finish_plug(tsk, plug);
-	tsk->plug = plug;
+	if (plug == current->plug)
+		current->plug = NULL;
 }
-EXPORT_SYMBOL(__blk_flush_plug);
+EXPORT_SYMBOL(blk_finish_plug);
 
 int __init blk_dev_init(void)
 {
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 7482b7fa863b..81e31819a597 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	/* the queue is stopped so it won't be plugged+unplugged */
 	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index eba4a2790c6c..6c9b5e189e62 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 	 * request_fn may confuse the driver. Always use kblockd.
 	 */
 	if (queued)
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
 	 * the comment in flush_end_io().
 	 */
 	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 261c75c665ae..6d735122bc59 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -498,7 +498,6 @@ int blk_register_queue(struct gendisk *disk)
 {
 	int ret;
 	struct device *dev = disk_to_dev(disk);
-
 	struct request_queue *q = disk->queue;
 
 	if (WARN_ON(!q))
@@ -521,7 +520,7 @@ int blk_register_queue(struct gendisk *disk)
 	if (ret) {
 		kobject_uevent(&q->kobj, KOBJ_REMOVE);
 		kobject_del(&q->kobj);
-		blk_trace_remove_sysfs(disk_to_dev(disk));
+		blk_trace_remove_sysfs(dev);
 		kobject_put(&dev->kobj);
 		return ret;
 	}
diff --git a/block/blk.h b/block/blk.h
index 61263463e38e..c9df8fc3c999 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,6 +22,7 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
+void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3be881ec95ad..46b0a1d1d925 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3368,7 +3368,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		    cfqd->busy_queues > 1) {
 			cfq_del_timer(cfqd, cfqq);
 			cfq_clear_cfqq_wait_request(cfqq);
-			__blk_run_queue(cfqd->queue, false);
+			__blk_run_queue(cfqd->queue);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
 						&cfqq->cfqg->blkg);
@@ -3383,7 +3383,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue, false);
+		__blk_run_queue(cfqd->queue);
 	}
 }
 
@@ -3743,7 +3743,7 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(cfqd->queue, false);
+	__blk_run_queue(cfqd->queue);
 	spin_unlock_irq(q->queue_lock);
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index 0cdb4e7ebab4..6f6abc08bb56 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -695,7 +695,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		 * with anything. There's no point in delaying queue
 		 * processing.
 		 */
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT_MERGE:
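
For context, a minimal caller-side sketch of the on-stack plugging interface these hunks rework. Illustrative only, not part of the patch: submit_batch() and its bio array are hypothetical, while the blk_start_plug()/blk_finish_plug()/submit_bio() calls are the ones declared in the kernel headers at this point in the tree.

#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Queue a batch of reads behind one plug so they are dispatched together. */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	/* Requests now collect on current->plug instead of going straight to the driver. */
	blk_start_plug(&plug);

	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);

	/* Drains the list via blk_flush_plug_list(&plug, false), as added above. */
	blk_finish_plug(&plug);
}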