path: root/block
author    Paul Mundt <lethal@linux-sh.org>    2011-05-24 02:32:20 -0400
committer Paul Mundt <lethal@linux-sh.org>    2011-05-24 02:32:20 -0400
commit    9fb4c7fbbcb1e947567d13b82e429ae47a46e337 (patch)
tree      6c5f11f347d0f58565381f92680a7a9cc63c0bd8 /block
parent    dc3e5b6a6e842116ec2436161adf31877f09b6b9 (diff)
parent    d762f4383100c2a87b1a3f2d678cd3b5425655b4 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c     7
-rw-r--r--  block/blk-cgroup.h     3
-rw-r--r--  block/blk-core.c     180
-rw-r--r--  block/blk-exec.c       2
-rw-r--r--  block/blk-flush.c      4
-rw-r--r--  block/blk-sysfs.c     11
-rw-r--r--  block/blk-throttle.c   9
-rw-r--r--  block/cfq-iosched.c   37
-rw-r--r--  block/elevator.c       7
-rw-r--r--  block/genhd.c          8
10 files changed, 163 insertions, 105 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f0605ab2a761..471fdcc5df85 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -114,6 +114,13 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 }
 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
 
+struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
+{
+	return container_of(task_subsys_state(tsk, blkio_subsys_id),
+			    struct blkio_cgroup, css);
+}
+EXPORT_SYMBOL_GPL(task_blkio_cgroup);
+
 static inline void
 blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
 {
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 10919fae2d3a..c774930cc206 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -291,6 +291,7 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
 extern struct blkio_cgroup blkio_root_cgroup;
 extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
+extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
 extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
 	struct blkio_group *blkg, void *key, dev_t dev,
 	enum blkio_policy_id plid);
@@ -314,6 +315,8 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
 struct cgroup;
 static inline struct blkio_cgroup *
 cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
+static inline struct blkio_cgroup *
+task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
 
 static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
 		struct blkio_group *blkg, void *key, dev_t dev,
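
A brief usage sketch, not part of the commit: the new task_blkio_cgroup() helper is meant to be called under rcu_read_lock(), exactly as the updated throtl_get_tg() and cfq_get_cfqg() call sites further down in this diff do. The function name example_lookup_blkcg() is hypothetical.

#include <linux/sched.h>
#include <linux/rcupdate.h>
#include "blk-cgroup.h"

static void example_lookup_blkcg(void)
{
	struct blkio_cgroup *blkcg;

	/* The returned css is only stable while rcu_read_lock() is held. */
	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	if (blkcg) {
		/* look up or allocate the per-policy group for this blkcg */
	}
	rcu_read_unlock();
}
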
diff --git a/block/blk-core.c b/block/blk-core.c
index 90f22cc30799..3fe00a14822a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -198,26 +198,13 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 }
 EXPORT_SYMBOL(blk_dump_rq_flags);
 
-/*
- * Make sure that plugs that were pending when this function was entered,
- * are now complete and requests pushed to the queue.
-*/
-static inline void queue_sync_plugs(struct request_queue *q)
-{
-	/*
-	 * If the current process is plugged and has barriers submitted,
-	 * we will livelock if we don't unplug first.
-	 */
-	blk_flush_plug(current);
-}
-
 static void blk_delay_work(struct work_struct *work)
 {
 	struct request_queue *q;
 
 	q = container_of(work, struct request_queue, delay_work.work);
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irq(q->queue_lock);
 }
 
@@ -233,7 +220,8 @@ static void blk_delay_work(struct work_struct *work)
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-	schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+	queue_delayed_work(kblockd_workqueue, &q->delay_work,
+				msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
@@ -251,7 +239,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -298,38 +286,44 @@ void blk_sync_queue(struct request_queue *q)
 {
 	del_timer_sync(&q->timeout);
 	cancel_delayed_work_sync(&q->delay_work);
-	queue_sync_plugs(q);
 }
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
- * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
- *
  */
-void __blk_run_queue(struct request_queue *q, bool force_kblockd)
+void __blk_run_queue(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	/*
-	 * Only recurse once to avoid overrunning the stack, let the unplug
-	 * handling reinvoke the handler shortly if we already got there.
-	 */
-	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-		q->request_fn(q);
-		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+	q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
 /**
+ * blk_run_queue_async - run a single device queue in workqueue context
+ * @q: The queue to run
+ *
+ * Description:
+ *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
+ *    of us.
+ */
+void blk_run_queue_async(struct request_queue *q)
+{
+	if (likely(!blk_queue_stopped(q))) {
+		__cancel_delayed_work(&q->delay_work);
+		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+	}
+}
+EXPORT_SYMBOL(blk_run_queue_async);
+
+/**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  *
@@ -342,7 +336,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -991,7 +985,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 		blk_queue_end_tag(q, rq);
 
 	add_acct_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -1311,7 +1305,15 @@ get_rq:
 
 	plug = current->plug;
 	if (plug) {
-		if (!plug->should_sort && !list_empty(&plug->list)) {
+		/*
+		 * If this is the first request added after a plug, fire
+		 * of a plug trace. If others have been added before, check
+		 * if we have multiple devices in this plug. If so, make a
+		 * note to sort the list before dispatch.
+		 */
+		if (list_empty(&plug->list))
+			trace_block_plug(q);
+		else if (!plug->should_sort) {
 			struct request *__rq;
 
 			__rq = list_entry_rq(plug->list.prev);
@@ -1327,7 +1329,7 @@ get_rq:
 	} else {
 		spin_lock_irq(q->queue_lock);
 		add_acct_request(q, req, where);
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
@@ -2644,6 +2646,7 @@ void blk_start_plug(struct blk_plug *plug)
 
 	plug->magic = PLUG_MAGIC;
 	INIT_LIST_HEAD(&plug->list);
+	INIT_LIST_HEAD(&plug->cb_list);
 	plug->should_sort = 0;
 
 	/*
@@ -2668,33 +2671,93 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return !(rqa->q <= rqb->q);
 }
 
-static void flush_plug_list(struct blk_plug *plug)
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We due this to avoid accidental big
+ * additional stack usage in driver dispatch, in places where the originally
+ * plugger did not intend it.
+ */
+static void queue_unplugged(struct request_queue *q, unsigned int depth,
+			    bool from_schedule)
+	__releases(q->queue_lock)
+{
+	trace_block_unplug(q, depth, !from_schedule);
+
+	/*
+	 * If we are punting this to kblockd, then we can safely drop
+	 * the queue_lock before waking kblockd (which needs to take
+	 * this lock).
+	 */
+	if (from_schedule) {
+		spin_unlock(q->queue_lock);
+		blk_run_queue_async(q);
+	} else {
+		__blk_run_queue(q);
+		spin_unlock(q->queue_lock);
+	}
+
+}
+
+static void flush_plug_callbacks(struct blk_plug *plug)
+{
+	LIST_HEAD(callbacks);
+
+	if (list_empty(&plug->cb_list))
+		return;
+
+	list_splice_init(&plug->cb_list, &callbacks);
+
+	while (!list_empty(&callbacks)) {
+		struct blk_plug_cb *cb = list_first_entry(&callbacks,
+							  struct blk_plug_cb,
+							  list);
+		list_del(&cb->list);
+		cb->callback(cb);
+	}
+}
+
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
 	unsigned long flags;
 	struct request *rq;
+	LIST_HEAD(list);
+	unsigned int depth;
 
 	BUG_ON(plug->magic != PLUG_MAGIC);
 
+	flush_plug_callbacks(plug);
 	if (list_empty(&plug->list))
 		return;
 
-	if (plug->should_sort)
-		list_sort(NULL, &plug->list, plug_rq_cmp);
+	list_splice_init(&plug->list, &list);
+
+	if (plug->should_sort) {
+		list_sort(NULL, &list, plug_rq_cmp);
+		plug->should_sort = 0;
+	}
 
 	q = NULL;
+	depth = 0;
+
+	/*
+	 * Save and disable interrupts here, to avoid doing it for every
+	 * queue lock we have to take.
+	 */
 	local_irq_save(flags);
-	while (!list_empty(&plug->list)) {
-		rq = list_entry_rq(plug->list.next);
+	while (!list_empty(&list)) {
+		rq = list_entry_rq(list.next);
 		list_del_init(&rq->queuelist);
 		BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
-			if (q) {
-				__blk_run_queue(q, false);
-				spin_unlock(q->queue_lock);
-			}
+			/*
+			 * This drops the queue lock
+			 */
+			if (q)
+				queue_unplugged(q, depth, from_schedule);
 			q = rq->q;
+			depth = 0;
 			spin_lock(q->queue_lock);
 		}
 		rq->cmd_flags &= ~REQ_ON_PLUG;
@@ -2706,38 +2769,27 @@ static void flush_plug_list(struct blk_plug *plug)
 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
 		else
 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
-	}
 
-	if (q) {
-		__blk_run_queue(q, false);
-		spin_unlock(q->queue_lock);
+		depth++;
 	}
 
-	BUG_ON(!list_empty(&plug->list));
-	local_irq_restore(flags);
-}
-
-static void __blk_finish_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
-	flush_plug_list(plug);
+	/*
+	 * This drops the queue lock
+	 */
+	if (q)
+		queue_unplugged(q, depth, from_schedule);
 
-	if (plug == tsk->plug)
-		tsk->plug = NULL;
+	local_irq_restore(flags);
 }
 
 void blk_finish_plug(struct blk_plug *plug)
 {
-	if (plug)
-		__blk_finish_plug(current, plug);
-}
-EXPORT_SYMBOL(blk_finish_plug);
+	blk_flush_plug_list(plug, false);
 
-void __blk_flush_plug(struct task_struct *tsk, struct blk_plug *plug)
-{
-	__blk_finish_plug(tsk, plug);
-	tsk->plug = plug;
+	if (plug == current->plug)
+		current->plug = NULL;
 }
-EXPORT_SYMBOL(__blk_flush_plug);
+EXPORT_SYMBOL(blk_finish_plug);
 
 int __init blk_dev_init(void)
 {
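
A brief sketch, not part of the commit, of the on-stack plugging interface that blk_flush_plug_list() now backs: bios submitted between blk_start_plug() and blk_finish_plug() are held on current->plug and dispatched per queue when the plug is flushed. example_batched_reads() and its parameters are hypothetical; blk_start_plug(), submit_bio() and blk_finish_plug() are the existing 2.6.39-era interfaces.

#include <linux/blkdev.h>
#include <linux/bio.h>

static void example_batched_reads(struct bio **bios, unsigned int nr)
{
	struct blk_plug plug;
	unsigned int i;

	blk_start_plug(&plug);		/* requests collect on current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);
	blk_finish_plug(&plug);		/* flushed via blk_flush_plug_list(plug, false) */
}
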
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 7482b7fa863b..81e31819a597 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
 	__elv_add_request(q, rq, where);
-	__blk_run_queue(q, false);
+	__blk_run_queue(q);
 	/* the queue is stopped so it won't be plugged+unplugged */
 	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
 		q->request_fn(q);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index eba4a2790c6c..6c9b5e189e62 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -218,7 +218,7 @@ static void flush_end_io(struct request *flush_rq, int error)
 	 * request_fn may confuse the driver. Always use kblockd.
 	 */
 	if (queued)
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
@@ -274,7 +274,7 @@ static void flush_data_end_io(struct request *rq, int error)
 	 * the comment in flush_end_io().
 	 */
 	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
-		__blk_run_queue(q, true);
+		blk_run_queue_async(q);
 }
 
 /**
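
A brief sketch, not part of the commit: both hunks above replace a direct queue run with an asynchronous kick, because re-entering ->request_fn from a completion path can confuse the driver. example_kick_from_completion() is hypothetical; blk_run_queue_async() is the helper introduced in blk-core.c above.

#include <linux/blkdev.h>

static void example_kick_from_completion(struct request_queue *q)
{
	/* Don't call __blk_run_queue() here; let kblockd run the queue later. */
	blk_run_queue_async(q);
}
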
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 261c75c665ae..bd236313f35d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
 	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
 		blk_set_queue_full(q, BLK_RW_SYNC);
-	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+	} else {
 		blk_clear_queue_full(q, BLK_RW_SYNC);
 		wake_up(&rl->wait[BLK_RW_SYNC]);
 	}
 
 	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
 		blk_set_queue_full(q, BLK_RW_ASYNC);
-	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+	} else {
 		blk_clear_queue_full(q, BLK_RW_ASYNC);
 		wake_up(&rl->wait[BLK_RW_ASYNC]);
 	}
@@ -498,7 +498,6 @@ int blk_register_queue(struct gendisk *disk)
 {
 	int ret;
 	struct device *dev = disk_to_dev(disk);
-
 	struct request_queue *q = disk->queue;
 
 	if (WARN_ON(!q))
@@ -509,8 +508,10 @@ int blk_register_queue(struct gendisk *disk)
 		return ret;
 
 	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
-	if (ret < 0)
+	if (ret < 0) {
+		blk_trace_remove_sysfs(dev);
 		return ret;
+	}
 
 	kobject_uevent(&q->kobj, KOBJ_ADD);
 
@@ -521,7 +522,7 @@ int blk_register_queue(struct gendisk *disk)
 	if (ret) {
 		kobject_uevent(&q->kobj, KOBJ_REMOVE);
 		kobject_del(&q->kobj);
-		blk_trace_remove_sysfs(disk_to_dev(disk));
+		blk_trace_remove_sysfs(dev);
 		kobject_put(&dev->kobj);
 		return ret;
 	}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 0475a22a420d..252a81a306f7 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -160,9 +160,8 @@ static void throtl_put_tg(struct throtl_grp *tg)
 }
 
 static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
-			struct cgroup *cgroup)
+			struct blkio_cgroup *blkcg)
 {
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 	struct throtl_grp *tg = NULL;
 	void *key = td;
 	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
@@ -229,12 +228,12 @@ done:
 
 static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 {
-	struct cgroup *cgroup;
 	struct throtl_grp *tg = NULL;
+	struct blkio_cgroup *blkcg;
 
 	rcu_read_lock();
-	cgroup = task_cgroup(current, blkio_subsys_id);
-	tg = throtl_find_alloc_tg(td, cgroup);
+	blkcg = task_blkio_cgroup(current);
+	tg = throtl_find_alloc_tg(td, blkcg);
 	if (!tg)
 		tg = &td->root_tg;
 	rcu_read_unlock();
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 3be881ec95ad..ab7a9e6a9b1c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1014,10 +1014,9 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
 	cfqg->needs_update = true;
 }
 
-static struct cfq_group *
-cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
+static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd,
+					struct blkio_cgroup *blkcg, int create)
 {
-	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 	struct cfq_group *cfqg = NULL;
 	void *key = cfqd;
 	int i, j;
@@ -1079,12 +1078,12 @@ done:
  */
 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 {
-	struct cgroup *cgroup;
+	struct blkio_cgroup *blkcg;
 	struct cfq_group *cfqg = NULL;
 
 	rcu_read_lock();
-	cgroup = task_cgroup(current, blkio_subsys_id);
-	cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
+	blkcg = task_blkio_cgroup(current);
+	cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create);
 	if (!cfqg && create)
 		cfqg = &cfqd->root_group;
 	rcu_read_unlock();
@@ -2582,28 +2581,20 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 }
 
 /*
- * Must always be called with the rcu_read_lock() held
+ * Call func for each cic attached to this ioc.
  */
 static void
-__call_for_each_cic(struct io_context *ioc,
+call_for_each_cic(struct io_context *ioc,
 		    void (*func)(struct io_context *, struct cfq_io_context *))
 {
 	struct cfq_io_context *cic;
 	struct hlist_node *n;
 
+	rcu_read_lock();
+
 	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
 		func(ioc, cic);
-}
 
-/*
- * Call func for each cic attached to this ioc.
- */
-static void
-call_for_each_cic(struct io_context *ioc,
-		  void (*func)(struct io_context *, struct cfq_io_context *))
-{
-	rcu_read_lock();
-	__call_for_each_cic(ioc, func);
 	rcu_read_unlock();
 }
 
@@ -2664,7 +2655,7 @@ static void cfq_free_io_context(struct io_context *ioc)
 	 * should be ok to iterate over the known list, we will see all cic's
 	 * since no new ones are added.
 	 */
-	__call_for_each_cic(ioc, cic_free_func);
+	call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
@@ -3368,7 +3359,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		    cfqd->busy_queues > 1) {
 			cfq_del_timer(cfqd, cfqq);
 			cfq_clear_cfqq_wait_request(cfqq);
-			__blk_run_queue(cfqd->queue, false);
+			__blk_run_queue(cfqd->queue);
 		} else {
 			cfq_blkiocg_update_idle_time_stats(
 				&cfqq->cfqg->blkg);
@@ -3383,7 +3374,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		__blk_run_queue(cfqd->queue, false);
+		__blk_run_queue(cfqd->queue);
 	}
 }
 
@@ -3743,7 +3734,7 @@ static void cfq_kick_queue(struct work_struct *work)
 	struct request_queue *q = cfqd->queue;
 
 	spin_lock_irq(q->queue_lock);
-	__blk_run_queue(cfqd->queue, false);
+	__blk_run_queue(cfqd->queue);
 	spin_unlock_irq(q->queue_lock);
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index 0cdb4e7ebab4..45ca1e34f582 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -642,7 +642,7 @@ void elv_quiesce_start(struct request_queue *q)
 	 */
 	elv_drain_elevator(q);
 	while (q->rq.elvpriv) {
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -671,7 +671,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 			q->boundary_rq = rq;
 		}
 	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
-		    where == ELEVATOR_INSERT_SORT)
+		    (where == ELEVATOR_INSERT_SORT ||
+		     where == ELEVATOR_INSERT_SORT_MERGE))
 		where = ELEVATOR_INSERT_BACK;
 
 	switch (where) {
@@ -695,7 +696,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 		 * with anything. There's no point in delaying queue
 		 * processing.
 		 */
-		__blk_run_queue(q, false);
+		__blk_run_queue(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT_MERGE:
diff --git a/block/genhd.c b/block/genhd.c
index b364bd038a18..2dd988723d73 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1588,9 +1588,13 @@ static void disk_events_workfn(struct work_struct *work)
 
 	spin_unlock_irq(&ev->lock);
 
-	/* tell userland about new events */
+	/*
+	 * Tell userland about new events. Only the events listed in
+	 * @disk->events are reported. Unlisted events are processed the
+	 * same internally but never get reported to userland.
+	 */
 	for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
-		if (events & (1 << i))
+		if (events & disk->events & (1 << i))
 			envp[nr_events++] = disk_uevents[i];
 
 	if (nr_events)
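
A brief sketch, not part of the commit, of the driver side of the genhd.c fix: only event bits advertised in disk->events reach userland, so a driver opts in explicitly. example_setup_events() is hypothetical; DISK_EVENT_MEDIA_CHANGE and DISK_EVENT_EJECT_REQUEST are the existing flags from <linux/genhd.h>.

#include <linux/genhd.h>

static void example_setup_events(struct gendisk *disk)
{
	/* Only this bit will be forwarded as a uevent by disk_events_workfn(). */
	disk->events = DISK_EVENT_MEDIA_CHANGE;

	/*
	 * A bit left out here (e.g. DISK_EVENT_EJECT_REQUEST) is still
	 * processed internally but is never reported to userland.
	 */
}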