Diffstat (limited to 'block')
 block/blk-cgroup.c   |  7 +++++++
 block/blk-cgroup.h   |  3 +++
 block/blk-core.c     | 17 +++++------------
 block/blk-sysfs.c    |  8 +++++---
 block/blk-throttle.c |  9 ++++-----
 block/blk.h          |  1 -
 block/cfq-iosched.c  | 31 +++++++++--------------------
 block/elevator.c     |  3 ++-
 block/genhd.c        |  8 ++++++--
 9 files changed, 43 insertions(+), 44 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f0605ab2a761..471fdcc5df85 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -114,6 +114,13 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 }
 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
 
+struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
+{
+        return container_of(task_subsys_state(tsk, blkio_subsys_id),
+                            struct blkio_cgroup, css);
+}
+EXPORT_SYMBOL_GPL(task_blkio_cgroup);
+
 static inline void
 blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
 {
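The new task_blkio_cgroup() is a thin wrapper: task_subsys_state() fetches the task's cgroup_subsys_state under RCU, and container_of() steps back from that embedded member to the enclosing blkio_cgroup. A minimal userspace sketch of the container_of() step, with stand-in struct names rather than the kernel's:

#include <assert.h>
#include <stddef.h>

/* Same pointer arithmetic as the kernel macro, minus the typeof() checks. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct css { int refcnt; };                    /* stand-in for cgroup_subsys_state */
struct blkcg { long weight; struct css css; }; /* stand-in for blkio_cgroup */

int main(void)
{
        struct blkcg bc = { .weight = 500 };
        struct css *inner = &bc.css;  /* what task_subsys_state() would hand back */

        /* Recover the enclosing object, exactly as task_blkio_cgroup() does. */
        struct blkcg *outer = container_of(inner, struct blkcg, css);
        assert(outer == &bc && outer->weight == 500);
        return 0;
}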
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 10919fae2d3a..c774930cc206 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -291,6 +291,7 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
 extern struct blkio_cgroup blkio_root_cgroup;
 extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
+extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
 extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
         struct blkio_group *blkg, void *key, dev_t dev,
         enum blkio_policy_id plid);
@@ -314,6 +315,8 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
 struct cgroup;
 static inline struct blkio_cgroup *
 cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
+static inline struct blkio_cgroup *
+task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
 
 static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
         struct blkio_group *blkg, void *key, dev_t dev,
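Both declarations follow the header's existing compile-out convention: with CONFIG_BLK_CGROUP (or the module variant) the helper is a real extern, otherwise it degrades to a static inline stub returning NULL so callers compile and link unchanged. A generic sketch of that pattern, with illustrative names:

#include <stdio.h>

/* #define CONFIG_FEATURE */   /* define to select the real implementation */

struct thing { int v; };

#ifdef CONFIG_FEATURE
extern struct thing *get_thing(void);   /* real lookup, defined elsewhere */
#else
/* Feature compiled out: callers still build, and must tolerate NULL. */
static inline struct thing *get_thing(void) { return NULL; }
#endif

int main(void)
{
        struct thing *t = get_thing();
        printf("thing is %s\n", t ? "present" : "absent (stubbed out)");
        return 0;
}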
diff --git a/block/blk-core.c b/block/blk-core.c
index 5fa3dd2705c6..3fe00a14822a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -292,7 +292,6 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
- * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
@@ -303,15 +302,7 @@ void __blk_run_queue(struct request_queue *q)
         if (unlikely(blk_queue_stopped(q)))
                 return;
 
-        /*
-         * Only recurse once to avoid overrunning the stack, let the unplug
-         * handling reinvoke the handler shortly if we already got there.
-         */
-        if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-                q->request_fn(q);
-                queue_flag_clear(QUEUE_FLAG_REENTER, q);
-        } else
-                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+        q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
@@ -325,9 +316,12 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-        if (likely(!blk_queue_stopped(q)))
+        if (likely(!blk_queue_stopped(q))) {
+                __cancel_delayed_work(&q->delay_work);
                 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+        }
 }
+EXPORT_SYMBOL(blk_run_queue_async);
 
 /**
  * blk_run_queue - run a single device queue
@@ -2787,7 +2781,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
         local_irq_restore(flags);
 }
-EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
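The blk_run_queue_async() change relies on queue_delayed_work() being a no-op while the work item is still pending: without the new __cancel_delayed_work(), a run queued earlier with a long delay would keep its old expiry and the 0-delay request would be ignored. A toy single-threaded model of that pending/cancel behavior (greatly simplified relative to the real workqueue API):

#include <assert.h>
#include <stdbool.h>

struct delayed_work { bool pending; unsigned long delay; };

/* Mirrors queue_delayed_work(): refuses to re-arm work that is pending. */
static bool queue_delayed_work(struct delayed_work *w, unsigned long delay)
{
        if (w->pending)
                return false;   /* already queued; the new delay is ignored */
        w->pending = true;
        w->delay = delay;
        return true;
}

/* Mirrors __cancel_delayed_work(): best-effort removal of pending work. */
static void cancel_delayed_work(struct delayed_work *w)
{
        w->pending = false;
}

int main(void)
{
        struct delayed_work dw = { false, 0 };

        queue_delayed_work(&dw, 100);   /* e.g. an earlier delayed queue run */
        queue_delayed_work(&dw, 0);     /* no-op: the 100-tick delay still stands */
        assert(dw.pending && dw.delay == 100);

        cancel_delayed_work(&dw);       /* the step the patch adds */
        queue_delayed_work(&dw, 0);     /* now the immediate run takes effect */
        assert(dw.pending && dw.delay == 0);
        return 0;
}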
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 53bd0c77bfda..d935bd859c87 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
         if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
                 blk_set_queue_full(q, BLK_RW_SYNC);
-        } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+        } else {
                 blk_clear_queue_full(q, BLK_RW_SYNC);
                 wake_up(&rl->wait[BLK_RW_SYNC]);
         }
 
         if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
                 blk_set_queue_full(q, BLK_RW_ASYNC);
-        } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+        } else {
                 blk_clear_queue_full(q, BLK_RW_ASYNC);
                 wake_up(&rl->wait[BLK_RW_ASYNC]);
         }
@@ -509,8 +509,10 @@ int blk_register_queue(struct gendisk *disk)
                 return ret;
 
         ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
-        if (ret < 0)
+        if (ret < 0) {
+                blk_trace_remove_sysfs(dev);
                 return ret;
+        }
 
         kobject_uevent(&q->kobj, KOBJ_ADD);
 
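The queue_requests_store() hunks drop a condition that could never be false: when rl->count[x] >= q->nr_requests fails, count < nr_requests holds, and for integers that is exactly count + 1 <= nr_requests, so a plain else is equivalent. A throwaway exhaustive check of the implication:

#include <assert.h>

int main(void)
{
        /* !(count >= n)  =>  count + 1 <= n, for any integers in range. */
        for (int n = 0; n < 128; n++)
                for (int count = 0; count < 128; count++)
                        if (!(count >= n))
                                assert(count + 1 <= n);
        return 0;
}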
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 0475a22a420d..252a81a306f7 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -160,9 +160,8 @@ static void throtl_put_tg(struct throtl_grp *tg)
 }
 
 static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
-                        struct cgroup *cgroup)
+                        struct blkio_cgroup *blkcg)
 {
-        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
         struct throtl_grp *tg = NULL;
         void *key = td;
         struct backing_dev_info *bdi = &td->queue->backing_dev_info;
@@ -229,12 +228,12 @@ done:
 
 static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 {
-        struct cgroup *cgroup;
         struct throtl_grp *tg = NULL;
+        struct blkio_cgroup *blkcg;
 
         rcu_read_lock();
-        cgroup = task_cgroup(current, blkio_subsys_id);
-        tg = throtl_find_alloc_tg(td, cgroup);
+        blkcg = task_blkio_cgroup(current);
+        tg = throtl_find_alloc_tg(td, blkcg);
         if (!tg)
                 tg = &td->root_tg;
         rcu_read_unlock();
diff --git a/block/blk.h b/block/blk.h
index 83e4bff36201..1f798b5a6f19 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,7 +22,6 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
-void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 46b0a1d1d925..ab7a9e6a9b1c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1014,10 +1014,9 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
         cfqg->needs_update = true;
 }
 
-static struct cfq_group *
-cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
+static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd,
+                struct blkio_cgroup *blkcg, int create)
 {
-        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
         struct cfq_group *cfqg = NULL;
         void *key = cfqd;
         int i, j;
@@ -1079,12 +1078,12 @@ done:
  */
 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 {
-        struct cgroup *cgroup;
+        struct blkio_cgroup *blkcg;
         struct cfq_group *cfqg = NULL;
 
         rcu_read_lock();
-        cgroup = task_cgroup(current, blkio_subsys_id);
-        cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
+        blkcg = task_blkio_cgroup(current);
+        cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create);
         if (!cfqg && create)
                 cfqg = &cfqd->root_group;
         rcu_read_unlock();
@@ -2582,28 +2581,20 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 }
 
 /*
- * Must always be called with the rcu_read_lock() held
+ * Call func for each cic attached to this ioc.
  */
 static void
-__call_for_each_cic(struct io_context *ioc,
-                    void (*func)(struct io_context *, struct cfq_io_context *))
+call_for_each_cic(struct io_context *ioc,
+                  void (*func)(struct io_context *, struct cfq_io_context *))
 {
         struct cfq_io_context *cic;
         struct hlist_node *n;
 
+        rcu_read_lock();
+
         hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
                 func(ioc, cic);
-}
 
-/*
- * Call func for each cic attached to this ioc.
- */
-static void
-call_for_each_cic(struct io_context *ioc,
-                  void (*func)(struct io_context *, struct cfq_io_context *))
-{
-        rcu_read_lock();
-        __call_for_each_cic(ioc, func);
         rcu_read_unlock();
 }
 
@@ -2664,7 +2655,7 @@ static void cfq_free_io_context(struct io_context *ioc)
          * should be ok to iterate over the known list, we will see all cic's
          * since no new ones are added.
          */
-        __call_for_each_cic(ioc, cic_free_func);
+        call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
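With the last caller of __call_for_each_cic() switched over (cfq_free_io_context(), above), the wrapper and worker collapse into one function that takes rcu_read_lock() itself; this is safe even for callers already inside a read-side section, since RCU read locks nest. The resulting shape, walk a list and invoke a callback per entry inside the lock, sketched as plain userspace C with an ordinary list standing in for the RCU hlist:

#include <stdio.h>

struct node { int id; struct node *next; };

/* The shape of the merged call_for_each_cic(): lock, walk, call, unlock. */
static void call_for_each(struct node *head, void (*func)(struct node *))
{
        /* rcu_read_lock() goes here in the kernel version ... */
        for (struct node *n = head; n; n = n->next)
                func(n);
        /* ... and rcu_read_unlock() here. */
}

static void print_node(struct node *n)
{
        printf("visiting node %d\n", n->id);
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        call_for_each(&a, print_node);
        return 0;
}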
diff --git a/block/elevator.c b/block/elevator.c
index 3cd0d8c84902..2a0b653c90fd 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -666,7 +666,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
                         q->boundary_rq = rq;
                 }
         } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
-                    where == ELEVATOR_INSERT_SORT)
+                    (where == ELEVATOR_INSERT_SORT ||
+                     where == ELEVATOR_INSERT_SORT_MERGE))
                 where = ELEVATOR_INSERT_BACK;
 
         switch (where) {
diff --git a/block/genhd.c b/block/genhd.c
index b364bd038a18..2dd988723d73 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1588,9 +1588,13 @@ static void disk_events_workfn(struct work_struct *work)
 
         spin_unlock_irq(&ev->lock);
 
-        /* tell userland about new events */
+        /*
+         * Tell userland about new events. Only the events listed in
+         * @disk->events are reported. Unlisted events are processed the
+         * same internally but never get reported to userland.
+         */
         for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
-                if (events & (1 << i))
+                if (events & disk->events & (1 << i))
                         envp[nr_events++] = disk_uevents[i];
 
         if (nr_events)
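The disk_events_workfn() fix masks the raised events with disk->events before building the uevent strings, so only event types the driver explicitly listed reach userland. A small userspace model of the filtering loop (the two uevent strings match genhd's disk_uevents table; the rest of the scaffolding is invented):

#include <stdio.h>

#define DISK_EVENT_MEDIA_CHANGE  (1U << 0)
#define DISK_EVENT_EJECT_REQUEST (1U << 1)

static const char *disk_uevents[] = {
        "DISK_MEDIA_CHANGE=1",
        "DISK_EJECT_REQUEST=1",
};

int main(void)
{
        unsigned int events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
        unsigned int listed = DISK_EVENT_MEDIA_CHANGE; /* disk->events: driver reports only this */
        const char *envp[2];
        int nr_events = 0;

        for (unsigned int i = 0; i < sizeof(disk_uevents) / sizeof(disk_uevents[0]); i++)
                if (events & listed & (1U << i))       /* the patched condition */
                        envp[nr_events++] = disk_uevents[i];

        for (int i = 0; i < nr_events; i++)            /* only the listed event survives */
                printf("%s\n", envp[i]);
        return 0;
}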