author     Ingo Molnar <mingo@elte.hu>    2011-05-12 03:36:18 -0400
committer  Ingo Molnar <mingo@elte.hu>    2011-05-12 03:36:18 -0400
commit     9cb5baba5e3acba0994ad899ee908799104c9965 (patch)
tree       d5ff16000256a0bf56279926e6114b4603ede2b4 /block
parent     7142d17e8f935fa842e9f6eece2281b6d41625d6 (diff)
parent     693d92a1bbc9e42681c42ed190bd42b636ca876f (diff)
Merge commit 'v2.6.39-rc7' into sched/core
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c      13
-rw-r--r--  block/blk-sysfs.c      8
-rw-r--r--  block/blk.h            1
-rw-r--r--  block/cfq-iosched.c   20
-rw-r--r--  block/elevator.c       3
-rw-r--r--  block/genhd.c          8
6 files changed, 21 insertions, 32 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 5fa3dd2705c6..a2e58eeb3549 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -292,7 +292,6 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
- * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
@@ -303,15 +302,7 @@ void __blk_run_queue(struct request_queue *q)
 	if (unlikely(blk_queue_stopped(q)))
 		return;
 
-	/*
-	 * Only recurse once to avoid overrunning the stack, let the unplug
-	 * handling reinvoke the handler shortly if we already got there.
-	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
-		q->request_fn(q);
-		queue_flag_clear(QUEUE_FLAG_REENTER, q);
-	} else
-		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+	q->request_fn(q);
 }
 EXPORT_SYMBOL(__blk_run_queue);
 
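The block removed above was a reentrancy guard: the first caller to win the test-and-set on QUEUE_FLAG_REENTER ran q->request_fn() directly, while a recursive caller punted the work to kblockd instead of growing the stack. After the merge, __blk_run_queue() always calls q->request_fn() directly, and callers that might recurse are expected to use blk_run_queue_async() (exported in the next hunk). A minimal userspace sketch of the test-and-set guard pattern itself, using C11 atomics in place of the kernel's queue-flag helpers:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag reenter = ATOMIC_FLAG_INIT;

    static void punt_to_worker(void) { puts("deferred to worker"); }

    static void run_queue(void)
    {
        /* atomic_flag_test_and_set() returns the previous value, so
         * only the first concurrent caller takes the direct path; a
         * reentrant caller finds the flag set and defers instead. */
        if (!atomic_flag_test_and_set(&reenter)) {
            puts("running request_fn directly");
            atomic_flag_clear(&reenter);
        } else {
            punt_to_worker();
        }
    }

    int main(void)
    {
        run_queue();                          /* direct path */
        atomic_flag_test_and_set(&reenter);   /* simulate reentry */
        run_queue();                          /* deferred path */
        return 0;
    }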
@@ -328,6 +319,7 @@ void blk_run_queue_async(struct request_queue *q)
 	if (likely(!blk_queue_stopped(q)))
 		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
+EXPORT_SYMBOL(blk_run_queue_async);
 
 /**
  * blk_run_queue - run a single device queue
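blk_run_queue_async() does not run the queue in the caller's context; it queues q->delay_work on kblockd with zero delay, so the handler runs shortly afterwards from worker context. Exporting it (together with dropping the private declaration from block/blk.h below) makes that deferred path available to modular drivers. A rough userspace analogue of the idea, with a pthread worker standing in for kblockd; the names are illustrative, not the kernel API:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
    static bool pending, stopping;

    static void request_fn(void) { puts("queue run from worker context"); }

    static void *worker(void *arg)          /* stands in for kblockd */
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
            while (!pending && !stopping)
                pthread_cond_wait(&wake, &lock);
            if (pending) {
                pending = false;
                pthread_mutex_unlock(&lock);
                request_fn();               /* run outside the lock */
                pthread_mutex_lock(&lock);
            } else if (stopping)
                break;
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    static void run_queue_async(void)       /* "queue_delayed_work(..., 0)" */
    {
        pthread_mutex_lock(&lock);
        pending = true;
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);
        run_queue_async();
        pthread_mutex_lock(&lock);
        stopping = true;
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
        pthread_join(t, NULL);
        return 0;
    }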
@@ -2787,7 +2779,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL(blk_flush_plug_list);
 
 void blk_finish_plug(struct blk_plug *plug)
 {
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 6d735122bc59..bd236313f35d 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -66,14 +66,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 
 	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
 		blk_set_queue_full(q, BLK_RW_SYNC);
-	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+	} else {
 		blk_clear_queue_full(q, BLK_RW_SYNC);
 		wake_up(&rl->wait[BLK_RW_SYNC]);
 	}
 
 	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
 		blk_set_queue_full(q, BLK_RW_ASYNC);
-	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+	} else {
 		blk_clear_queue_full(q, BLK_RW_ASYNC);
 		wake_up(&rl->wait[BLK_RW_ASYNC]);
 	}
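The dropped else-if tests were dead code: the counts and q->nr_requests are unsigned, so whenever count >= q->nr_requests is false, count + 1 <= q->nr_requests necessarily holds and the branch could never be skipped. A throwaway brute-force check of that implication:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        /* If count >= nr is false, then count + 1 <= nr must hold. */
        for (unsigned int nr = 0; nr < 2048; nr++)
            for (unsigned int count = 0; count < 2048; count++)
                if (!(count >= nr))
                    assert(count + 1 <= nr);
        puts("the else-if condition is implied; plain else is equivalent");
        return 0;
    }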
@@ -508,8 +508,10 @@ int blk_register_queue(struct gendisk *disk)
 		return ret;
 
 	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
-	if (ret < 0)
+	if (ret < 0) {
+		blk_trace_remove_sysfs(dev);
 		return ret;
+	}
 
 	kobject_uevent(&q->kobj, KOBJ_ADD);
 
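blk_register_queue() sets up blktrace sysfs state before this kobject_add() call, so a failed add previously leaked it; the new branch unwinds that setup before returning. A minimal sketch of the underlying "undo earlier setup when a later step fails" pattern, with hypothetical setup/undo helpers rather than the kernel API:

    #include <stdio.h>

    static int a_ready;

    static int setup_a(void)  { a_ready = 1; return 0; }
    static void undo_a(void)  { a_ready = 0; }
    static int setup_b(void)  { return -1; /* simulated failure */ }

    static int register_thing(void)
    {
        int ret = setup_a();
        if (ret < 0)
            return ret;

        ret = setup_b();
        if (ret < 0) {
            undo_a();      /* plays the role of blk_trace_remove_sysfs() */
            return ret;
        }
        return 0;
    }

    int main(void)
    {
        int ret = register_thing();
        printf("register_thing() = %d, a_ready = %d\n", ret, a_ready);
        return 0;
    }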
diff --git a/block/blk.h b/block/blk.h
index c9df8fc3c999..61263463e38e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -22,7 +22,6 @@ void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
 void __generic_unplug_device(struct request_queue *);
-void blk_run_queue_async(struct request_queue *q);
 
 /*
  * Internal atomic flags for request handling
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 46b0a1d1d925..5b52011e3a40 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2582,28 +2582,20 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 }
 
 /*
- * Must always be called with the rcu_read_lock() held
+ * Call func for each cic attached to this ioc.
  */
 static void
-__call_for_each_cic(struct io_context *ioc,
-		    void (*func)(struct io_context *, struct cfq_io_context *))
+call_for_each_cic(struct io_context *ioc,
+		  void (*func)(struct io_context *, struct cfq_io_context *))
 {
 	struct cfq_io_context *cic;
 	struct hlist_node *n;
 
+	rcu_read_lock();
+
 	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
 		func(ioc, cic);
-}
 
-/*
- * Call func for each cic attached to this ioc.
- */
-static void
-call_for_each_cic(struct io_context *ioc,
-		  void (*func)(struct io_context *, struct cfq_io_context *))
-{
-	rcu_read_lock();
-	__call_for_each_cic(ioc, func);
 	rcu_read_unlock();
 }
 
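The merge collapses a locked wrapper plus lock-free helper into a single call_for_each_cic() that enters and leaves the RCU read-side section itself. Because rcu_read_lock() sections nest, taking the lock unconditionally is harmless even for a caller that is already inside one. A tiny userspace analogue of the resulting one-function shape, with a pthread mutex standing in for the (nestable) RCU read lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static int items[3] = { 10, 20, 30 };

    /* Post-refactor shape: one traversal function that takes and
     * releases the lock itself, instead of a locked wrapper around
     * a lock-free helper with a single external caller. */
    static void for_each_item(void (*func)(int))
    {
        pthread_mutex_lock(&list_lock);     /* was rcu_read_lock() */
        for (int i = 0; i < 3; i++)
            func(items[i]);
        pthread_mutex_unlock(&list_lock);   /* was rcu_read_unlock() */
    }

    static void print_item(int v) { printf("item %d\n", v); }

    int main(void)
    {
        for_each_item(print_item);
        return 0;
    }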
@@ -2664,7 +2656,7 @@ static void cfq_free_io_context(struct io_context *ioc)
 	 * should be ok to iterate over the known list, we will see all cic's
 	 * since no new ones are added.
 	 */
-	__call_for_each_cic(ioc, cic_free_func);
+	call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_put_cooperator(struct cfq_queue *cfqq)
diff --git a/block/elevator.c b/block/elevator.c
index 6f6abc08bb56..45ca1e34f582 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -671,7 +671,8 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 			q->boundary_rq = rq;
 		}
 	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
-		    where == ELEVATOR_INSERT_SORT)
+		    (where == ELEVATOR_INSERT_SORT ||
+		     where == ELEVATOR_INSERT_SORT_MERGE))
 		where = ELEVATOR_INSERT_BACK;
 
 	switch (where) {
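A request without elevator-private data (REQ_ELVPRIV) carries no scheduler state, so the elevator can neither sort nor merge it; the fix demotes ELEVATOR_INSERT_SORT_MERGE to a plain back insertion in that case, just as was already done for ELEVATOR_INSERT_SORT. A small illustrative sketch of the demotion (simplified types, not the kernel's):

    #include <stdio.h>

    enum insert_where { INSERT_BACK, INSERT_SORT, INSERT_SORT_MERGE };

    /* Without scheduler-private state the elevator can neither sort
     * nor merge, so both sort variants fall back to back insertion. */
    static enum insert_where classify(int has_elvpriv, enum insert_where where)
    {
        if (!has_elvpriv &&
            (where == INSERT_SORT || where == INSERT_SORT_MERGE))
            return INSERT_BACK;
        return where;
    }

    int main(void)
    {
        printf("no elvpriv, SORT_MERGE -> %d (INSERT_BACK == %d)\n",
               classify(0, INSERT_SORT_MERGE), INSERT_BACK);
        printf("with elvpriv, SORT stays -> %d (INSERT_SORT == %d)\n",
               classify(1, INSERT_SORT), INSERT_SORT);
        return 0;
    }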
diff --git a/block/genhd.c b/block/genhd.c
index b364bd038a18..2dd988723d73 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1588,9 +1588,13 @@ static void disk_events_workfn(struct work_struct *work)
 
 	spin_unlock_irq(&ev->lock);
 
-	/* tell userland about new events */
+	/*
+	 * Tell userland about new events. Only the events listed in
+	 * @disk->events are reported. Unlisted events are processed the
+	 * same internally but never get reported to userland.
+	 */
 	for (i = 0; i < ARRAY_SIZE(disk_uevents); i++)
-		if (events & (1 << i))
+		if (events & disk->events & (1 << i))
 			envp[nr_events++] = disk_uevents[i];
 
 	if (nr_events)
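With the added disk->events mask, a raw event bit becomes a uevent only when the driver advertises that event type; as the new comment says, unlisted events are still processed internally but never reported. A small sketch of the same masking, with made-up event names in place of the kernel's disk_uevents table:

    #include <stdio.h>

    #define EV_MEDIA_CHANGE  (1U << 0)
    #define EV_EJECT_REQUEST (1U << 1)

    static const char *const uevents[] = {
        "DISK_MEDIA_CHANGE=1",   /* bit 0 */
        "DISK_EJECT_REQUEST=1",  /* bit 1 */
    };

    int main(void)
    {
        unsigned int events = EV_MEDIA_CHANGE | EV_EJECT_REQUEST; /* what fired */
        unsigned int listed = EV_MEDIA_CHANGE;  /* what the "disk" advertises */

        /* Only bits that both fired and are listed get reported. */
        for (unsigned int i = 0; i < sizeof(uevents) / sizeof(uevents[0]); i++)
            if (events & listed & (1U << i))
                printf("uevent: %s\n", uevents[i]);
        return 0;
    }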