author    Linus Torvalds <torvalds@linux-foundation.org>  2013-02-28 15:52:24 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-02-28 15:52:24 -0500
commit    ee89f81252179dcbf6cd65bd48299f5e52292d88
tree      805846cd12821f84cfe619d44c9e3e36e0b0f9e6 /kernel
parent    21f3b24da9328415792efc780f50b9f434c12465
parent    de33127d8d3f1d570aad8c2223cd81b206636bc1
Merge branch 'for-3.9/core' of git://git.kernel.dk/linux-block
Pull block IO core bits from Jens Axboe:
 "Below are the core block IO bits for 3.9. It was delayed a few days
  since my workstation kept crashing every 2-8h after pulling it into
  current -git, but turns out it is a bug in the new pstate code (divide
  by zero, will report separately). In any case, it contains:

   - The big cfq/blkcg update from Tejun and Vivek.

   - Additional block and writeback tracepoints from Tejun.

   - Improvement of the 'should sort' (based on queues) logic in the
     plug flushing.

   - _io() variants of the wait_for_completion() interface, using
     io_schedule() instead of schedule() to contribute to io wait
     properly.

   - Various little fixes.

  You'll get two trivial merge conflicts, which should be easy enough to
  fix up"

Fix up the trivial conflicts due to hlist traversal cleanups (commit
b67bfe0d42ca: "hlist: drop the node parameter from iterators").

* 'for-3.9/core' of git://git.kernel.dk/linux-block: (39 commits)
  block: remove redundant check to bd_openers()
  block: use i_size_write() in bd_set_size()
  cfq: fix lock imbalance with failed allocations
  drivers/block/swim3.c: fix null pointer dereference
  block: don't select PERCPU_RWSEM
  block: account iowait time when waiting for completion of IO request
  sched: add wait_for_completion_io[_timeout]
  writeback: add more tracepoints
  block: add block_{touch|dirty}_buffer tracepoint
  buffer: make touch_buffer() an exported function
  block: add @req to bio_{front|back}_merge tracepoints
  block: add missing block_bio_complete() tracepoint
  block: Remove should_sort judgement when flush blk_plug
  block,elevator: use new hashtable implementation
  cfq-iosched: add hierarchical cfq_group statistics
  cfq-iosched: collect stats from dead cfqgs
  cfq-iosched: separate out cfqg_stats_reset() from cfq_pd_reset_stats()
  blkcg: make blkcg_print_blkgs() grab q locks instead of blkcg lock
  block: RCU free request_queue
  blkcg: implement blkg_[rw]stat_recursive_sum() and blkg_[rw]stat_merge()
  ...
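(For context on the _io() variants mentioned above: plain schedule_timeout()
sleeps without telling the scheduler the task is blocked on IO. The sketch
below shows roughly what io_schedule_timeout() adds on top of it. This is
illustrative only, modeled on the kernel of this era, not the verbatim
implementation; the exact bookkeeping in kernel/sched/core.c differs.)

#include <linux/sched.h>
#include <linux/delayacct.h>

/*
 * Illustrative sketch: the task is flagged as waiting for IO for the
 * duration of the sleep, so the time is charged to iowait and to
 * block-IO delay accounting instead of counting as a plain sleep.
 */
static long io_schedule_timeout_sketch(long timeout)
{
	long ret;

	delayacct_blkio_start();	/* begin block-IO delay accounting */
	current->in_iowait = 1;		/* scheduler counts this sleep as iowait */
	ret = schedule_timeout(timeout);
	current->in_iowait = 0;
	delayacct_blkio_end();

	return ret;
}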
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c      57
-rw-r--r--  kernel/trace/blktrace.c  28
2 files changed, 77 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 12af4270c9c1..7f12624a393c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3258,7 +3258,8 @@ void complete_all(struct completion *x)
 EXPORT_SYMBOL(complete_all);
 
 static inline long __sched
-do_wait_for_common(struct completion *x, long timeout, int state)
+do_wait_for_common(struct completion *x,
+		   long (*action)(long), long timeout, int state)
 {
 	if (!x->done) {
 		DECLARE_WAITQUEUE(wait, current);
@@ -3271,7 +3272,7 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 		}
 		__set_current_state(state);
 		spin_unlock_irq(&x->wait.lock);
-		timeout = schedule_timeout(timeout);
+		timeout = action(timeout);
 		spin_lock_irq(&x->wait.lock);
 	} while (!x->done && timeout);
 	__remove_wait_queue(&x->wait, &wait);
@@ -3282,17 +3283,30 @@ do_wait_for_common(struct completion *x, long timeout, int state)
 	return timeout ?: 1;
 }
 
-static long __sched
-wait_for_common(struct completion *x, long timeout, int state)
+static inline long __sched
+__wait_for_common(struct completion *x,
+		  long (*action)(long), long timeout, int state)
 {
 	might_sleep();
 
 	spin_lock_irq(&x->wait.lock);
-	timeout = do_wait_for_common(x, timeout, state);
+	timeout = do_wait_for_common(x, action, timeout, state);
 	spin_unlock_irq(&x->wait.lock);
 	return timeout;
 }
 
+static long __sched
+wait_for_common(struct completion *x, long timeout, int state)
+{
+	return __wait_for_common(x, schedule_timeout, timeout, state);
+}
+
+static long __sched
+wait_for_common_io(struct completion *x, long timeout, int state)
+{
+	return __wait_for_common(x, io_schedule_timeout, timeout, state);
+}
+
 /**
  * wait_for_completion: - waits for completion of a task
  * @x: holds the state of this particular completion
@@ -3329,6 +3343,39 @@ wait_for_completion_timeout(struct completion *x, unsigned long timeout)
 EXPORT_SYMBOL(wait_for_completion_timeout);
 
 /**
+ * wait_for_completion_io: - waits for completion of a task
+ * @x: holds the state of this particular completion
+ *
+ * This waits to be signaled for completion of a specific task. It is NOT
+ * interruptible and there is no timeout. The caller is accounted as waiting
+ * for IO.
+ */
+void __sched wait_for_completion_io(struct completion *x)
+{
+	wait_for_common_io(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(wait_for_completion_io);
+
+/**
+ * wait_for_completion_io_timeout: - waits for completion of a task (w/timeout)
+ * @x: holds the state of this particular completion
+ * @timeout: timeout value in jiffies
+ *
+ * This waits for either a completion of a specific task to be signaled or for a
+ * specified timeout to expire. The timeout is in jiffies. It is not
+ * interruptible. The caller is accounted as waiting for IO.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
+ */
+unsigned long __sched
+wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
+{
+	return wait_for_common_io(x, timeout, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(wait_for_completion_io_timeout);
+
+/**
  * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
  * @x: holds the state of this particular completion
  *
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 71259e2b6b61..9e5b8c272eec 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -739,6 +739,12 @@ static void blk_add_trace_rq_complete(void *ignore,
 					     struct request_queue *q,
 					     struct request *rq)
 {
+	struct blk_trace *bt = q->blk_trace;
+
+	/* if control ever passes through here, it's a request based driver */
+	if (unlikely(bt && !bt->rq_based))
+		bt->rq_based = true;
+
 	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
 }
 
@@ -774,15 +780,30 @@ static void blk_add_trace_bio_bounce(void *ignore,
 	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
-static void blk_add_trace_bio_complete(void *ignore,
-				       struct request_queue *q, struct bio *bio,
-				       int error)
+static void blk_add_trace_bio_complete(void *ignore, struct bio *bio, int error)
 {
+	struct request_queue *q;
+	struct blk_trace *bt;
+
+	if (!bio->bi_bdev)
+		return;
+
+	q = bdev_get_queue(bio->bi_bdev);
+	bt = q->blk_trace;
+
+	/*
+	 * Request based drivers will generate both rq and bio completions.
+	 * Ignore bio ones.
+	 */
+	if (likely(!bt) || bt->rq_based)
+		return;
+
 	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
 					struct request_queue *q,
+					struct request *rq,
 					struct bio *bio)
 {
 	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
@@ -790,6 +811,7 @@ static void blk_add_trace_bio_backmerge(void *ignore,
 
 static void blk_add_trace_bio_frontmerge(void *ignore,
 					 struct request_queue *q,
+					 struct request *rq,
 					 struct bio *bio)
 {
 	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
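
(Taken together, the two completion hooks above form a simple latch: a
request based driver emits both rq and bio completion events, so the first
rq completion sets bt->rq_based and bio completions are dropped from then
on, while a bio-only driver never takes the rq path and its bio completions
keep flowing. A condensed restatement of that logic, outside the real
tracepoint plumbing and with a made-up trace_state type:)

#include <stdbool.h>
#include <stddef.h>

struct trace_state {
	bool rq_based;		/* latched once an rq completion is seen */
};

static void on_rq_complete(struct trace_state *bt)
{
	if (bt && !bt->rq_based)
		bt->rq_based = true;	/* latch: this queue is request based */
	/* ... emit the request completion event ... */
}

static void on_bio_complete(struct trace_state *bt)
{
	if (bt == NULL || bt->rq_based)
		return;		/* tracing off, or rq path already traced it */
	/* ... emit the bio completion event ... */
}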