author		Linus Torvalds <torvalds@linux-foundation.org>	2012-04-13 21:07:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-04-13 21:07:19 -0400
commit		d8dd0b6d4836bce81cece60509ef3b157a420776 (patch)
tree		7a28f327a15443d6c9d091f3d272abd107251ab7 /block
parent		2d59dcfb54ade45cacc59a6e7bd96b8c19088c3d (diff)
parent		1b2e19f17ed327af6add02978efdf354e4f8e4df (diff)
Merge branch 'for-3.4/core' of git://git.kernel.dk/linux-block
Pull block core bits from Jens Axboe:
"It's a nice and quiet round this time, since most of the tricky stuff
has been pushed to 3.5 to give it more time to mature. After a few
hectic block IO core changes for 3.3 and 3.2, I'm quite happy with a
slow round.
Really minor stuff in here, the only real functional change is making
the auto-unplug threshold a per-queue entity. The threshold is set so
that it's low enough that we don't hold off IO for too long, but still
big enough to get a nice benefit from the batched insert (and hence
queue lock cost reduction). For raid configurations, this currently
breaks down."
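To make the per-disk flush threshold concrete, here is a minimal standalone C sketch. It is illustrative only: struct req, plugged_for_disk() and MAX_PLUG_REQUESTS are invented here, with MAX_PLUG_REQUESTS standing in for the kernel's plug flush limit (BLK_MAX_REQUEST_COUNT). It shows why a plug holding requests for several disks, as in a RAID configuration, should judge each disk by its own plugged count rather than by the global total:

/*
 * Standalone illustration, not kernel code: struct req, MAX_PLUG_REQUESTS
 * and plugged_for_disk() are hypothetical; MAX_PLUG_REQUESTS plays the
 * role of the kernel's plug flush threshold.
 */
#include <stdio.h>

#define MAX_PLUG_REQUESTS 16

struct req { int disk; };

/* Count only the plugged requests destined for one disk. */
static int plugged_for_disk(const struct req *plug, int n, int disk)
{
	int count = 0;

	for (int i = 0; i < n; i++)
		if (plug[i].disk == disk)	/* per-disk, not global */
			count++;
	return count;
}

int main(void)
{
	/* One plug holding requests for two member disks of a RAID set. */
	struct req plug[] = { {0}, {1}, {0}, {1}, {0} };
	int n = sizeof(plug) / sizeof(plug[0]);

	printf("plugged for disk 0: %d\n", plugged_for_disk(plug, n, 0));
	printf("plugged for disk 1: %d\n", plugged_for_disk(plug, n, 1));
	printf("flush a disk's batch once it alone reaches %d\n",
	       MAX_PLUG_REQUESTS);
	return 0;
}

Counted globally, the plug above looks twice as full as either disk's actual backlog, which is what made a single shared threshold flush too early on striped devices.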
* 'for-3.4/core' of git://git.kernel.dk/linux-block:
block: make auto block plug flush threshold per-disk based
Documentation: Add sysfs ABI change for cfq's target latency.
block: Make cfq_target_latency tunable through sysfs.
block: use lockdep_assert_held for queue locking
block: blk_alloc_queue_node(): use caller's GFP flags instead of GFP_KERNEL
Diffstat (limited to 'block')
 block/blk-core.c     |  5 +++--
 block/blk-throttle.c |  2 +-
 block/cfq-iosched.c  | 10 ++++++++--
 3 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3a78b00edd71..1f61b74867e4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -483,7 +483,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q)
 		return NULL;
 
-	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
 	if (q->id < 0)
 		goto fail_q;
 
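This first hunk makes blk_alloc_queue_node() honor its gfp_mask argument for the queue-ID allocation as well. A hedged sketch of the kind of caller this protects (the surrounding context is hypothetical; blk_alloc_queue_node(), GFP_NOIO and NUMA_NO_NODE are real kernel symbols):

/* Hypothetical caller: allocate a queue from a context that must not
 * recurse into I/O (e.g. on a memory-reclaim path), so GFP_KERNEL is
 * not safe and a stricter mask is passed down. */
struct request_queue *q = blk_alloc_queue_node(GFP_NOIO, NUMA_NO_NODE);

if (!q)
	return NULL;
/* Before the fix, ida_simple_get() inside still used GFP_KERNEL and
 * silently ignored the caller's stricter mask. */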
@@ -1277,7 +1277,8 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
 		int el_ret;
 
-		(*request_count)++;
+		if (rq->q == q)
+			(*request_count)++;
 
 		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
 			continue;
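This second hunk is the per-disk threshold change itself. For context, the count it maintains is consumed a little further down in blk_queue_bio(), roughly like this (paraphrased from blk-core.c of this era; not part of this diff):

/* Paraphrased sketch of the consumer of request_count: */
if (plug) {
	if (request_count >= BLK_MAX_REQUEST_COUNT)
		blk_flush_plug_list(plug, false);	/* flush the batch early */
	list_add_tail(&req->queuelist, &plug->list);
}

By incrementing only for requests bound for the same queue, a disk sharing a plug with other RAID members no longer has its batch flushed early because of its siblings' traffic.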
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5eed6a76721d..f2ddb94626bd 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1218,7 +1218,7 @@ void blk_throtl_drain(struct request_queue *q)
 	struct bio_list bl;
 	struct bio *bio;
 
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	bio_list_init(&bl);
 
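queue_lockdep_assert_held() is essentially lockdep_assert_held() applied to q->queue_lock. The old WARN_ON_ONCE(!queue_is_locked(q)) only tested spin_is_locked() at runtime, which cannot tell who holds the lock and is meaningless on non-SMP builds; the lockdep form checks that the current context actually holds the lock and costs nothing when lockdep is off. A generic sketch of the pattern (my_queue_op() is hypothetical):

/* Hypothetical function showing the assertion pattern: */
static void my_queue_op(struct request_queue *q)
{
	/* Verifies the *current context* holds the lock when lockdep
	 * (CONFIG_PROVE_LOCKING) is enabled; compiles away otherwise. */
	queue_lockdep_assert_held(q);

	/* ... touch state protected by q->queue_lock ... */
}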
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 457295253566..3c38536bd52c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -295,6 +295,7 @@ struct cfq_data {
 	unsigned int cfq_slice_idle;
 	unsigned int cfq_group_idle;
 	unsigned int cfq_latency;
+	unsigned int cfq_target_latency;
 
 	/*
 	 * Fallback dummy cfqq for extreme OOM conditions
@@ -604,7 +605,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
-	return cfq_target_latency * cfqg->weight / st->total_weight;
+	return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
 }
 
 static inline unsigned
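cfq_group_slice() now reads the target latency from the per-queue cfq_data instead of the file-scope default, dividing it among groups by weight share. A self-contained worked example of the arithmetic (the weights are illustrative; 300 ms matches cfq's compiled-in default of HZ * 3 / 10 jiffies):

#include <stdio.h>

int main(void)
{
	/* cfq_group_slice(): slice = target_latency * weight / total_weight */
	unsigned int target_latency = 300;              /* ms */
	unsigned int weight = 500, total_weight = 750;  /* illustrative */

	/* A group owning two thirds of the weight gets 200 of the 300 ms. */
	printf("group slice: %u ms\n",
	       target_latency * weight / total_weight);
	return 0;
}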
@@ -2271,7 +2272,8 @@ new_workload:
 	 * to have higher weight. A more accurate thing would be to
 	 * calculate system wide asnc/sync ratio.
 	 */
-	tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+	tmp = cfqd->cfq_target_latency *
+			cfqg_busy_async_queues(cfqd, cfqg);
 	tmp = tmp/cfqd->busy_queues;
 	slice = min_t(unsigned, slice, tmp);
 
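The same scaling caps async slices here: with a 300 ms target latency, a group with 2 busy async queues on a system with 8 busy queues total has its slice clamped to at most 300 * 2 / 8 = 75 ms (numbers illustrative), keeping background writeback from starving sync I/O.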
@@ -3737,6 +3739,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_back_penalty = cfq_back_penalty;
 	cfqd->cfq_slice[0] = cfq_slice_async;
 	cfqd->cfq_slice[1] = cfq_slice_sync;
+	cfqd->cfq_target_latency = cfq_target_latency;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 	cfqd->cfq_group_idle = cfq_group_idle;
@@ -3788,6 +3791,7 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
@@ -3821,6 +3825,7 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
 		UINT_MAX, 0);
 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name)						\
@@ -3838,6 +3843,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(slice_idle),
 	CFQ_ATTR(group_idle),
 	CFQ_ATTR(low_latency),
+	CFQ_ATTR(target_latency),
 	__ATTR_NULL
 };
 
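With the attribute in cfq_attrs[], the knob appears alongside the other cfq tunables; per the sysfs ABI documentation added in this pull, at /sys/block/<device>/queue/iosched/target_latency. The trailing 1 passed to SHOW_FUNCTION/STORE_FUNCTION means values are exchanged in milliseconds and converted to jiffies internally, and stores are clamped to [1, UINT_MAX]. A small hedged userspace example (disk name and value are illustrative):

#include <stdio.h>

int main(void)
{
	/* Path per the sysfs ABI added in this series; adjust the disk. */
	const char *knob = "/sys/block/sda/queue/iosched/target_latency";
	FILE *f = fopen(knob, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Raise the target from the 300 ms default to favor throughput. */
	fprintf(f, "600\n");
	fclose(f);
	return 0;
}

Lower values bias cfq toward latency, higher values toward throughput, which is the trade-off the tunable exposes.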