author     Jens Axboe <axboe@kernel.dk>    2012-05-01 08:29:55 -0400
committer  Jens Axboe <axboe@kernel.dk>    2012-05-01 08:29:55 -0400
commit     0b7877d4eea3f93e3dd941999522bbd8c538cb53
tree       ade6d4e411b9b9b569c802e3b2179826162c934c /block
parent     bd1a68b59c8e3bce45fb76632c64e1e063c3962d
parent     69964ea4c7b68c9399f7977aa5b9aa6539a6a98a

Merge tag 'v3.4-rc5' into for-3.5/core
The core branch is behind driver commits that we want to build
on for 3.5, hence I'm pulling in a later -rc.
Linux 3.4-rc5
Conflicts:
Documentation/feature-removal-schedule.txt
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')

 block/blk-core.c     |  5 +++--
 block/blk-throttle.c |  2 +-
 block/cfq-iosched.c  | 10 ++++++++--
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 6cf13df43c80..3c923a7aeb56 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -542,7 +542,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
         if (!q)
                 return NULL;
 
-        q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+        q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
         if (q->id < 0)
                 goto fail_q;
 
@@ -1372,7 +1372,8 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
         list_for_each_entry_reverse(rq, &plug->list, queuelist) {
                 int el_ret;
 
-                (*request_count)++;
+                if (rq->q == q)
+                        (*request_count)++;
 
                 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
                         continue;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 46310ec93d1c..14dedecfc7e8 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1218,7 +1218,7 @@ void blk_throtl_drain(struct request_queue *q)
         struct bio_list bl;
         struct bio *bio;
 
-        WARN_ON_ONCE(!queue_is_locked(q));
+        queue_lockdep_assert_held(q);
 
         bio_list_init(&bl);
 
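The blk-throttle hunk above replaces a runtime WARN_ON_ONCE(!queue_is_locked(q)) with a lockdep-based assertion. As a hedged illustration of the resulting calling convention (not part of the patch; the function name below is made up for the example), a path that must run under the queue lock would look roughly like this:

/*
 * Sketch only: the assumption is that queue_lockdep_assert_held()
 * lets lockdep flag a missing q->queue_lock at development time
 * instead of relying on a runtime WARN.
 */
#include <linux/blkdev.h>

static void example_requires_queue_lock(struct request_queue *q)
{
        queue_lockdep_assert_held(q);   /* complain if caller dropped queue_lock */

        /* ... work that must run with q->queue_lock held ... */
}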
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 832b2ac8cb8d..673c977cc2bf 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -343,6 +343,7 @@ struct cfq_data {
         unsigned int cfq_slice_idle;
         unsigned int cfq_group_idle;
         unsigned int cfq_latency;
+        unsigned int cfq_target_latency;
 
         /*
          * Fallback dummy cfqq for extreme OOM conditions
@@ -910,7 +911,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
         struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
-        return cfq_target_latency * cfqg->weight / st->total_weight;
+        return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
 }
 
 static inline unsigned
@@ -2579,7 +2580,8 @@ new_workload:
          * to have higher weight. A more accurate thing would be to
          * calculate system wide asnc/sync ratio.
          */
-        tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+        tmp = cfqd->cfq_target_latency *
+                cfqg_busy_async_queues(cfqd, cfqg);
         tmp = tmp/cfqd->busy_queues;
         slice = min_t(unsigned, slice, tmp);
 
@@ -4026,6 +4028,7 @@ static int cfq_init_queue(struct request_queue *q)
         cfqd->cfq_back_penalty = cfq_back_penalty;
         cfqd->cfq_slice[0] = cfq_slice_async;
         cfqd->cfq_slice[1] = cfq_slice_sync;
+        cfqd->cfq_target_latency = cfq_target_latency;
         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
         cfqd->cfq_slice_idle = cfq_slice_idle;
         cfqd->cfq_group_idle = cfq_group_idle;
@@ -4081,6 +4084,7 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -4114,6 +4118,7 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
                 UINT_MAX, 0);
 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -4131,6 +4136,7 @@ static struct elv_fs_entry cfq_attrs[] = {
         CFQ_ATTR(slice_idle),
         CFQ_ATTR(group_idle),
         CFQ_ATTR(low_latency),
+        CFQ_ATTR(target_latency),
         __ATTR_NULL
 };
 
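Because the cfq-iosched hunks wire cfqd->cfq_target_latency into the SHOW_FUNCTION/STORE_FUNCTION macros and cfq_attrs[], the tunable is expected to surface as a per-device sysfs attribute alongside low_latency whenever CFQ is the active elevator. Below is a minimal userspace sketch, not part of the patch; the device name "sda" and the value 300 (milliseconds, matching the conversion flag of 1 in the macros) are assumptions for illustration only.

/*
 * Sketch: read and update CFQ's target_latency via sysfs.
 * Requires root for the write and CFQ as the active scheduler.
 */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/block/sda/queue/iosched/target_latency";
        char buf[64];
        FILE *f;

        /* Read the current target latency (reported in milliseconds). */
        f = fopen(path, "r");
        if (!f || !fgets(buf, sizeof(buf), f)) {
                perror("read target_latency");
                if (f)
                        fclose(f);
                return 1;
        }
        fclose(f);
        printf("current target_latency: %s", buf);

        /* Write a new value; 300 ms is a placeholder for this example. */
        f = fopen(path, "w");
        if (!f || fprintf(f, "300\n") < 0) {
                perror("write target_latency");
                if (f)
                        fclose(f);
                return 1;
        }
        fclose(f);
        return 0;
}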