 Documentation/ABI/testing/sysfs-cfq-target-latency |  8 ++++++++
 block/blk-core.c                                    |  5 +++--
 block/blk-throttle.c                                |  2 +-
 block/cfq-iosched.c                                 | 10 ++++++++--
 include/linux/blkdev.h                              | 18 +++++++-----------
 5 files changed, 27 insertions(+), 16 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-cfq-target-latency b/Documentation/ABI/testing/sysfs-cfq-target-latency
new file mode 100644
index 00000000000..df0f7828c5e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-cfq-target-latency
@@ -0,0 +1,8 @@
+What:		/sys/block/<device>/iosched/target_latency
+Date:		March 2012
+Contact:	Tao Ma <boyu.mt@taobao.com>
+Description:
+		The /sys/block/<device>/iosched/target_latency file only
+		exists when cfq is selected as the I/O scheduler in
+		/sys/block/<device>/queue/scheduler. It holds the target
+		latency (in milliseconds) cfq uses to size each task's time slice.
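For context: the value read from and written through this file is in milliseconds (the SHOW_FUNCTION/STORE_FUNCTION hunks further down convert to and from jiffies), and the default is 300 ms. Below is a minimal user-space sketch of driving the knob; the helper name and error handling are illustrative only.

#include <stdio.h>

/* Hypothetical helper: set cfq's target latency (in milliseconds) for
 * one disk.  The attribute exists only while cfq is the active
 * scheduler for that disk. */
static int set_cfq_target_latency(const char *disk, unsigned int ms)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/block/%s/iosched/target_latency", disk);
	f = fopen(path, "w");
	if (!f)
		return -1;	/* no such disk, or cfq not selected */
	fprintf(f, "%u\n", ms);
	return fclose(f);
}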
diff --git a/block/blk-core.c b/block/blk-core.c
index 3a78b00edd7..1f61b74867e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -483,7 +483,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q)
 		return NULL;
 
-	q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
 	if (q->id < 0)
 		goto fail_q;
 
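The fix here is one word, but the rule is general: a function that accepts a gfp_t must thread it through to every allocation it makes. Callers of blk_alloc_queue_node() may pass a restricted mask such as GFP_NOIO precisely so that allocations cannot recurse into the block layer, and a hard-coded GFP_KERNEL deeper in the call chain silently defeats that. A minimal sketch of the pattern, with hypothetical names:

#include <linux/idr.h>
#include <linux/slab.h>

static DEFINE_IDA(thing_ida);

struct thing {
	int id;
};

/* Hypothetical allocator: every allocation honours the caller's mask,
 * so a GFP_NOIO or GFP_NOFS caller gets what it asked for. */
static struct thing *thing_alloc(gfp_t gfp_mask)
{
	struct thing *t = kzalloc(sizeof(*t), gfp_mask);

	if (!t)
		return NULL;

	t->id = ida_simple_get(&thing_ida, 0, 0, gfp_mask);
	if (t->id < 0) {
		kfree(t);
		return NULL;
	}
	return t;
}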
@@ -1277,7 +1277,8 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
 	list_for_each_entry_reverse(rq, &plug->list, queuelist) {
 		int el_ret;
 
-		(*request_count)++;
+		if (rq->q == q)
+			(*request_count)++;
 
 		if (rq->q != q || !blk_rq_merge_ok(rq, bio))
 			continue;
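Why the counting matters: a task's plug list can hold requests destined for several queues, and the caller uses *request_count to decide when the plug is "full" for the queue it is currently submitting to. Counting foreign requests made that threshold trip too early. A sketch of the consumer side, paraphrased from blk_queue_bio() in this kernel:

	/* Paraphrased: once the submitting task has BLK_MAX_REQUEST_COUNT
	 * requests plugged for this queue, the plug is flushed rather
	 * than grown further. */
	if (request_count >= BLK_MAX_REQUEST_COUNT)
		blk_flush_plug_list(plug, false);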
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5eed6a76721..f2ddb94626b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1218,7 +1218,7 @@ void blk_throtl_drain(struct request_queue *q)
 	struct bio_list bl;
 	struct bio *bio;
 
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	bio_list_init(&bl);
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 45729525356..3c38536bd52 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -295,6 +295,7 @@ struct cfq_data {
 	unsigned int cfq_slice_idle;
 	unsigned int cfq_group_idle;
 	unsigned int cfq_latency;
+	unsigned int cfq_target_latency;
 
 	/*
 	 * Fallback dummy cfqq for extreme OOM conditions
@@ -604,7 +605,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
-	return cfq_target_latency * cfqg->weight / st->total_weight;
+	return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
 }
 
 static inline unsigned
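The formula above is what makes the tunable interesting for cgroup users: each group's slice is its weight-proportional share of the target latency. A worked sketch of the arithmetic (weights here are examples, not defaults):

/* slice = target_latency * weight / total_weight
 *
 * With the default 300 ms target latency and two groups of weights
 * 500 and 1000 (total 1500):
 *
 *   group A: 300 *  500 / 1500 = 100 ms
 *   group B: 300 * 1000 / 1500 = 200 ms
 *
 * Raising target_latency stretches every group's slice; lowering it
 * trades throughput for latency. */
static inline unsigned int group_slice_ms(unsigned int target_ms,
					  unsigned int weight,
					  unsigned int total_weight)
{
	return target_ms * weight / total_weight;
}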
@@ -2271,7 +2272,8 @@ new_workload:
 		 * to have higher weight. A more accurate thing would be to
 		 * calculate system wide asnc/sync ratio.
 		 */
-		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+		tmp = cfqd->cfq_target_latency *
+			cfqg_busy_async_queues(cfqd, cfqg);
 		tmp = tmp/cfqd->busy_queues;
 		slice = min_t(unsigned, slice, tmp);
 
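Same tunable, second consumer: the slice handed to the async workload is capped at the async queues' share of the target latency. A worked example under assumed queue counts:

/* tmp = target_latency * busy_async_queues / busy_queues
 *
 * e.g. 300 ms target latency, 2 busy async queues out of 6 busy
 * queues in total:
 *
 *   tmp   = 300 * 2 / 6 = 100 ms
 *   slice = min(slice, 100 ms)
 *
 * i.e. async queues are collectively limited to their proportional
 * share of the (now per-device) target latency. */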
@@ -3737,6 +3739,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	cfqd->cfq_back_penalty = cfq_back_penalty;
 	cfqd->cfq_slice[0] = cfq_slice_async;
 	cfqd->cfq_slice[1] = cfq_slice_sync;
+	cfqd->cfq_target_latency = cfq_target_latency;
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;
 	cfqd->cfq_group_idle = cfq_group_idle;
@@ -3788,6 +3791,7 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
@@ -3821,6 +3825,7 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
 		UINT_MAX, 0);
 STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -3838,6 +3843,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 	CFQ_ATTR(slice_idle),
 	CFQ_ATTR(group_idle),
 	CFQ_ATTR(low_latency),
+	CFQ_ATTR(target_latency),
 	__ATTR_NULL
 };
 
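For readers unfamiliar with these macros: __CONV=1 selects jiffies<->millisecond conversion, which is why the sysfs file speaks milliseconds while cfqd->cfq_target_latency is kept in jiffies. A sketch of what the new show side expands to, paraphrased from the SHOW_FUNCTION body earlier in cfq-iosched.c:

static ssize_t cfq_target_latency_show(struct elevator_queue *e, char *page)
{
	struct cfq_data *cfqd = e->elevator_data;
	unsigned int __data = cfqd->cfq_target_latency;	/* jiffies */

	__data = jiffies_to_msecs(__data);		/* __CONV == 1 */
	return cfq_var_show(__data, page);
}

The store side runs the same conversion in reverse with msecs_to_jiffies(), clamped to the [1, UINT_MAX] range given in the STORE_FUNCTION line above.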
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 606cf339bb5..2aa24664a5b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -426,14 +426,10 @@ struct request_queue {
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
-static inline int queue_is_locked(struct request_queue *q)
+static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
-#ifdef CONFIG_SMP
-	spinlock_t *lock = q->queue_lock;
-	return lock && spin_is_locked(lock);
-#else
-	return 1;
-#endif
+	if (q->queue_lock)
+		lockdep_assert_held(q->queue_lock);
 }
 
 static inline void queue_flag_set_unlocked(unsigned int flag,
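This hunk is the heart of the conversion. spin_is_locked() only reports that *some* CPU holds the lock, so WARN_ON_ONCE(!queue_is_locked(q)) could be satisfied by another CPU's critical section; and on !CONFIG_SMP the old helper had to hard-code 1 because a non-debug UP spinlock has no state to inspect. lockdep_assert_held() instead checks that the *current* context holds the lock, and compiles away entirely without CONFIG_LOCKDEP. A hypothetical illustration of the difference:

static void example(struct request_queue *q)
{
	/* Old check: passes if any CPU holds q->queue_lock, and is
	 * unconditionally "true" on uniprocessor builds. */
	WARN_ON_ONCE(!spin_is_locked(q->queue_lock));

	/* New check: fires exactly when the current task does not
	 * hold q->queue_lock; zero cost without CONFIG_LOCKDEP. */
	lockdep_assert_held(q->queue_lock);
}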
@@ -445,7 +441,7 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
 static inline int queue_flag_test_and_clear(unsigned int flag,
 					    struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	if (test_bit(flag, &q->queue_flags)) {
 		__clear_bit(flag, &q->queue_flags);
@@ -458,7 +454,7 @@ static inline int queue_flag_test_and_clear(unsigned int flag,
 static inline int queue_flag_test_and_set(unsigned int flag,
 					  struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 
 	if (!test_bit(flag, &q->queue_flags)) {
 		__set_bit(flag, &q->queue_flags);
@@ -470,7 +466,7 @@ static inline int queue_flag_test_and_set(unsigned int flag,
 
 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 	__set_bit(flag, &q->queue_flags);
 }
 
@@ -487,7 +483,7 @@ static inline int queue_in_flight(struct request_queue *q)
 
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
-	WARN_ON_ONCE(!queue_is_locked(q));
+	queue_lockdep_assert_held(q);
 	__clear_bit(flag, &q->queue_flags);
 }
 
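A usage sketch to close: the queue_flag_* helpers use the non-atomic __set_bit()/__clear_bit() on q->queue_flags, which is only safe under q->queue_lock. The new assertion documents that contract and, under lockdep, enforces it on the caller:

static void stop_queue(struct request_queue *q)	/* hypothetical caller */
{
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_STOPPED, q);	/* assertion is satisfied */
	spin_unlock_irq(q->queue_lock);
}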