author    Ingo Molnar <mingo@elte.hu>    2009-12-28 03:23:13 -0500
committer Ingo Molnar <mingo@elte.hu>    2009-12-28 03:23:13 -0500
commit    605c1a187f3ce82fbc243e2163c5ca8d1926df8e (patch)
tree      c8065a8c5606a66f81dc494ce22a5baa5e0dfe7e /block
parent    17a2a9b57a9a7d2fd8f97df951b5e63e0bd56ef5 (diff)
parent    ce9277fb08e6e721482f7011ca28dcd0449b197c (diff)
Merge branch 'iommu/fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu into x86/urgent
Diffstat (limited to 'block')
-rw-r--r--  block/blk-settings.c    7
-rw-r--r--  block/cfq-iosched.c    94
2 files changed, 82 insertions(+), 19 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index dd1f1e0e196..6ae118d6e19 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -554,11 +554,18 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                 ret = -1;
         }
 
+        /*
+         * Temporarily disable discard granularity. It's currently buggy
+         * since we default to 0 for discard_granularity, hence this
+         * "failure" will always trigger for non-zero offsets.
+         */
+#if 0
         if (offset &&
             (offset & (b->discard_granularity - 1)) != b->discard_alignment) {
                 t->discard_misaligned = 1;
                 ret = -1;
         }
+#endif
 
         /* If top has no alignment offset, inherit from bottom */
         if (!t->alignment_offset)
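Why the disabled check misfires: discard_granularity is unsigned and defaults to 0, so the mask (b->discard_granularity - 1) wraps around to all ones and (offset & mask) is just offset, which fails the comparison for every non-zero offset. A minimal userspace sketch of the wraparound (illustrative only, not part of the commit):

#include <stdio.h>

int main(void)
{
        unsigned int granularity = 0;        /* the buggy default */
        unsigned int alignment = 0;          /* default discard_alignment */
        unsigned int offset = 63;            /* any non-zero offset */
        unsigned int mask = granularity - 1; /* wraps to 0xffffffff */

        /* (offset & mask) == offset here, so the check fires for
         * every non-zero offset regardless of actual alignment */
        if ((offset & mask) != alignment)
                printf("misaligned: offset=%u mask=%#x\n", offset, mask);
        return 0;
}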
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cfb0b2f5f63..e2f80463ed0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -283,7 +283,7 @@ struct cfq_data {
          */
         struct cfq_queue oom_cfqq;
 
-        unsigned long last_end_sync_rq;
+        unsigned long last_delayed_sync;
 
         /* List of cfq groups being managed on this device*/
         struct hlist_head cfqg_list;
@@ -319,7 +319,6 @@ enum cfqq_state_flags {
319 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ 319 CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
320 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */ 320 CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
321 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ 321 CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
322 CFQ_CFQQ_FLAG_wait_busy_done, /* Got new request. Expire the queue */
323}; 322};
324 323
325#define CFQ_CFQQ_FNS(name) \ 324#define CFQ_CFQQ_FNS(name) \
@@ -348,7 +347,6 @@ CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
-CFQ_CFQQ_FNS(wait_busy_done);
 #undef CFQ_CFQQ_FNS
 
 #ifdef CONFIG_DEBUG_CFQ_IOSCHED
@@ -1574,7 +1572,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
         cfq_clear_cfqq_wait_request(cfqq);
         cfq_clear_cfqq_wait_busy(cfqq);
-        cfq_clear_cfqq_wait_busy_done(cfqq);
 
         /*
          * store what was left of this slice, if the queue idled/timed out
@@ -1750,6 +1747,12 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
                 return NULL;
 
         /*
+         * Don't search priority tree if it's the only queue in the group.
+         */
+        if (cur_cfqq->cfqg->nr_cfqq == 1)
+                return NULL;
+
+        /*
          * We should notice if some of the queues are cooperating, eg
          * working closely on the same area of the disk. In that case,
          * we can group them together and don't waste time idling.
@@ -2110,7 +2113,9 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
                 cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
                 cfqd->serving_type = cfqg->saved_workload;
                 cfqd->serving_prio = cfqg->saved_serving_prio;
-        }
+        } else
+                cfqd->workload_expires = jiffies - 1;
+
         choose_service_tree(cfqd, cfqg);
 }
 
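Setting workload_expires to jiffies - 1 marks the group's saved workload slice as already expired, so the scheduler re-evaluates the workload instead of resuming stale state. The comparison stays correct even if the jiffies counter wraps, because the kernel's time helpers test the sign of a subtraction; a userspace sketch of that idiom (illustrative only, not part of the commit):

#include <stdio.h>

/* same idiom as the kernel's time_after() in <linux/jiffies.h> */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long jiffies = 0;           /* worst case: counter just wrapped */
        unsigned long expires = jiffies - 1; /* wraps to ULONG_MAX */

        /* the signed difference keeps the result correct across wrap */
        printf("expired: %d\n", time_after(jiffies, expires)); /* prints 1 */
        return 0;
}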
@@ -2128,14 +2133,35 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 
         if (!cfqd->rq_queued)
                 return NULL;
+
         /*
-         * The active queue has run out of time, expire it and select new.
+         * We were waiting for group to get backlogged. Expire the queue
          */
-        if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq))
-            && !cfq_cfqq_must_dispatch(cfqq))
+        if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
                 goto expire;
 
         /*
+         * The active queue has run out of time, expire it and select new.
+         */
+        if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
+                /*
+                 * If slice had not expired at the completion of last request
+                 * we might not have turned on wait_busy flag. Don't expire
+                 * the queue yet. Allow the group to get backlogged.
+                 *
+                 * The very fact that we have used the slice, that means we
+                 * have been idling all along on this queue and it should be
+                 * ok to wait for this request to complete.
+                 */
+                if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
+                    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+                        cfqq = NULL;
+                        goto keep_queue;
+                } else
+                        goto expire;
+        }
+
+        /*
          * The active queue has requests and isn't expired, allow it to
          * dispatch.
          */
@@ -2264,7 +2290,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
          * based on the last sync IO we serviced
          */
         if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
-                unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+                unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
                 unsigned int depth;
 
                 depth = last_sync / cfqd->cfq_slice[1];
@@ -3165,10 +3191,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
         if (cfqq == cfqd->active_queue) {
-                if (cfq_cfqq_wait_busy(cfqq)) {
-                        cfq_clear_cfqq_wait_busy(cfqq);
-                        cfq_mark_cfqq_wait_busy_done(cfqq);
-                }
                 /*
                  * Remember that we saw a request from this process, but
                  * don't start queuing just yet. Otherwise we risk seeing lots
@@ -3183,6 +3205,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
                     cfqd->busy_queues > 1) {
                         del_timer(&cfqd->idle_slice_timer);
+                        cfq_clear_cfqq_wait_request(cfqq);
                         __blk_run_queue(cfqd->queue);
                 } else
                         cfq_mark_cfqq_must_dispatch(cfqq);
@@ -3251,6 +3274,35 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
                 cfqd->hw_tag = 0;
 }
 
+static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+        struct cfq_io_context *cic = cfqd->active_cic;
+
+        /* If there are other queues in the group, don't wait */
+        if (cfqq->cfqg->nr_cfqq > 1)
+                return false;
+
+        if (cfq_slice_used(cfqq))
+                return true;
+
+        /* if slice left is less than think time, wait busy */
+        if (cic && sample_valid(cic->ttime_samples)
+            && (cfqq->slice_end - jiffies < cic->ttime_mean))
+                return true;
+
+        /*
+         * If think times is less than a jiffy than ttime_mean=0 and above
+         * will not be true. It might happen that slice has not expired yet
+         * but will expire soon (4-5 ns) during select_queue(). To cover the
+         * case where think time is less than a jiffy, mark the queue wait
+         * busy if only 1 jiffy is left in the slice.
+         */
+        if (cfqq->slice_end - jiffies == 1)
+                return true;
+
+        return false;
+}
+
 static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
         struct cfq_queue *cfqq = RQ_CFQQ(rq);
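The last two tests in cfq_should_wait_busy() cover the same corner case: CFQ samples think times in jiffies, so a task that issues its next request within the same tick records a think time of 0 and its ttime_mean stays 0, which means the think-time test above can never fire; the explicit one-jiffy check catches it instead. A userspace sketch of the granularity effect (illustrative only, not part of the commit):

#include <stdio.h>

int main(void)
{
        /* timestamps in whole jiffies, as CFQ records them */
        unsigned long request_completed = 1234;
        unsigned long next_request_seen = 1234; /* arrives in the same tick */

        /* a sub-jiffy think time is sampled as 0, so a mean over such
         * samples is also 0 and never exceeds the remaining slice */
        unsigned long think_time = next_request_seen - request_completed;
        printf("sampled think time: %lu jiffies\n", think_time); /* 0 */
        return 0;
}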
@@ -3273,7 +3325,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 
         if (sync) {
                 RQ_CIC(rq)->last_end_request = now;
-                cfqd->last_end_sync_rq = now;
+                if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
+                        cfqd->last_delayed_sync = now;
         }
 
         /*
@@ -3289,11 +3342,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
         }
 
         /*
-         * If this queue consumed its slice and this is last queue
-         * in the group, wait for next request before we expire
-         * the queue
+         * Should we wait for next request to come in before we expire
+         * the queue.
          */
-        if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) {
+        if (cfq_should_wait_busy(cfqd, cfqq)) {
                 cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
                 cfq_mark_cfqq_wait_busy(cfqq);
         }
@@ -3711,7 +3763,11 @@ static void *cfq_init_queue(struct request_queue *q)
         cfqd->cfq_latency = 1;
         cfqd->cfq_group_isolation = 0;
         cfqd->hw_tag = -1;
-        cfqd->last_end_sync_rq = jiffies;
+        /*
+         * we optimistically start assuming sync ops weren't delayed in last
+         * second, in order to have larger depth for async operations.
+         */
+        cfqd->last_delayed_sync = jiffies - HZ;
         INIT_RCU_HEAD(&cfqd->rcu);
         return cfqd;
 }
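Seeding last_delayed_sync one second in the past feeds straight into the depth calculation in cfq_may_dispatch() above: last_sync starts out at HZ rather than 0, so async queues begin with a generous dispatch depth. A rough sketch of the arithmetic (illustrative only; HZ=1000 and the default sync slice of HZ/10 are assumed):

#include <stdio.h>

int main(void)
{
        unsigned long hz = 1000;                /* assumed tick rate */
        unsigned long cfq_slice_sync = hz / 10; /* assumed default cfq_slice[1] */
        unsigned long jiffies = 5000;           /* arbitrary "now" */

        unsigned long last_delayed_sync = jiffies - hz; /* the seeded value */
        unsigned long last_sync = jiffies - last_delayed_sync; /* == hz */

        /* same formula as cfq_may_dispatch(): depth grows with time
         * since the last delayed sync completion */
        printf("initial async depth: %lu\n", last_sync / cfq_slice_sync); /* 10 */
        return 0;
}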