author     Linus Torvalds <torvalds@linux-foundation.org>  2009-12-15 12:11:28 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-15 12:11:28 -0500
commit     51b736b85155a56543fda8aeca5f8592795d7983
tree       937c8be978bf66084128cd62fca3e94fa50c3f36
parent     a8aa1ebdf880ebe7b5738ccebf67e18d62cbdc0b
parent     66ae291978177d5c012015f12b8fbc76dc7d0965
Merge branch 'for-2.6.33' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.33' of git://git.kernel.dk/linux-2.6-block:
cfq: set workload as expired if it doesn't have any slice left
Fix a CFQ crash in "for-2.6.33" branch of block tree
cfq: Remove wait_request flag when idle time is being deleted
cfq-iosched: commenting non-obvious initialization
cfq-iosched: Take care of corner cases of group losing share due to deletion
cfq-iosched: Get rid of cfqq wait_busy_done flag
cfq: Optimization for close cooperating queue searching
block,xd: Delay allocation of DMA buffers until device is known
drbd: Following the hmac change to SHASH (see linux commit 8bd1209cfff)
cfq-iosched: reduce write depth only if sync was delayed
-rw-r--r--  block/cfq-iosched.c           | 94
-rw-r--r--  drivers/block/drbd/drbd_nl.c  |  3
-rw-r--r--  drivers/block/xd.c            | 30
3 files changed, 95 insertions(+), 32 deletions(-)
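
One behavioral note on the cfq-iosched part of this merge: last_end_sync_rq becomes last_delayed_sync, so the async write depth in cfq_may_dispatch() is only cut back when a sync request actually completed late (after its fifo expire deadline), not after every sync completion. The standalone C sketch below models that throttling heuristic under simplified assumptions; allowed_async_depth(), the constants and the sample values are illustrative, not kernel code.

#include <stdio.h>

/* Simplified model of the cfq_may_dispatch() write-depth throttling.
 * HZ, the slice length and the sample values are illustrative only. */
#define HZ 1000

static unsigned int allowed_async_depth(unsigned long now,
                                        unsigned long last_delayed_sync,
                                        unsigned long slice_sync,
                                        unsigned int max_depth)
{
        /* the longer it has been since a sync request finished late,
         * the more async requests we allow in flight at once */
        unsigned int depth = (now - last_delayed_sync) / slice_sync;

        if (depth > max_depth)
                depth = max_depth;
        return depth;
}

int main(void)
{
        unsigned long now = 10 * HZ;
        unsigned long slice_sync = HZ / 10;   /* roughly cfq_slice[1] */

        /* a sync request was delayed just now: async depth drops to 0 */
        printf("%u\n", allowed_async_depth(now, now, slice_sync, 4));

        /* no delayed sync for a whole second: full async depth again */
        printf("%u\n", allowed_async_depth(now, now - HZ, slice_sync, 4));
        return 0;
}

Because cfq_init_queue() now seeds last_delayed_sync to jiffies - HZ, the scheduler starts out as if no sync request had been delayed for a full second, which the second call above illustrates.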
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index cfb0b2f5f63d..e2f80463ed0d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -283,7 +283,7 @@ struct cfq_data {
         */
        struct cfq_queue oom_cfqq;

-       unsigned long last_end_sync_rq;
+       unsigned long last_delayed_sync;

        /* List of cfq groups being managed on this device*/
        struct hlist_head cfqg_list;
@@ -319,7 +319,6 @@ enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_coop,             /* cfqq is shared */
        CFQ_CFQQ_FLAG_deep,             /* sync cfqq experienced large depth */
        CFQ_CFQQ_FLAG_wait_busy,        /* Waiting for next request */
-       CFQ_CFQQ_FLAG_wait_busy_done,   /* Got new request. Expire the queue */
 };

 #define CFQ_CFQQ_FNS(name) \
@@ -348,7 +347,6 @@ CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
-CFQ_CFQQ_FNS(wait_busy_done);
 #undef CFQ_CFQQ_FNS

 #ifdef CONFIG_DEBUG_CFQ_IOSCHED
@@ -1574,7 +1572,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,

        cfq_clear_cfqq_wait_request(cfqq);
        cfq_clear_cfqq_wait_busy(cfqq);
-       cfq_clear_cfqq_wait_busy_done(cfqq);

        /*
         * store what was left of this slice, if the queue idled/timed out
@@ -1750,6 +1747,12 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
                return NULL;

        /*
+        * Don't search priority tree if it's the only queue in the group.
+        */
+       if (cur_cfqq->cfqg->nr_cfqq == 1)
+               return NULL;
+
+       /*
         * We should notice if some of the queues are cooperating, eg
         * working closely on the same area of the disk. In that case,
         * we can group them together and don't waste time idling.
@@ -2110,7 +2113,9 @@ static void cfq_choose_cfqg(struct cfq_data *cfqd)
                cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
                cfqd->serving_type = cfqg->saved_workload;
                cfqd->serving_prio = cfqg->saved_serving_prio;
-       }
+       } else
+               cfqd->workload_expires = jiffies - 1;
+
        choose_service_tree(cfqd, cfqg);
 }

@@ -2128,14 +2133,35 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)

        if (!cfqd->rq_queued)
                return NULL;
+
        /*
-        * The active queue has run out of time, expire it and select new.
+        * We were waiting for group to get backlogged. Expire the queue
         */
-       if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq))
-           && !cfq_cfqq_must_dispatch(cfqq))
+       if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
                goto expire;

        /*
+        * The active queue has run out of time, expire it and select new.
+        */
+       if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
+               /*
+                * If slice had not expired at the completion of last request
+                * we might not have turned on wait_busy flag. Don't expire
+                * the queue yet. Allow the group to get backlogged.
+                *
+                * The very fact that we have used the slice, that means we
+                * have been idling all along on this queue and it should be
+                * ok to wait for this request to complete.
+                */
+               if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
+                   && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
+                       cfqq = NULL;
+                       goto keep_queue;
+               } else
+                       goto expire;
+       }
+
+       /*
         * The active queue has requests and isn't expired, allow it to
         * dispatch.
         */
@@ -2264,7 +2290,7 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
         * based on the last sync IO we serviced
         */
        if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
-               unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+               unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
                unsigned int depth;

                depth = last_sync / cfqd->cfq_slice[1];
@@ -3165,10 +3191,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

        if (cfqq == cfqd->active_queue) {
-               if (cfq_cfqq_wait_busy(cfqq)) {
-                       cfq_clear_cfqq_wait_busy(cfqq);
-                       cfq_mark_cfqq_wait_busy_done(cfqq);
-               }
                /*
                 * Remember that we saw a request from this process, but
                 * don't start queuing just yet. Otherwise we risk seeing lots
@@ -3183,6 +3205,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
                    cfqd->busy_queues > 1) {
                        del_timer(&cfqd->idle_slice_timer);
+                       cfq_clear_cfqq_wait_request(cfqq);
                        __blk_run_queue(cfqd->queue);
                } else
                        cfq_mark_cfqq_must_dispatch(cfqq);
@@ -3251,6 +3274,35 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
                cfqd->hw_tag = 0;
 }

+static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       struct cfq_io_context *cic = cfqd->active_cic;
+
+       /* If there are other queues in the group, don't wait */
+       if (cfqq->cfqg->nr_cfqq > 1)
+               return false;
+
+       if (cfq_slice_used(cfqq))
+               return true;
+
+       /* if slice left is less than think time, wait busy */
+       if (cic && sample_valid(cic->ttime_samples)
+           && (cfqq->slice_end - jiffies < cic->ttime_mean))
+               return true;
+
+       /*
+        * If think times is less than a jiffy than ttime_mean=0 and above
+        * will not be true. It might happen that slice has not expired yet
+        * but will expire soon (4-5 ns) during select_queue(). To cover the
+        * case where think time is less than a jiffy, mark the queue wait
+        * busy if only 1 jiffy is left in the slice.
+        */
+       if (cfqq->slice_end - jiffies == 1)
+               return true;
+
+       return false;
+}
+
 static void cfq_completed_request(struct request_queue *q, struct request *rq)
 {
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -3273,7 +3325,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)

        if (sync) {
                RQ_CIC(rq)->last_end_request = now;
-               cfqd->last_end_sync_rq = now;
+               if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
+                       cfqd->last_delayed_sync = now;
        }

        /*
@@ -3289,11 +3342,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
        }

        /*
-        * If this queue consumed its slice and this is last queue
-        * in the group, wait for next request before we expire
-        * the queue
+        * Should we wait for next request to come in before we expire
+        * the queue.
         */
-       if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) {
+       if (cfq_should_wait_busy(cfqd, cfqq)) {
                cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
                cfq_mark_cfqq_wait_busy(cfqq);
        }
@@ -3711,7 +3763,11 @@ static void *cfq_init_queue(struct request_queue *q)
        cfqd->cfq_latency = 1;
        cfqd->cfq_group_isolation = 0;
        cfqd->hw_tag = -1;
-       cfqd->last_end_sync_rq = jiffies;
+       /*
+        * we optimistically start assuming sync ops weren't delayed in last
+        * second, in order to have larger depth for async operations.
+        */
+       cfqd->last_delayed_sync = jiffies - HZ;
        INIT_RCU_HEAD(&cfqd->rcu);
        return cfqd;
 }
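
The new cfq_should_wait_busy() above centralizes the decision to arm the wait-busy idle window when the last queue in a group completes a request. A rough standalone model of that decision is sketched below; the struct, field names and sample values are invented for illustration and stand in for cfq_queue, cfq_io_context and jiffies arithmetic.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of cfq_should_wait_busy(): the fields mirror the patch,
 * but the types and values here are illustrative, not kernel types. */
struct toy_queue {
        int group_nr_queues;       /* cfqq->cfqg->nr_cfqq */
        long slice_left;           /* cfqq->slice_end - jiffies */
        bool slice_used;           /* cfq_slice_used(cfqq) */
        long think_time_mean;      /* cic->ttime_mean, 0 if no samples */
};

static bool should_wait_busy(const struct toy_queue *q)
{
        /* if there are other queues in the group, don't idle for this one */
        if (q->group_nr_queues > 1)
                return false;

        /* slice already consumed: wait so the group stays backlogged */
        if (q->slice_used)
                return true;

        /* less slice left than the task's think time: it won't make it back */
        if (q->think_time_mean && q->slice_left < q->think_time_mean)
                return true;

        /* sub-jiffy think time: catch a slice that is about to expire */
        if (q->slice_left == 1)
                return true;

        return false;
}

int main(void)
{
        struct toy_queue q = { .group_nr_queues = 1, .slice_left = 5,
                               .slice_used = false, .think_time_mean = 8 };
        printf("%d\n", should_wait_busy(&q));   /* 1: think time exceeds slice left */
        return 0;
}

The intent, per the commits merged here, is to keep a group from losing its share merely because its only queue went briefly empty right as its slice ran out.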
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 436a090b532b..4e0726aa53b0 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1271,8 +1271,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
                        goto fail;
                }

-               if (crypto_tfm_alg_type(crypto_hash_tfm(tfm))
-                   != CRYPTO_ALG_TYPE_HASH) {
+               if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
                        retcode = ERR_AUTH_ALG_ND;
                        goto fail;
                }
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 0877d3628fda..d1fd032e7514 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -169,13 +169,6 @@ static int __init xd_init(void)

        init_timer (&xd_watchdog_int); xd_watchdog_int.function = xd_watchdog;

-       if (!xd_dma_buffer)
-               xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
-       if (!xd_dma_buffer) {
-               printk(KERN_ERR "xd: Out of memory.\n");
-               return -ENOMEM;
-       }
-
        err = -EBUSY;
        if (register_blkdev(XT_DISK_MAJOR, "xd"))
                goto out1;
@@ -202,6 +195,19 @@ static int __init xd_init(void)
                        xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma);
        }

+       /*
+        * With the drive detected, xd_maxsectors should now be known.
+        * If xd_maxsectors is 0, nothing was detected and we fall through
+        * to return -ENODEV
+        */
+       if (!xd_dma_buffer && xd_maxsectors) {
+               xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
+               if (!xd_dma_buffer) {
+                       printk(KERN_ERR "xd: Out of memory.\n");
+                       goto out3;
+               }
+       }
+
        err = -ENODEV;
        if (!xd_drives)
                goto out3;
@@ -249,15 +255,17 @@ out4:
        for (i = 0; i < xd_drives; i++)
                put_disk(xd_gendisk[i]);
 out3:
-       release_region(xd_iobase,4);
+       if (xd_maxsectors)
+               release_region(xd_iobase,4);
+
+       if (xd_dma_buffer)
+               xd_dma_mem_free((unsigned long)xd_dma_buffer,
+                               xd_maxsectors * 0x200);
 out2:
        blk_cleanup_queue(xd_queue);
 out1a:
        unregister_blkdev(XT_DISK_MAJOR, "xd");
 out1:
-       if (xd_dma_buffer)
-               xd_dma_mem_free((unsigned long)xd_dma_buffer,
-                               xd_maxsectors * 0x200);
        return err;
 Enomem:
        err = -ENOMEM;
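
The xd.c hunks above defer allocation of the DMA bounce buffer until the probe has filled in xd_maxsectors, and move the buffer cleanup onto the out3 error path. A hedged userspace sketch of that allocate-after-probe pattern follows; toy_xd_init(), the fake probe value and the plain malloc()/free() calls are illustrative stand-ins for the driver's xd_dma_mem_alloc()/xd_dma_mem_free().

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the reordered xd_init() flow: the buffer is sized from
 * max_sectors only after the probe, and the error path frees it. */
static char *dma_buffer;
static unsigned int max_sectors;

static int toy_xd_init(void)
{
        int err;

        max_sectors = 0x80;            /* pretend the probe found a drive */

        /* only now do we know how big the bounce buffer must be */
        if (!dma_buffer && max_sectors) {
                dma_buffer = malloc(max_sectors * 0x200ul);
                if (!dma_buffer) {
                        err = -12;     /* -ENOMEM */
                        goto out;
                }
        }

        return 0;
out:
        free(dma_buffer);              /* safe on NULL, mirrors out3: */
        dma_buffer = NULL;
        return err;
}

int main(void)
{
        printf("toy_xd_init() = %d\n", toy_xd_init());
        return 0;
}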