author	Vivek Goyal <vgoyal@redhat.com>	2009-12-03 12:59:53 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2009-12-03 13:28:53 -0500
commit	f75edf2dc828802d358393be80a6c89e919f8273
tree	a3b2fb05298ea41ecc0fd7ad56e1218369f7f9fd
parent	f8d461d692c341add957fb973fb5ee1f62039dc7
blkio: Wait for cfq queue to get backlogged if group is empty
o If a queue consumes its slice and then gets deleted from the service tree,
  its associated group will also get deleted from the service tree if this was
  the only queue in the group. That makes the group lose its share.

o For queues on which we are idling, and which have used up their slice, wait a
  bit for them to get backlogged again before expiring them, so that the group
  does not lose its share.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
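As a reading aid, the following condensed sketch stitches together the three
key hunks from the diff below (it is not literal kernel code, just the new
wait_busy/wait_busy_done flow in one place):

/* In cfq_completed_request(): the slice is used up but this is the only
 * queue in its group, so grant one idle window instead of expiring it. */
if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) {
	cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
	cfq_mark_cfqq_wait_busy(cfqq);
}

/* In cfq_rq_enqueued(): the queue got backlogged again within that window. */
if (cfq_cfqq_wait_busy(cfqq)) {
	cfq_clear_cfqq_wait_busy(cfqq);
	cfq_mark_cfqq_wait_busy_done(cfqq);
}

/* In cfq_select_queue(): expire the queue now; since it is backlogged, its
 * group stays on the service tree and keeps its share. */
if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq))
    && !cfq_cfqq_must_dispatch(cfqq))
	goto expire;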
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	34
1 file changed, 29 insertions(+), 5 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f7364621613a..1cc10489eaf0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -117,6 +117,7 @@ struct cfq_queue {
 
 	/* time when queue got scheduled in to dispatch first request. */
 	unsigned long dispatch_start;
+	unsigned int allocated_slice;
 	/* time when first request from queue completed and slice started. */
 	unsigned long slice_start;
 	unsigned long slice_end;
@@ -314,6 +315,8 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
 	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
+	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
+	CFQ_CFQQ_FLAG_wait_busy_done,	/* Got new request. Expire the queue */
 };
 
 #define CFQ_CFQQ_FNS(name)						\
@@ -341,6 +344,8 @@ CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
 CFQ_CFQQ_FNS(deep);
+CFQ_CFQQ_FNS(wait_busy);
+CFQ_CFQQ_FNS(wait_busy_done);
 #undef CFQ_CFQQ_FNS
 
 #ifdef CONFIG_DEBUG_CFQ_IOSCHED
@@ -578,6 +583,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	}
 	cfqq->slice_start = jiffies;
 	cfqq->slice_end = jiffies + slice;
+	cfqq->allocated_slice = slice;
 	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
 }
 
@@ -859,7 +865,7 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
 {
-	unsigned int slice_used, allocated_slice;
+	unsigned int slice_used;
 
 	/*
 	 * Queue got expired before even a single request completed or
@@ -876,9 +882,8 @@ static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
 			1);
 	} else {
 		slice_used = jiffies - cfqq->slice_start;
-		allocated_slice = cfqq->slice_end - cfqq->slice_start;
-		if (slice_used > allocated_slice)
-			slice_used = allocated_slice;
+		if (slice_used > cfqq->allocated_slice)
+			slice_used = cfqq->allocated_slice;
 	}
 
 	cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u sect=%lu", slice_used,
@@ -1495,6 +1500,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 		cfq_log_cfqq(cfqd, cfqq, "set_active");
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
+		cfqq->allocated_slice = 0;
 		cfqq->slice_end = 0;
 		cfqq->slice_dispatch = 0;
 		cfqq->nr_sectors = 0;
@@ -1524,6 +1530,8 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		del_timer(&cfqd->idle_slice_timer);
 
 	cfq_clear_cfqq_wait_request(cfqq);
+	cfq_clear_cfqq_wait_busy(cfqq);
+	cfq_clear_cfqq_wait_busy_done(cfqq);
 
 	/*
 	 * store what was left of this slice, if the queue idled/timed out
@@ -2066,7 +2074,8 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	/*
 	 * The active queue has run out of time, expire it and select new.
 	 */
-	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
+	if ((cfq_slice_used(cfqq) || cfq_cfqq_wait_busy_done(cfqq))
+	    && !cfq_cfqq_must_dispatch(cfqq))
 		goto expire;
 
 	/*
@@ -3096,6 +3105,10 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
 	if (cfqq == cfqd->active_queue) {
+		if (cfq_cfqq_wait_busy(cfqq)) {
+			cfq_clear_cfqq_wait_busy(cfqq);
+			cfq_mark_cfqq_wait_busy_done(cfqq);
+		}
 		/*
 		 * Remember that we saw a request from this process, but
 		 * don't start queuing just yet. Otherwise we risk seeing lots
@@ -3214,6 +3227,17 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 			cfq_set_prio_slice(cfqd, cfqq);
 			cfq_clear_cfqq_slice_new(cfqq);
 		}
+
+		/*
+		 * If this queue consumed its slice and this is last queue
+		 * in the group, wait for next request before we expire
+		 * the queue
+		 */
+		if (cfq_slice_used(cfqq) && cfqq->cfqg->nr_cfqq == 1) {
+			cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
+			cfq_mark_cfqq_wait_busy(cfqq);
+		}
+
 		/*
 		 * Idling is not enabled on:
 		 * - expired queues