author		Corrado Zoccolo <czoccolo@gmail.com>	2010-02-28 13:45:05 -0500
committer	Jens Axboe <jens.axboe@oracle.com>	2010-02-28 13:45:05 -0500
commit		53c583d2269851de9df1c2e992cb2f7f124a5f55 (patch)
tree		993a4f67663b4457771b7069513ad6e0471223b3 /block/cfq-iosched.c
parent		87c3a922a7ee8cfb9ab837f4ae38c993e9b30711 (diff)
cfq-iosched: requests "in flight" vs "in driver" clarification

Counters for requests "in flight" and "in driver" are used asymmetrically
in cfq_may_dispatch, and have slightly different meaning. We split the
rq_in_flight counter (was sync_flight) to count both sync and async
requests, in order to use this one, which is more accurate in some corner
cases. The rq_in_driver counter is coalesced, since individual sync/async
counts are not used any more.

Signed-off-by: Corrado Zoccolo <czoccolo@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/cfq-iosched.c')
 block/cfq-iosched.c | 44 ++++++++++++++++++--------------------------
 1 file changed, 18 insertions(+), 26 deletions(-)
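For readers skimming the hunks below, here is a minimal, self-contained C sketch of the accounting scheme the commit message describes. It is not part of the patch: the toy_* struct and helpers are simplified, hypothetical stand-ins for struct cfq_data and the elevator hooks, though the counter names and the async-drain check mirror the real code. In the real scheduler, rq_in_driver is updated in cfq_activate_request()/cfq_deactivate_request() and dropped again in cfq_completed_request(), while rq_in_flight[] is updated in cfq_dispatch_insert() and cfq_completed_request(); the toy combines those points for brevity.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

enum { BLK_RW_ASYNC = 0, BLK_RW_SYNC = 1 };

/* Toy stand-in for the two counters this patch ends up with. */
struct toy_cfq_data {
	int rq_in_driver;	/* all requests currently owned by the driver */
	int rq_in_flight[2];	/* dispatched requests, split async/sync */
};

/* Bump both counters when a request is sent out (simplified). */
static void toy_dispatch(struct toy_cfq_data *cfqd, bool sync)
{
	cfqd->rq_in_driver++;
	cfqd->rq_in_flight[sync]++;
}

/* Drop both counters when a request completes (simplified). */
static void toy_complete(struct toy_cfq_data *cfqd, bool sync)
{
	assert(cfqd->rq_in_driver && cfqd->rq_in_flight[sync]);
	cfqd->rq_in_driver--;
	cfqd->rq_in_flight[sync]--;
}

/* The "drain async requests before we start sync IO" test, on the split counter. */
static bool toy_may_start_sync(const struct toy_cfq_data *cfqd)
{
	return cfqd->rq_in_flight[BLK_RW_ASYNC] == 0;
}

int main(void)
{
	struct toy_cfq_data cfqd = { 0, { 0, 0 } };

	toy_dispatch(&cfqd, false);	/* one async request in flight */
	printf("sync may start: %d\n", toy_may_start_sync(&cfqd));	/* prints 0 */

	toy_complete(&cfqd, false);	/* async request finished */
	printf("sync may start: %d\n", toy_may_start_sync(&cfqd));	/* prints 1 */
	return 0;
}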
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 423aee3fd19b..f27e535ce262 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -222,8 +222,8 @@ struct cfq_data {
 
 	unsigned int busy_queues;
 
-	int rq_in_driver[2];
-	int sync_flight;
+	int rq_in_driver;
+	int rq_in_flight[2];
 
 	/*
 	 * queue-depth detection
@@ -416,11 +416,6 @@ static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
 					     struct io_context *);
 
-static inline int rq_in_driver(struct cfq_data *cfqd)
-{
-	return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
-}
-
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
 					    bool is_sync)
 {
@@ -1414,9 +1409,9 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
-	cfqd->rq_in_driver[rq_is_sync(rq)]++;
+	cfqd->rq_in_driver++;
 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
-						rq_in_driver(cfqd));
+						cfqd->rq_in_driver);
 
 	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 }
@@ -1424,12 +1419,11 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	const int sync = rq_is_sync(rq);
 
-	WARN_ON(!cfqd->rq_in_driver[sync]);
-	cfqd->rq_in_driver[sync]--;
+	WARN_ON(!cfqd->rq_in_driver);
+	cfqd->rq_in_driver--;
 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
-						rq_in_driver(cfqd));
+						cfqd->rq_in_driver);
 }
 
 static void cfq_remove_request(struct request *rq)
@@ -1863,8 +1857,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	cfqq->dispatched++;
 	elv_dispatch_sort(q, rq);
 
-	if (cfq_cfqq_sync(cfqq))
-		cfqd->sync_flight++;
+	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
 	cfqq->nr_sectors += blk_rq_sectors(rq);
 }
 
@@ -2211,13 +2204,13 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	/*
 	 * Drain async requests before we start sync IO
 	 */
-	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
 		return false;
 
 	/*
 	 * If this is an async queue and we have sync IO in flight, let it wait
 	 */
-	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
 		return false;
 
 	max_dispatch = cfqd->cfq_quantum;
@@ -3189,14 +3182,14 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 
-	if (rq_in_driver(cfqd) > cfqd->hw_tag_est_depth)
-		cfqd->hw_tag_est_depth = rq_in_driver(cfqd);
+	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
+		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
 
 	if (cfqd->hw_tag == 1)
 		return;
 
 	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
-	    rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
+	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
 		return;
 
 	/*
@@ -3206,7 +3199,7 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
 	 */
 	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
 	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
-	    CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
+	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
 		return;
 
 	if (cfqd->hw_tag_samples++ < 50)
@@ -3259,13 +3252,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 
 	cfq_update_hw_tag(cfqd);
 
-	WARN_ON(!cfqd->rq_in_driver[sync]);
+	WARN_ON(!cfqd->rq_in_driver);
 	WARN_ON(!cfqq->dispatched);
-	cfqd->rq_in_driver[sync]--;
+	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
 
-	if (cfq_cfqq_sync(cfqq))
-		cfqd->sync_flight--;
+	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
 	if (sync) {
 		RQ_CIC(rq)->last_end_request = now;
@@ -3319,7 +3311,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		}
 	}
 
-	if (!rq_in_driver(cfqd))
+	if (!cfqd->rq_in_driver)
 		cfq_schedule_dispatch(cfqd);
 }
 