about summary refs log tree commit diff stats
path: root/block/cfq-iosched.c
diff options
context:
space:
mode:
authorJens Axboe <jens.axboe@oracle.com>2009-07-03 06:57:48 -0400
committerJens Axboe <jens.axboe@oracle.com>2009-09-11 08:33:30 -0400
commit5ad531db6e0f3c3c985666e83d3c1c4d53acccf9 (patch)
treef5bd394c493f425e12fc6991d2b7adf2121ba506 /block/cfq-iosched.c
parentda6c5c720c52cc717124f8f0830b710ea6a092fd (diff)
cfq-iosched: drain device queue before switching to a sync queue
To lessen the impact of async IO on sync IO, let the device drain of any async IO in progress when switching to a sync cfqq that has idling enabled. Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--block/cfq-iosched.c38
1 file changed, 25 insertions(+), 13 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index fd7080ed7935..93693bf6083e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -140,7 +140,7 @@ struct cfq_data {
140 */ 140 */
141 unsigned int busy_rt_queues; 141 unsigned int busy_rt_queues;
142 142
143 int rq_in_driver; 143 int rq_in_driver[2];
144 int sync_flight; 144 int sync_flight;
145 145
146 /* 146 /*
@@ -239,6 +239,11 @@ static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
239static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *, 239static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
240 struct io_context *); 240 struct io_context *);
241 241
242static inline int rq_in_driver(struct cfq_data *cfqd)
243{
244 return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
245}
246
242static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic, 247static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
243 int is_sync) 248 int is_sync)
244{ 249{
@@ -760,9 +765,9 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
760{ 765{
761 struct cfq_data *cfqd = q->elevator->elevator_data; 766 struct cfq_data *cfqd = q->elevator->elevator_data;
762 767
763 cfqd->rq_in_driver++; 768 cfqd->rq_in_driver[rq_is_sync(rq)]++;
764 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", 769 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
765 cfqd->rq_in_driver); 770 rq_in_driver(cfqd));
766 771
767 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); 772 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
768} 773}
@@ -770,11 +775,12 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
770static void cfq_deactivate_request(struct request_queue *q, struct request *rq) 775static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
771{ 776{
772 struct cfq_data *cfqd = q->elevator->elevator_data; 777 struct cfq_data *cfqd = q->elevator->elevator_data;
778 const int sync = rq_is_sync(rq);
773 779
774 WARN_ON(!cfqd->rq_in_driver); 780 WARN_ON(!cfqd->rq_in_driver[sync]);
775 cfqd->rq_in_driver--; 781 cfqd->rq_in_driver[sync]--;
776 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d", 782 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
777 cfqd->rq_in_driver); 783 rq_in_driver(cfqd));
778} 784}
779 785
780static void cfq_remove_request(struct request *rq) 786static void cfq_remove_request(struct request *rq)
@@ -1080,7 +1086,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1080 /* 1086 /*
1081 * still requests with the driver, don't idle 1087 * still requests with the driver, don't idle
1082 */ 1088 */
1083 if (cfqd->rq_in_driver) 1089 if (rq_in_driver(cfqd))
1084 return; 1090 return;
1085 1091
1086 /* 1092 /*
@@ -1312,6 +1318,12 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
1312 return 0; 1318 return 0;
1313 1319
1314 /* 1320 /*
1321 * Drain async requests before we start sync IO
1322 */
1323 if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
1324 return 0;
1325
1326 /*
1315 * If this is an async queue and we have sync IO in flight, let it wait 1327 * If this is an async queue and we have sync IO in flight, let it wait
1316 */ 1328 */
1317 if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq)) 1329 if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
@@ -2130,11 +2142,11 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
2130 */ 2142 */
2131static void cfq_update_hw_tag(struct cfq_data *cfqd) 2143static void cfq_update_hw_tag(struct cfq_data *cfqd)
2132{ 2144{
2133 if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak) 2145 if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
2134 cfqd->rq_in_driver_peak = cfqd->rq_in_driver; 2146 cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
2135 2147
2136 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN && 2148 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
2137 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN) 2149 rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
2138 return; 2150 return;
2139 2151
2140 if (cfqd->hw_tag_samples++ < 50) 2152 if (cfqd->hw_tag_samples++ < 50)
@@ -2161,9 +2173,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
2161 2173
2162 cfq_update_hw_tag(cfqd); 2174 cfq_update_hw_tag(cfqd);
2163 2175
2164 WARN_ON(!cfqd->rq_in_driver); 2176 WARN_ON(!cfqd->rq_in_driver[sync]);
2165 WARN_ON(!cfqq->dispatched); 2177 WARN_ON(!cfqq->dispatched);
2166 cfqd->rq_in_driver--; 2178 cfqd->rq_in_driver[sync]--;
2167 cfqq->dispatched--; 2179 cfqq->dispatched--;
2168 2180
2169 if (cfq_cfqq_sync(cfqq)) 2181 if (cfq_cfqq_sync(cfqq))
@@ -2197,7 +2209,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
2197 cfq_arm_slice_timer(cfqd); 2209 cfq_arm_slice_timer(cfqd);
2198 } 2210 }
2199 2211
2200 if (!cfqd->rq_in_driver) 2212 if (!rq_in_driver(cfqd))
2201 cfq_schedule_dispatch(cfqd); 2213 cfq_schedule_dispatch(cfqd);
2202} 2214}
2203 2215