Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c | 242
1 file changed, 148 insertions, 94 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1ca813b16e78..069a61017c02 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -173,6 +173,7 @@ struct cfq_data {
         unsigned int cfq_slice[2];
         unsigned int cfq_slice_async_rq;
         unsigned int cfq_slice_idle;
+        unsigned int cfq_latency;
 
         struct list_head cic_list;
 
@@ -180,6 +181,8 @@ struct cfq_data {
          * Fallback dummy cfqq for extreme OOM conditions
          */
         struct cfq_queue oom_cfqq;
+
+        unsigned long last_end_sync_rq;
 };
 
 enum cfqq_state_flags {
@@ -227,7 +230,7 @@ CFQ_CFQQ_FNS(coop);
         blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
                 struct io_context *, gfp_t);
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
                 struct io_context *);
@@ -238,27 +241,24 @@ static inline int rq_in_driver(struct cfq_data *cfqd)
 }
 
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
-                int is_sync)
+                bool is_sync)
 {
-        return cic->cfqq[!!is_sync];
+        return cic->cfqq[is_sync];
 }
 
 static inline void cic_set_cfqq(struct cfq_io_context *cic,
-                struct cfq_queue *cfqq, int is_sync)
+                struct cfq_queue *cfqq, bool is_sync)
 {
-        cic->cfqq[!!is_sync] = cfqq;
+        cic->cfqq[is_sync] = cfqq;
 }
 
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
  */
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
 {
-        if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
-                return 1;
-
-        return 0;
+        return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
 }
 
 /*
@@ -285,7 +285,7 @@ static int cfq_queue_empty(struct request_queue *q)
  * if a queue is marked sync and has sync io queued. A sync queue with async
  * io only, should not get full sync slice length.
  */
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
                 unsigned short prio)
 {
         const int base_slice = cfqd->cfq_slice[sync];
@@ -313,7 +313,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  * isn't valid until the first request from the dispatch is activated
  * and the slice time set.
  */
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
         if (cfq_cfqq_slice_new(cfqq))
                 return 0;
@@ -488,7 +488,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * we will service the queues.
  */
 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                int add_front)
+                bool add_front)
 {
         struct rb_node **p, *parent;
         struct cfq_queue *__cfqq;
@@ -504,11 +504,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 } else
                         rb_key += jiffies;
         } else if (!add_front) {
+                /*
+                 * Get our rb key offset. Subtract any residual slice
+                 * value carried from last service. A negative resid
+                 * count indicates slice overrun, and this should position
+                 * the next service time further away in the tree.
+                 */
                 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
-                rb_key += cfqq->slice_resid;
+                rb_key -= cfqq->slice_resid;
                 cfqq->slice_resid = 0;
-        } else
-                rb_key = 0;
+        } else {
+                rb_key = -HZ;
+                __cfqq = cfq_rb_first(&cfqd->service_tree);
+                rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+        }
 
         if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
                 /*
@@ -542,7 +551,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                         n = &(*p)->rb_left;
                 else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
                         n = &(*p)->rb_right;
-                else if (rb_key < __cfqq->rb_key)
+                else if (time_before(rb_key, __cfqq->rb_key))
                         n = &(*p)->rb_left;
                 else
                         n = &(*p)->rb_right;
@@ -822,8 +831,10 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
          * reposition in fifo if next is older than rq
          */
         if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
-            time_before(next->start_time, rq->start_time))
+            time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
                 list_move(&rq->queuelist, &next->queuelist);
+                rq_set_fifo_time(rq, rq_fifo_time(next));
+        }
 
         cfq_remove_request(next);
 }
@@ -839,7 +850,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
          * Disallow merge of a sync bio into an async request.
          */
         if (cfq_bio_sync(bio) && !rq_is_sync(rq))
-                return 0;
+                return false;
 
         /*
          * Lookup the cfqq that this bio will be queued with. Allow
@@ -847,13 +858,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
          */
         cic = cfq_cic_lookup(cfqd, current->io_context);
         if (!cic)
-                return 0;
+                return false;
 
         cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
-        if (cfqq == RQ_CFQQ(rq))
-                return 1;
-
-        return 0;
+        return cfqq == RQ_CFQQ(rq);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -881,7 +889,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
  */
 static void
 __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                int timed_out)
+                bool timed_out)
 {
         cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
 
@@ -909,7 +917,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         }
 }
 
-static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
+static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
 {
         struct cfq_queue *cfqq = cfqd->active_queue;
 
@@ -1021,7 +1029,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
  */
 static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
                 struct cfq_queue *cur_cfqq,
-                int probe)
+                bool probe)
 {
         struct cfq_queue *cfqq;
 
@@ -1085,6 +1093,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
         if (!cic || !atomic_read(&cic->ioc->nr_tasks))
                 return;
 
+        /*
+         * If our average think time is larger than the remaining time
+         * slice, then don't idle. This avoids overrunning the allotted
+         * time slice.
+         */
+        if (sample_valid(cic->ttime_samples) &&
+            (cfqq->slice_end - jiffies < cic->ttime_mean))
+                return;
+
         cfq_mark_cfqq_wait_request(cfqq);
 
         /*
@@ -1124,9 +1141,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
  */
 static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
 {
-        struct cfq_data *cfqd = cfqq->cfqd;
-        struct request *rq;
-        int fifo;
+        struct request *rq = NULL;
 
         if (cfq_cfqq_fifo_expire(cfqq))
                 return NULL;
@@ -1136,13 +1151,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
         if (list_empty(&cfqq->fifo))
                 return NULL;
 
-        fifo = cfq_cfqq_sync(cfqq);
         rq = rq_entry_fifo(cfqq->fifo.next);
-
-        if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
+        if (time_before(jiffies, rq_fifo_time(rq)))
                 rq = NULL;
 
-        cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
+        cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
         return rq;
 }
 
@@ -1243,16 +1256,83 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
         return dispatched;
 }
 
+static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+        unsigned int max_dispatch;
+
+        /*
+         * Drain async requests before we start sync IO
+         */
+        if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+                return false;
+
+        /*
+         * If this is an async queue and we have sync IO in flight, let it wait
+         */
+        if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+                return false;
+
+        max_dispatch = cfqd->cfq_quantum;
+        if (cfq_class_idle(cfqq))
+                max_dispatch = 1;
+
+        /*
+         * Does this cfqq already have too much IO in flight?
+         */
+        if (cfqq->dispatched >= max_dispatch) {
+                /*
+                 * idle queue must always only have a single IO in flight
+                 */
+                if (cfq_class_idle(cfqq))
+                        return false;
+
+                /*
+                 * We have other queues, don't allow more IO from this one
+                 */
+                if (cfqd->busy_queues > 1)
+                        return false;
+
+                /*
+                 * Sole queue user, allow bigger slice
+                 */
+                max_dispatch *= 4;
+        }
+
+        /*
+         * Async queues must wait a bit before being allowed dispatch.
+         * We also ramp up the dispatch depth gradually for async IO,
+         * based on the last sync IO we serviced
+         */
+        if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
+                unsigned long last_sync = jiffies - cfqd->last_end_sync_rq;
+                unsigned int depth;
+
+                depth = last_sync / cfqd->cfq_slice[1];
+                if (!depth && !cfqq->dispatched)
+                        depth = 1;
+                if (depth < max_dispatch)
+                        max_dispatch = depth;
+        }
+
+        /*
+         * If we're below the current max, allow a dispatch
+         */
+        return cfqq->dispatched < max_dispatch;
+}
+
 /*
  * Dispatch a request from cfqq, moving them to the request queue
  * dispatch list.
  */
-static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
         struct request *rq;
 
         BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
 
+        if (!cfq_may_dispatch(cfqd, cfqq))
+                return false;
+
         /*
          * follow expired path, else get first next available
          */
@@ -1271,6 +1351,8 @@ static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
                 atomic_long_inc(&cic->ioc->refcount);
                 cfqd->active_cic = cic;
         }
+
+        return true;
 }
 
 /*
@@ -1281,7 +1363,6 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
         struct cfq_data *cfqd = q->elevator->elevator_data;
         struct cfq_queue *cfqq;
-        unsigned int max_dispatch;
 
         if (!cfqd->busy_queues)
                 return 0;
@@ -1294,48 +1375,11 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
                 return 0;
 
         /*
-         * Drain async requests before we start sync IO
+         * Dispatch a request from this cfqq, if it is allowed
          */
-        if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+        if (!cfq_dispatch_request(cfqd, cfqq))
                 return 0;
 
-        /*
-         * If this is an async queue and we have sync IO in flight, let it wait
-         */
-        if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-                return 0;
-
-        max_dispatch = cfqd->cfq_quantum;
-        if (cfq_class_idle(cfqq))
-                max_dispatch = 1;
-
-        /*
-         * Does this cfqq already have too much IO in flight?
-         */
-        if (cfqq->dispatched >= max_dispatch) {
-                /*
-                 * idle queue must always only have a single IO in flight
-                 */
-                if (cfq_class_idle(cfqq))
-                        return 0;
-
-                /*
-                 * We have other queues, don't allow more IO from this one
-                 */
-                if (cfqd->busy_queues > 1)
-                        return 0;
-
-                /*
-                 * we are the only queue, allow up to 4 times of 'quantum'
-                 */
-                if (cfqq->dispatched >= 4 * max_dispatch)
-                        return 0;
-        }
-
-        /*
-         * Dispatch a request from this cfqq
-         */
-        cfq_dispatch_request(cfqd, cfqq);
         cfqq->slice_dispatch++;
         cfq_clear_cfqq_must_dispatch(cfqq);
 
@@ -1635,7 +1679,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                pid_t pid, int is_sync)
+                pid_t pid, bool is_sync)
 {
         RB_CLEAR_NODE(&cfqq->rb_node);
         RB_CLEAR_NODE(&cfqq->p_node);
@@ -1655,7 +1699,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 }
 
 static struct cfq_queue *
-cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
                 struct io_context *ioc, gfp_t gfp_mask)
 {
         struct cfq_queue *cfqq, *new_cfqq = NULL;
@@ -1719,7 +1763,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
+cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
                 gfp_t gfp_mask)
 {
         const int ioprio = task_ioprio(ioc);
@@ -1951,10 +1995,13 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
         enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
 
         if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-            (cfqd->hw_tag && CIC_SEEKY(cic)))
+            (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
                 enable_idle = 0;
         else if (sample_valid(cic->ttime_samples)) {
-                if (cic->ttime_mean > cfqd->cfq_slice_idle)
+                unsigned int slice_idle = cfqd->cfq_slice_idle;
+                if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
+                        slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
+                if (cic->ttime_mean > slice_idle)
                         enable_idle = 0;
                 else
                         enable_idle = 1;
@@ -1973,7 +2020,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  * Check if new_cfqq should preempt the currently active queue. Return 0 for
  * no or if we aren't sure, a 1 will cause a preempt.
  */
-static int
+static bool
 cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
                 struct request *rq)
 {
@@ -1981,48 +2028,48 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 
         cfqq = cfqd->active_queue;
         if (!cfqq)
-                return 0;
+                return false;
 
         if (cfq_slice_used(cfqq))
-                return 1;
+                return true;
 
         if (cfq_class_idle(new_cfqq))
-                return 0;
+                return false;
 
         if (cfq_class_idle(cfqq))
-                return 1;
+                return true;
 
         /*
          * if the new request is sync, but the currently running queue is
          * not, let the sync request have priority.
          */
         if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
-                return 1;
+                return true;
 
         /*
          * So both queues are sync. Let the new request get disk time if
          * it's a metadata request and the current queue is doing regular IO.
          */
         if (rq_is_meta(rq) && !cfqq->meta_pending)
-                return 1;
+                return false;
 
         /*
          * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
          */
         if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
-                return 1;
+                return true;
 
         if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
-                return 0;
+                return false;
 
         /*
          * if this request is as-good as one we would expect from the
          * current cfqq, let it preempt
          */
         if (cfq_rq_close(cfqd, rq))
-                return 1;
+                return true;
 
-        return 0;
+        return false;
 }
 
 /*
@@ -2107,6 +2154,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 
         cfq_add_rq_rb(rq);
 
+        rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
         list_add_tail(&rq->queuelist, &cfqq->fifo);
 
         cfq_rq_enqueued(cfqd, cfqq, rq);
@@ -2157,8 +2205,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
         if (cfq_cfqq_sync(cfqq))
                 cfqd->sync_flight--;
 
-        if (sync)
+        if (sync) {
                 RQ_CIC(rq)->last_end_request = now;
+                cfqd->last_end_sync_rq = now;
+        }
 
         /*
          * If this is the active queue, check if it needs to be expired,
@@ -2284,7 +2334,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
         struct cfq_data *cfqd = q->elevator->elevator_data;
         struct cfq_io_context *cic;
         const int rw = rq_data_dir(rq);
-        const int is_sync = rq_is_sync(rq);
+        const bool is_sync = rq_is_sync(rq);
         struct cfq_queue *cfqq;
         unsigned long flags;
 
@@ -2480,8 +2530,9 @@ static void *cfq_init_queue(struct request_queue *q)
         cfqd->cfq_slice[1] = cfq_slice_sync;
         cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
         cfqd->cfq_slice_idle = cfq_slice_idle;
+        cfqd->cfq_latency = 1;
         cfqd->hw_tag = 1;
-
+        cfqd->last_end_sync_rq = jiffies;
         return cfqd;
 }
 
@@ -2549,6 +2600,7 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
 SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
+SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -2580,6 +2632,7 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
                 UINT_MAX, 0);
+STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \
@@ -2595,6 +2648,7 @@ static struct elv_fs_entry cfq_attrs[] = {
         CFQ_ATTR(slice_async),
         CFQ_ATTR(slice_async_rq),
         CFQ_ATTR(slice_idle),
+        CFQ_ATTR(low_latency),
         __ATTR_NULL
 };
 
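Note: the behavioral core of this patch is the async dispatch throttle added in cfq_may_dispatch(). With the new low_latency tunable enabled (cfqd->cfq_latency, default 1), the dispatch depth allowed to an async queue grows with the time elapsed since the last sync completion (cfqd->last_end_sync_rq), measured in units of the sync slice (cfqd->cfq_slice[1]). The standalone userspace sketch below models only that depth calculation; the function name, timestamps and slice value are illustrative assumptions rather than kernel code.

/* Standalone model of the async dispatch-depth ramp from cfq_may_dispatch().
 * All values are illustrative; in the kernel, "now" is jiffies and
 * slice_sync corresponds to cfqd->cfq_slice[1].
 */
#include <stdio.h>

static unsigned int async_depth(unsigned long now, unsigned long last_end_sync_rq,
                                unsigned long slice_sync, unsigned int dispatched,
                                unsigned int max_dispatch)
{
        unsigned long last_sync = now - last_end_sync_rq;
        unsigned int depth = last_sync / slice_sync;

        /* allow at least one request so async IO is never starved outright */
        if (!depth && !dispatched)
                depth = 1;
        if (depth < max_dispatch)
                max_dispatch = depth;
        return max_dispatch;
}

int main(void)
{
        unsigned long slice_sync = 100; /* e.g. a 100-jiffy sync slice */

        /* just after a sync completion: only a single async request allowed */
        printf("%u\n", async_depth(1000, 990, slice_sync, 0, 4));  /* -> 1 */
        /* three sync slices since the last sync completion: depth 3 */
        printf("%u\n", async_depth(1300, 1000, slice_sync, 2, 4)); /* -> 3 */
        /* sync IO has been quiet for a long time: full quantum allowed */
        printf("%u\n", async_depth(2000, 1000, slice_sync, 2, 4)); /* -> 4 */
        return 0;
}

In other words, the async dispatch depth collapses to a single request right after sync IO completes and only ramps back up to the full cfq_quantum while the disk sees no sync traffic, which is the async-throughput-for-sync-latency trade that the low_latency knob controls.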