Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--	block/cfq-iosched.c	72
1 file changed, 29 insertions(+), 43 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1b2d12cda43e..1ca813b16e78 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -134,13 +134,8 @@ struct cfq_data {
 	struct rb_root prio_trees[CFQ_PRIO_LISTS];
 
 	unsigned int busy_queues;
-	/*
-	 * Used to track any pending rt requests so we can pre-empt current
-	 * non-RT cfqq in service when this value is non-zero.
-	 */
-	unsigned int busy_rt_queues;
 
-	int rq_in_driver;
+	int rq_in_driver[2];
 	int sync_flight;
 
 	/*
@@ -191,7 +186,6 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
 	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
 	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
-	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
 	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
 	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
 	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
@@ -218,7 +212,6 @@ static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)	\
 CFQ_CFQQ_FNS(on_rr);
 CFQ_CFQQ_FNS(wait_request);
 CFQ_CFQQ_FNS(must_dispatch);
-CFQ_CFQQ_FNS(must_alloc);
 CFQ_CFQQ_FNS(must_alloc_slice);
 CFQ_CFQQ_FNS(fifo_expire);
 CFQ_CFQQ_FNS(idle_window);
@@ -239,6 +232,11 @@ static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
 					     struct io_context *);
 
+static inline int rq_in_driver(struct cfq_data *cfqd)
+{
+	return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
+}
+
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
 					    int is_sync)
 {
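The hunk above replaces the single rq_in_driver counter with a two-slot array indexed by rq_is_sync() (async in slot 0, sync in slot 1) and adds an accessor that sums both slots for callers that only care about the total. A minimal standalone sketch of the pattern, with demo names rather than kernel API:

#include <stdio.h>

/* Illustrative stand-ins for the BLK_RW_ASYNC/BLK_RW_SYNC indices. */
enum { DEMO_ASYNC = 0, DEMO_SYNC = 1 };

struct demo_cfqd {
	int rq_in_driver[2];	/* in-flight requests, split by direction */
};

/* Mirrors the new rq_in_driver() accessor: total across both classes. */
static inline int demo_rq_in_driver(const struct demo_cfqd *cfqd)
{
	return cfqd->rq_in_driver[DEMO_ASYNC] + cfqd->rq_in_driver[DEMO_SYNC];
}

int main(void)
{
	struct demo_cfqd d = { { 2, 1 } };	/* 2 async, 1 sync in flight */
	printf("total in driver: %d\n", demo_rq_in_driver(&d));
	return 0;
}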
@@ -257,7 +255,7 @@ static inline void cic_set_cfqq(struct cfq_io_context *cic,
  */
 static inline int cfq_bio_sync(struct bio *bio)
 {
-	if (bio_data_dir(bio) == READ || bio_sync(bio))
+	if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
 		return 1;
 
 	return 0;
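Only the helper used to test the sync hint changes here: bio_sync() gives way to the flag accessor bio_rw_flagged(bio, BIO_RW_SYNCIO), while the rule stays the same (reads are always sync; writes are sync only when explicitly flagged). A reduced sketch of that classification, with demo names rather than kernel API:

#define DEMO_RW_SYNCIO	(1UL << 0)	/* stand-in for the BIO_RW_SYNCIO bit */

/* Reads always count as sync; writes only with the explicit hint set. */
static inline int demo_bio_sync(int is_read, unsigned long rw_flags)
{
	if (is_read || (rw_flags & DEMO_RW_SYNCIO))
		return 1;
	return 0;
}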
@@ -648,8 +646,6 @@ static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	BUG_ON(cfq_cfqq_on_rr(cfqq));
 	cfq_mark_cfqq_on_rr(cfqq);
 	cfqd->busy_queues++;
-	if (cfq_class_rt(cfqq))
-		cfqd->busy_rt_queues++;
 
 	cfq_resort_rr_list(cfqd, cfqq);
 }
@@ -673,8 +669,6 @@ static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 
 	BUG_ON(!cfqd->busy_queues);
 	cfqd->busy_queues--;
-	if (cfq_class_rt(cfqq))
-		cfqd->busy_rt_queues--;
 }
 
 /*
@@ -760,9 +754,9 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
-	cfqd->rq_in_driver++;
+	cfqd->rq_in_driver[rq_is_sync(rq)]++;
 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
-						cfqd->rq_in_driver);
+						rq_in_driver(cfqd));
 
 	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 }
@@ -770,11 +764,12 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
+	const int sync = rq_is_sync(rq);
 
-	WARN_ON(!cfqd->rq_in_driver);
-	cfqd->rq_in_driver--;
+	WARN_ON(!cfqd->rq_in_driver[sync]);
+	cfqd->rq_in_driver[sync]--;
 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
-						cfqd->rq_in_driver);
+						rq_in_driver(cfqd));
 }
 
 static void cfq_remove_request(struct request *rq)
@@ -1080,7 +1075,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	/*
 	 * still requests with the driver, don't idle
 	 */
-	if (cfqd->rq_in_driver)
+	if (rq_in_driver(cfqd))
 		return;
 
 	/*
@@ -1115,6 +1110,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 
 	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
 
+	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
 	cfq_remove_request(rq);
 	cfqq->dispatched++;
 	elv_dispatch_sort(q, rq);
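The added line computes the queue's next request while the dispatched one is still in the tree, so cfqq->next_rq is never left pointing at a request that cfq_remove_request() has already torn down. A tiny list-based sketch of why the ordering matters, using hypothetical demo types:

struct demo_rq {
	long sector;		/* request position on disk */
	struct demo_rq *next;	/* following request in the queue */
};

/* Dispatch the head request: cache its successor *before* unlinking it,
 * mirroring the next_rq-before-remove ordering in the hunk above. */
static struct demo_rq *demo_dispatch_head(struct demo_rq **head)
{
	struct demo_rq *rq = *head;

	if (rq)
		*head = rq->next;	/* successor picked while rq is live */
	return rq;
}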
@@ -1179,20 +1175,6 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 		goto expire;
 
 	/*
-	 * If we have a RT cfqq waiting, then we pre-empt the current non-rt
-	 * cfqq.
-	 */
-	if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
-		/*
-		 * We simulate this as cfqq timed out so that it gets to bank
-		 * the remaining of its time slice.
-		 */
-		cfq_log_cfqq(cfqd, cfqq, "preempt");
-		cfq_slice_expired(cfqd, 1);
-		goto new_queue;
-	}
-
-	/*
 	 * The active queue has requests and isn't expired, allow it to
 	 * dispatch.
 	 */
@@ -1312,6 +1294,12 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 		return 0;
 
 	/*
+	 * Drain async requests before we start sync IO
+	 */
+	if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+		return 0;
+
+	/*
 	 * If this is an async queue and we have sync IO in flight, let it wait
 	 */
 	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
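This added check is the behavioral core of the patch: when the chosen queue is one CFQ will idle on (a sync queue with the idle window set), dispatch holds off until no async requests remain in the driver, so in-flight writes cannot distort the sync slice's idle timing. A reduced decision sketch under assumed names, with slot 0 playing the role of BLK_RW_ASYNC:

/* Nonzero means "do not dispatch yet": the queue wants slice idling
 * but async writes are still sitting in the driver. */
static int demo_must_drain_async(int wants_idle_window,
				 const int rq_in_driver[2])
{
	return wants_idle_window && rq_in_driver[0] > 0;	/* [0] = async */
}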
@@ -1362,7 +1350,7 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 		cfq_slice_expired(cfqd, 0);
 	}
 
-	cfq_log(cfqd, "dispatched a request");
+	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
 	return 1;
 }
 
@@ -2130,11 +2118,11 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
  */
 static void cfq_update_hw_tag(struct cfq_data *cfqd)
 {
-	if (cfqd->rq_in_driver > cfqd->rq_in_driver_peak)
-		cfqd->rq_in_driver_peak = cfqd->rq_in_driver;
+	if (rq_in_driver(cfqd) > cfqd->rq_in_driver_peak)
+		cfqd->rq_in_driver_peak = rq_in_driver(cfqd);
 
 	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
-	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
+	    rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
 		return;
 
 	if (cfqd->hw_tag_samples++ < 50)
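cfq_update_hw_tag() now feeds the summed accessor into its sampling: it tracks the highest total it has seen in the driver and, after enough samples, decides whether the device really queues commands. A compressed sketch of that logic with assumed constants and demo names:

#define DEMO_HW_QUEUE_MIN	5	/* stand-in for CFQ_HW_QUEUE_MIN */

struct demo_hw_tag {
	int peak;	/* highest total in-driver count observed */
	int samples;	/* observations in the current window */
	int hw_tag;	/* 1 if the device appears to queue commands */
};

static void demo_update_hw_tag(struct demo_hw_tag *t, int total_in_driver)
{
	if (total_in_driver > t->peak)
		t->peak = total_in_driver;
	if (++t->samples < 50)
		return;
	/* verdict once per 50-sample window, then start over */
	t->hw_tag = t->peak > DEMO_HW_QUEUE_MIN;
	t->samples = 0;
	t->peak = 0;
}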
@@ -2161,9 +2149,9 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 
 	cfq_update_hw_tag(cfqd);
 
-	WARN_ON(!cfqd->rq_in_driver);
+	WARN_ON(!cfqd->rq_in_driver[sync]);
 	WARN_ON(!cfqq->dispatched);
-	cfqd->rq_in_driver--;
+	cfqd->rq_in_driver[sync]--;
 	cfqq->dispatched--;
 
 	if (cfq_cfqq_sync(cfqq))
@@ -2197,7 +2185,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		cfq_arm_slice_timer(cfqd);
 	}
 
-	if (!cfqd->rq_in_driver)
+	if (!rq_in_driver(cfqd))
 		cfq_schedule_dispatch(cfqd);
 }
 
@@ -2229,8 +2217,7 @@ static void cfq_prio_boost(struct cfq_queue *cfqq)
 
 static inline int __cfq_may_queue(struct cfq_queue *cfqq)
 {
-	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
-	    !cfq_cfqq_must_alloc_slice(cfqq)) {
+	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
 		cfq_mark_cfqq_must_alloc_slice(cfqq);
 		return ELV_MQUEUE_MUST;
 	}
@@ -2317,7 +2304,6 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	}
 
 	cfqq->allocated[rw]++;
-	cfq_clear_cfqq_must_alloc(cfqq);
 	atomic_inc(&cfqq->ref);
 
 	spin_unlock_irqrestore(q->queue_lock, flags);