Diffstat (limited to 'block/as-iosched.c')

-rw-r--r--   block/as-iosched.c | 180 ++++++++++--------------------------------------------------------
1 file changed, 29 insertions(+), 151 deletions(-)
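This patch converts the anticipatory IO scheduler away from private rbtree bookkeeping: struct as_rq loses its rb_node/rb_key pair, and the per-direction sort lists are maintained through the elevator core's shared rb helpers instead, keyed on a request's start sector through an rb_node that now lives in struct request. The helpers themselves are outside this diff; inferred from the call sites in the hunks below (these declarations are a reconstruction, not copied from the elevator headers), the interface is roughly:

/* Inferred from usage in the hunks below; not copied from elevator.h. */
struct request *elv_rb_add(struct rb_root *root, struct request *rq);
                                        /* returns an alias, if one exists */
void elv_rb_del(struct rb_root *root, struct request *rq);
struct request *elv_rb_find(struct rb_root *root, sector_t sector);
struct request *elv_rb_former_request(request_queue_t *q, struct request *rq);
struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq);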
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 9d0f15a54c64..134545b544c5 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -149,12 +149,6 @@ enum arq_state {
 };
 
 struct as_rq {
-        /*
-         * rbtree index, key is the starting offset
-         */
-        struct rb_node rb_node;
-        sector_t rb_key;
-
         struct request *request;
 
         struct io_context *io_context;  /* The submitting task */
@@ -268,101 +262,22 @@ static void as_put_io_context(struct as_rq *arq)
 /*
  * rb tree support functions
  */
-#define rb_entry_arq(node)      rb_entry((node), struct as_rq, rb_node)
 #define ARQ_RB_ROOT(ad, arq)    (&(ad)->sort_list[(arq)->is_sync])
-#define rq_rb_key(rq)           (rq)->sector
-
-/*
- * as_find_first_arq finds the first (lowest sector numbered) request
- * for the specified data_dir. Used to sweep back to the start of the disk
- * (1-way elevator) after we process the last (highest sector) request.
- */
-static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
-{
-        struct rb_node *n = ad->sort_list[data_dir].rb_node;
-
-        if (n == NULL)
-                return NULL;
-
-        for (;;) {
-                if (n->rb_left == NULL)
-                        return rb_entry_arq(n);
-
-                n = n->rb_left;
-        }
-}
-
-/*
- * Add the request to the rb tree if it is unique. If there is an alias (an
- * existing request against the same sector), which can happen when using
- * direct IO, then return the alias.
- */
-static struct as_rq *__as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
-{
-        struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
-        struct rb_node *parent = NULL;
-        struct as_rq *__arq;
-        struct request *rq = arq->request;
-
-        arq->rb_key = rq_rb_key(rq);
-
-        while (*p) {
-                parent = *p;
-                __arq = rb_entry_arq(parent);
-
-                if (arq->rb_key < __arq->rb_key)
-                        p = &(*p)->rb_left;
-                else if (arq->rb_key > __arq->rb_key)
-                        p = &(*p)->rb_right;
-                else
-                        return __arq;
-        }
-
-        rb_link_node(&arq->rb_node, parent, p);
-        rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
 
-        return NULL;
-}
-
-static void as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
+static void as_add_arq_rb(struct as_data *ad, struct request *rq)
 {
-        struct as_rq *alias;
+        struct as_rq *arq = RQ_DATA(rq);
+        struct request *alias;
 
-        while ((unlikely(alias = __as_add_arq_rb(ad, arq)))) {
-                as_move_to_dispatch(ad, alias);
+        while ((unlikely(alias = elv_rb_add(ARQ_RB_ROOT(ad, arq), rq)))) {
+                as_move_to_dispatch(ad, RQ_DATA(alias));
                 as_antic_stop(ad);
         }
 }
 
-static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
-{
-        if (RB_EMPTY_NODE(&arq->rb_node)) {
-                WARN_ON(1);
-                return;
-        }
-
-        rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
-        RB_CLEAR_NODE(&arq->rb_node);
-}
-
-static struct request *
-as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
+static inline void as_del_arq_rb(struct as_data *ad, struct request *rq)
 {
-        struct rb_node *n = ad->sort_list[data_dir].rb_node;
-        struct as_rq *arq;
-
-        while (n) {
-                arq = rb_entry_arq(n);
-
-                if (sector < arq->rb_key)
-                        n = n->rb_left;
-                else if (sector > arq->rb_key)
-                        n = n->rb_right;
-                else
-                        return arq->request;
-        }
-
-        return NULL;
+        elv_rb_del(ARQ_RB_ROOT(ad, RQ_DATA(rq)), rq);
 }
 
 /*
@@ -455,32 +370,29 @@ as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
  * this with as_choose_req form the basis for how the scheduler chooses
  * what request to process next. Anticipation works on top of this.
  */
-static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
+static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *arq)
 {
-        const int data_dir = last->is_sync;
-        struct as_rq *ret;
+        struct request *last = arq->request;
         struct rb_node *rbnext = rb_next(&last->rb_node);
         struct rb_node *rbprev = rb_prev(&last->rb_node);
-        struct as_rq *arq_next, *arq_prev;
+        struct as_rq *next = NULL, *prev = NULL;
 
-        BUG_ON(!RB_EMPTY_NODE(&last->rb_node));
+        BUG_ON(RB_EMPTY_NODE(&last->rb_node));
 
         if (rbprev)
-                arq_prev = rb_entry_arq(rbprev);
-        else
-                arq_prev = NULL;
+                prev = RQ_DATA(rb_entry_rq(rbprev));
 
         if (rbnext)
-                arq_next = rb_entry_arq(rbnext);
+                next = RQ_DATA(rb_entry_rq(rbnext));
         else {
-                arq_next = as_find_first_arq(ad, data_dir);
-                if (arq_next == last)
-                        arq_next = NULL;
-        }
+                const int data_dir = arq->is_sync;
 
-        ret = as_choose_req(ad, arq_next, arq_prev);
+                rbnext = rb_first(&ad->sort_list[data_dir]);
+                if (rbnext && rbnext != &last->rb_node)
+                        next = RQ_DATA(rb_entry_rq(rbnext));
+        }
 
-        return ret;
+        return as_choose_req(ad, next, prev);
 }
 
 /*
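as_find_first_arq could be deleted because its one job, sweeping back to the lowest-sector request for the one-way elevator, collapses to rb_first() on the sort list now that requests sit in the tree directly. The rbnext != &last->rb_node test preserves the old arq_next == last check: if the first request in the tree is the one being departed from, there is nothing to wrap around to. The conversion also depends on rb_entry_rq to map an rb_node back to its request; presumably defined along the lines of the old rb_entry_arq macro:

/* Assumed definition, by analogy with the removed rb_entry_arq: */
#define rb_entry_rq(node)       rb_entry((node), struct request, rb_node)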
@@ -982,7 +894,7 @@ static void as_remove_queued_request(request_queue_t *q, struct request *rq)
                 ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
 
         list_del_init(&arq->fifo);
-        as_del_arq_rb(ad, arq);
+        as_del_arq_rb(ad, rq);
 }
 
 /*
@@ -1039,7 +951,7 @@ static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
         struct request *rq = arq->request;
         const int data_dir = arq->is_sync;
 
-        BUG_ON(RB_EMPTY_NODE(&arq->rb_node));
+        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
 
         as_antic_stop(ad);
         ad->antic_status = ANTIC_OFF;
@@ -1269,7 +1181,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
                 atomic_inc(&arq->io_context->aic->nr_queued);
         }
 
-        as_add_arq_rb(ad, arq);
+        as_add_arq_rb(ad, rq);
 
         /*
          * set expire time (only used for reads) and add to fifo list
@@ -1315,32 +1227,6 @@ static int as_queue_empty(request_queue_t *q)
                 && list_empty(&ad->fifo_list[REQ_SYNC]);
 }
 
-static struct request *as_former_request(request_queue_t *q,
-                                        struct request *rq)
-{
-        struct as_rq *arq = RQ_DATA(rq);
-        struct rb_node *rbprev = rb_prev(&arq->rb_node);
-        struct request *ret = NULL;
-
-        if (rbprev)
-                ret = rb_entry_arq(rbprev)->request;
-
-        return ret;
-}
-
-static struct request *as_latter_request(request_queue_t *q,
-                                        struct request *rq)
-{
-        struct as_rq *arq = RQ_DATA(rq);
-        struct rb_node *rbnext = rb_next(&arq->rb_node);
-        struct request *ret = NULL;
-
-        if (rbnext)
-                ret = rb_entry_arq(rbnext)->request;
-
-        return ret;
-}
-
 static int
 as_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
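Once the rb_node is embedded in struct request, these two functions are pure boilerplate: "former" is rb_prev of the request's node, "latter" is rb_next. The generic elv_rb_former_request/elv_rb_latter_request wired into the ops table at the end of this patch presumably reduce to something like the following sketch (not the elevator.c source):

/* Hedged sketch of the generic replacement; its twin uses rb_next(). */
struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}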
@@ -1351,7 +1237,7 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
         /*
          * check for front merge
          */
-        __rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
+        __rq = elv_rb_find(&ad->sort_list[bio_data_dir(bio)], rb_key);
         if (__rq && elv_rq_merge_ok(__rq, bio)) {
                 *req = __rq;
                 return ELEVATOR_FRONT_MERGE;
@@ -1360,17 +1246,16 @@ as_merge(request_queue_t *q, struct request **req, struct bio *bio)
         return ELEVATOR_NO_MERGE;
 }
 
-static void as_merged_request(request_queue_t *q, struct request *req)
+static void as_merged_request(request_queue_t *q, struct request *req, int type)
 {
         struct as_data *ad = q->elevator->elevator_data;
-        struct as_rq *arq = RQ_DATA(req);
 
         /*
          * if the merge was a front merge, we need to reposition request
          */
-        if (rq_rb_key(req) != arq->rb_key) {
-                as_del_arq_rb(ad, arq);
-                as_add_arq_rb(ad, arq);
+        if (type == ELEVATOR_FRONT_MERGE) {
+                as_del_arq_rb(ad, req);
+                as_add_arq_rb(ad, req);
                 /*
                  * Note! At this stage of this and the next function, our next
                  * request may not be optimal - eg the request may have "grown"
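Previously a front merge was detected indirectly: after the merge grew the request downward, rq_rb_key(req) no longer matched the rb_key cached in the as_rq. With the cached key gone, the elevator core has to say explicitly what kind of merge happened, hence the new type argument. The calling side would look something like this sketch (assumed shape of the core's hook dispatch, not verbatim elevator.c):

/* Sketch: the elevator core passing the merge type down to the scheduler. */
void elv_merged_request(request_queue_t *q, struct request *rq, int type)
{
        elevator_t *e = q->elevator;

        if (e->ops->elevator_merged_fn)
                e->ops->elevator_merged_fn(q, rq, type);
}

Only a front merge changes the request's start sector, so only ELEVATOR_FRONT_MERGE requires the delete/re-add dance to reposition the request in the tree; a back merge leaves the key untouched.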
@@ -1382,18 +1267,12 @@ static void as_merged_request(request_queue_t *q, struct request *req)
 static void as_merged_requests(request_queue_t *q, struct request *req,
                                 struct request *next)
 {
-        struct as_data *ad = q->elevator->elevator_data;
         struct as_rq *arq = RQ_DATA(req);
         struct as_rq *anext = RQ_DATA(next);
 
         BUG_ON(!arq);
         BUG_ON(!anext);
 
-        if (rq_rb_key(req) != arq->rb_key) {
-                as_del_arq_rb(ad, arq);
-                as_add_arq_rb(ad, arq);
-        }
-
         /*
          * if anext expires before arq, assign its expire time to arq
          * and move into anext position (anext will be deleted) in fifo
@@ -1468,7 +1347,6 @@ static int as_set_request(request_queue_t *q, struct request *rq,
 
         if (arq) {
                 memset(arq, 0, sizeof(*arq));
-                RB_CLEAR_NODE(&arq->rb_node);
                 arq->request = rq;
                 arq->state = AS_RQ_PRESCHED;
                 arq->io_context = NULL;
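Dropping RB_CLEAR_NODE from as_set_request means AS no longer marks the node as off-tree itself; for the BUG_ON(RB_EMPTY_NODE(&rq->rb_node)) check earlier in this patch to stay meaningful, the shared helpers presumably maintain that invariant themselves, along these lines (assumed behavior, not the elevator.c source):

/* Assumed: the shared delete helper re-marks the node as "on no tree",
 * keeping RB_EMPTY_NODE() usable as an "is it queued?" test. */
void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}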
@@ -1654,8 +1532,8 @@ static struct elevator_type iosched_as = {
         .elevator_deactivate_req_fn =   as_deactivate_request,
         .elevator_queue_empty_fn =      as_queue_empty,
         .elevator_completed_req_fn =    as_completed_request,
-        .elevator_former_req_fn =       as_former_request,
-        .elevator_latter_req_fn =       as_latter_request,
+        .elevator_former_req_fn =       elv_rb_former_request,
+        .elevator_latter_req_fn =       elv_rb_latter_request,
         .elevator_set_req_fn =          as_set_request,
         .elevator_put_req_fn =          as_put_request,
         .elevator_may_queue_fn =        as_may_queue,