author		Jens Axboe <jens.axboe@oracle.com>	2007-04-26 06:54:48 -0400
committer	Jens Axboe <axboe@nelson.home.kernel.dk>	2007-04-30 03:01:22 -0400
commit		498d3aa2b4f791059acd8c942ee8fa15c2ce36c2 (patch)
tree		4223b993fe6eb11a0ab57cec3961c237605362a2 /block
parent		67060e37994444ee9c0bd2413c8baa6cc58e7adb (diff)
[PATCH] cfq-iosched: style cleanups and comments
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	66
1 file changed, 50 insertions, 16 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6a6a5f7930d8..29284fa06e6b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -222,7 +222,7 @@ CFQ_CFQQ_FNS(slice_new);
 
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
 static void cfq_dispatch_insert(request_queue_t *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, unsigned int, struct task_struct *, gfp_t);
 
 /*
  * scheduler run of queue, if there are requests pending and no one in the
@@ -389,6 +389,9 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
 	}
 }
 
+/*
+ * The below is leftmost cache rbtree addon
+ */
 static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
 {
 	if (!root->left)
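
The "leftmost cache" named in the new comment is why cfq_rb_first() only walks the tree when root->left is unset: the service tree caches a pointer to its leftmost (lowest-key) node, so the next queue to service is found in O(1) in the common case. A minimal user-space sketch of the idea, using a plain binary search tree with hypothetical names rather than the kernel's rb_node API:

#include <stddef.h>

struct node {
	unsigned long key;
	struct node *left, *right;
};

struct cached_tree {
	struct node *root;
	struct node *leftmost;	/* smallest key, or NULL when the cache is cold */
};

/* O(1) in the common case; walk down from the root only when the
 * cache was invalidated, e.g. after the old leftmost was removed. */
static struct node *tree_first(struct cached_tree *t)
{
	if (!t->leftmost) {
		struct node *n = t->root;

		while (n && n->left)
			n = n->left;
		t->leftmost = n;
	}
	return t->leftmost;
}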
@@ -442,13 +445,18 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
 	return ((cfqd->busy_queues - 1) * cfq_prio_slice(cfqd, 1, 0));
 }
 
+/*
+ * The cfqd->service_tree holds all pending cfq_queue's that have
+ * requests waiting to be processed. It is sorted in the order that
+ * we will service the queues.
+ */
 static void cfq_service_tree_add(struct cfq_data *cfqd,
 				    struct cfq_queue *cfqq)
 {
 	struct rb_node **p = &cfqd->service_tree.rb.rb_node;
 	struct rb_node *parent = NULL;
 	unsigned long rb_key;
-	int left = 1;
+	int left;
 
 	rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
 	rb_key += cfqq->slice_resid;
@@ -464,6 +472,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
 		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
 	}
 
+	left = 1;
 	while (*p) {
 		struct cfq_queue *__cfqq;
 		struct rb_node **n;
@@ -503,17 +512,16 @@ static void cfq_service_tree_add(struct cfq_data *cfqd,
 	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
 }
 
+/*
+ * Update cfqq's position in the service tree.
+ */
 static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
 {
-	struct cfq_data *cfqd = cfqq->cfqd;
-
 	/*
 	 * Resorting requires the cfqq to be on the RR list already.
 	 */
-	if (!cfq_cfqq_on_rr(cfqq))
-		return;
-
-	cfq_service_tree_add(cfqd, cfqq);
+	if (cfq_cfqq_on_rr(cfqq))
+		cfq_service_tree_add(cfqq->cfqd, cfqq);
 }
 
 /*
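
cfq_service_tree_add() is also what keeps the leftmost cache coherent on insertion: rb_key orders queues by projected service time (cfq_slice_offset() plus jiffies plus any slice residue), and the left flag, set to 1 just before the descent per the hunk above, records whether the walk ever took a right branch; if it never did, the new node is the new leftmost. A sketch of the same bookkeeping on the toy tree above (hypothetical names, not the kernel API):

/* Insert n while keeping the leftmost cache valid: if the descent
 * never takes a right branch, n is the new minimum. */
static void tree_insert(struct cached_tree *t, struct node *n)
{
	struct node **p = &t->root;
	int left = 1;

	while (*p) {
		if (n->key < (*p)->key)
			p = &(*p)->left;
		else {
			p = &(*p)->right;
			left = 0;	/* took a right branch at least once */
		}
	}
	n->left = n->right = NULL;
	*p = n;
	if (left)
		t->leftmost = n;	/* new leftmost: update the cache */
}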
@@ -530,6 +538,10 @@ cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	cfq_resort_rr_list(cfqq, 0);
 }
 
+/*
+ * Called when the cfqq no longer has requests pending, remove it from
+ * the service tree.
+ */
 static inline void
 cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
@@ -654,8 +666,7 @@ static void cfq_remove_request(struct request *rq)
 	}
 }
 
-static int
-cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
+static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct request *__rq;
@@ -781,6 +792,10 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted,
 		__cfq_slice_expired(cfqd, cfqq, preempted, timed_out);
 }
 
+/*
+ * Get next queue for service. Unless we have a queue preemption,
+ * we'll simply select the first cfqq in the service tree.
+ */
 static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq = NULL;
@@ -792,10 +807,11 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 		cfqq = list_entry_cfqq(cfqd->cur_rr.next);
 	} else if (!RB_EMPTY_ROOT(&cfqd->service_tree.rb)) {
 		struct rb_node *n = cfq_rb_first(&cfqd->service_tree);
-		unsigned long end;
 
 		cfqq = rb_entry(n, struct cfq_queue, rb_node);
 		if (cfq_class_idle(cfqq)) {
+			unsigned long end;
+
 			/*
 			 * if we have idle queues and no rt or be queues had
 			 * pending requests, either allow immediate service if
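
The grace-period test that follows this comment compares jiffies against an end time, so it needs the kernel's wrap-safe time macros rather than a plain < on unsigned counters. A sketch of that comparison, with time_before() reimplemented locally and the surrounding names invented for illustration:

/* Wrap-safe "a is before b" on free-running tick counters, matching
 * the definition of the kernel's time_before() macro. */
#define time_before(a, b)	((long)((a) - (b)) < 0)

/* Hypothetical helper: may the idle-class queue run yet? */
static int idle_grace_expired(unsigned long now, unsigned long last_end,
			      unsigned long grace)
{
	unsigned long end = last_end + grace;

	return !time_before(now, end);
}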
@@ -813,6 +829,9 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 	return cfqq;
 }
 
+/*
+ * Get and set a new active queue for service.
+ */
 static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq;
@@ -898,6 +917,9 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
 }
 
+/*
+ * Move request from internal lists to the request queue dispatch list.
+ */
 static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
 {
 	struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -944,7 +966,8 @@ cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 }
 
 /*
- * get next queue for service
+ * Select a queue for service. If we have a current active queue,
+ * check whether to continue servicing it, or retrieve and set a new one.
  */
 static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 {
@@ -985,6 +1008,10 @@ keep_queue:
 	return cfqq;
 }
 
+/*
+ * Dispatch some requests from cfqq, moving them to the request queue
+ * dispatch list.
+ */
 static int
 __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			int max_dispatch)
@@ -1059,6 +1086,10 @@ static int cfq_forced_dispatch_cfqqs(struct list_head *list)
 	return dispatched;
 }
 
+/*
+ * Drain our current requests. Used for barriers and when switching
+ * io schedulers on-the-fly.
+ */
 static int cfq_forced_dispatch(struct cfq_data *cfqd)
 {
 	int dispatched = 0;
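
The drain described in the new comment is a simple loop shape: keep taking a queue with pending requests and push everything it holds to the dispatch list until nothing is busy. A self-contained user-space sketch of that loop (hypothetical types, not the cfqd structures):

struct toy_queue {
	struct toy_queue *next;	/* list of queues with pending requests */
	int nr_pending;
};

struct toy_sched {
	struct toy_queue *busy;
};

/* Drain: move (here, just count) every pending request, queue by
 * queue, until nothing is left. Returns the number dispatched. */
static int forced_dispatch(struct toy_sched *sched)
{
	int dispatched = 0;

	while (sched->busy) {
		struct toy_queue *q = sched->busy;

		sched->busy = q->next;
		dispatched += q->nr_pending;
		q->nr_pending = 0;
	}
	return dispatched;
}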
@@ -1224,10 +1255,6 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
 	}
 }
 
-
-/*
- * Called with interrupts disabled
- */
 static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 {
 	struct cfq_data *cfqd = cic->key;
@@ -1241,6 +1268,10 @@ static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 	}
 }
 
+/*
+ * The process that ioc belongs to has exited, we need to clean up
+ * and put the internal structures we have that belongs to that process.
+ */
 static void cfq_exit_io_context(struct io_context *ioc)
 {
 	struct cfq_io_context *__cic;
@@ -1427,6 +1458,9 @@ out:
 	return cfqq;
 }
 
+/*
+ * We drop cfq io contexts lazily, so we may find a dead one.
+ */
 static void
 cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
 {
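
Lazy dropping, as the new comment notes, means exit paths do not eagerly unlink every cfq_io_context; a later lookup recognizes a dead entry (its key cleared) and removes it then. A sketch of the pattern on a plain singly-linked list (hypothetical names; the real code keeps these in an rbtree and takes locking care this omits):

#include <stddef.h>

struct toy_cic {
	void *key;		/* NULL once the owning queue data is gone */
	struct toy_cic *next;
};

/* Lookup that prunes dead entries as a side effect: nothing removes
 * a context when its owner dies, the next walk that trips over it does.
 * (A real implementation would also free the unlinked entry.) */
static struct toy_cic *cic_lookup(struct toy_cic **head, void *key)
{
	struct toy_cic **pp = head;

	while (*pp) {
		struct toy_cic *c = *pp;

		if (!c->key) {
			*pp = c->next;	/* dead: drop it lazily */
			continue;
		}
		if (c->key == key)
			return c;
		pp = &c->next;
	}
	return NULL;
}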