diff options
author | Tejun Heo <tj@kernel.org> | 2013-05-14 16:52:33 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2013-05-14 16:52:33 -0400 |
commit | e2d57e60195a65e2f161ac1229ec9c91935e0240 (patch) | |
tree | 768bddc4827b2f9165b3a4b9e24dc0811fad07e6 /block | |
parent | 0f3457f60edc57332bf6564fa00d561a4372dcb9 (diff) |
blk-throttle: pass around throtl_service_queue instead of throtl_data
throtl_service_queue will be used as the basic block to implement
hierarchy support. Pass around throtl_service_queue *sq instead of
throtl_data *td in the following functions which will be used across
multiple levels of hierarchy.
* [__]throtl_enqueue/dequeue_tg()
* throtl_add_bio_tg()
* tg_update_disptime()
* throtl_select_dispatch()
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-throttle.c | 53 |
1 file changed, 28 insertions(+), 25 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a489391f9153..9660ec8d0375 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -352,31 +352,33 @@ static void tg_service_queue_add(struct throtl_service_queue *sq, | |||
352 | rb_insert_color(&tg->rb_node, &sq->pending_tree); | 352 | rb_insert_color(&tg->rb_node, &sq->pending_tree); |
353 | } | 353 | } |
354 | 354 | ||
355 | static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg) | 355 | static void __throtl_enqueue_tg(struct throtl_service_queue *sq, |
356 | struct throtl_grp *tg) | ||
356 | { | 357 | { |
357 | struct throtl_service_queue *sq = &td->service_queue; | ||
358 | |||
359 | tg_service_queue_add(sq, tg); | 358 | tg_service_queue_add(sq, tg); |
360 | tg->flags |= THROTL_TG_PENDING; | 359 | tg->flags |= THROTL_TG_PENDING; |
361 | sq->nr_pending++; | 360 | sq->nr_pending++; |
362 | } | 361 | } |
363 | 362 | ||
364 | static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg) | 363 | static void throtl_enqueue_tg(struct throtl_service_queue *sq, |
364 | struct throtl_grp *tg) | ||
365 | { | 365 | { |
366 | if (!(tg->flags & THROTL_TG_PENDING)) | 366 | if (!(tg->flags & THROTL_TG_PENDING)) |
367 | __throtl_enqueue_tg(td, tg); | 367 | __throtl_enqueue_tg(sq, tg); |
368 | } | 368 | } |
369 | 369 | ||
370 | static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg) | 370 | static void __throtl_dequeue_tg(struct throtl_service_queue *sq, |
371 | struct throtl_grp *tg) | ||
371 | { | 372 | { |
372 | throtl_rb_erase(&tg->rb_node, &td->service_queue); | 373 | throtl_rb_erase(&tg->rb_node, sq); |
373 | tg->flags &= ~THROTL_TG_PENDING; | 374 | tg->flags &= ~THROTL_TG_PENDING; |
374 | } | 375 | } |
375 | 376 | ||
376 | static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg) | 377 | static void throtl_dequeue_tg(struct throtl_service_queue *sq, |
378 | struct throtl_grp *tg) | ||
377 | { | 379 | { |
378 | if (tg->flags & THROTL_TG_PENDING) | 380 | if (tg->flags & THROTL_TG_PENDING) |
379 | __throtl_dequeue_tg(td, tg); | 381 | __throtl_dequeue_tg(sq, tg); |
380 | } | 382 | } |
381 | 383 | ||
382 | /* Call with queue lock held */ | 384 | /* Call with queue lock held */ |
@@ -689,8 +691,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) | |||
689 | throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw); | 691 | throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw); |
690 | } | 692 | } |
691 | 693 | ||
692 | static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg, | 694 | static void throtl_add_bio_tg(struct throtl_service_queue *sq, |
693 | struct bio *bio) | 695 | struct throtl_grp *tg, struct bio *bio) |
694 | { | 696 | { |
695 | bool rw = bio_data_dir(bio); | 697 | bool rw = bio_data_dir(bio); |
696 | 698 | ||
@@ -698,11 +700,12 @@ static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg, | |||
698 | /* Take a bio reference on tg */ | 700 | /* Take a bio reference on tg */ |
699 | blkg_get(tg_to_blkg(tg)); | 701 | blkg_get(tg_to_blkg(tg)); |
700 | tg->nr_queued[rw]++; | 702 | tg->nr_queued[rw]++; |
701 | td->nr_queued[rw]++; | 703 | tg->td->nr_queued[rw]++; |
702 | throtl_enqueue_tg(td, tg); | 704 | throtl_enqueue_tg(sq, tg); |
703 | } | 705 | } |
704 | 706 | ||
705 | static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg) | 707 | static void tg_update_disptime(struct throtl_service_queue *sq, |
708 | struct throtl_grp *tg) | ||
706 | { | 709 | { |
707 | unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime; | 710 | unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime; |
708 | struct bio *bio; | 711 | struct bio *bio; |
@@ -717,9 +720,9 @@ static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg) | |||
717 | disptime = jiffies + min_wait; | 720 | disptime = jiffies + min_wait; |
718 | 721 | ||
719 | /* Update dispatch time */ | 722 | /* Update dispatch time */ |
720 | throtl_dequeue_tg(td, tg); | 723 | throtl_dequeue_tg(sq, tg); |
721 | tg->disptime = disptime; | 724 | tg->disptime = disptime; |
722 | throtl_enqueue_tg(td, tg); | 725 | throtl_enqueue_tg(sq, tg); |
723 | } | 726 | } |
724 | 727 | ||
725 | static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw, | 728 | static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw, |
@@ -774,11 +777,11 @@ static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl) | |||
774 | return nr_reads + nr_writes; | 777 | return nr_reads + nr_writes; |
775 | } | 778 | } |
776 | 779 | ||
777 | static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl) | 780 | static int throtl_select_dispatch(struct throtl_service_queue *sq, |
781 | struct bio_list *bl) | ||
778 | { | 782 | { |
779 | unsigned int nr_disp = 0; | 783 | unsigned int nr_disp = 0; |
780 | struct throtl_grp *tg; | 784 | struct throtl_grp *tg; |
781 | struct throtl_service_queue *sq = &td->service_queue; | ||
782 | 785 | ||
783 | while (1) { | 786 | while (1) { |
784 | tg = throtl_rb_first(sq); | 787 | tg = throtl_rb_first(sq); |
@@ -789,12 +792,12 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl) | |||
789 | if (time_before(jiffies, tg->disptime)) | 792 | if (time_before(jiffies, tg->disptime)) |
790 | break; | 793 | break; |
791 | 794 | ||
792 | throtl_dequeue_tg(td, tg); | 795 | throtl_dequeue_tg(sq, tg); |
793 | 796 | ||
794 | nr_disp += throtl_dispatch_tg(tg, bl); | 797 | nr_disp += throtl_dispatch_tg(tg, bl); |
795 | 798 | ||
796 | if (tg->nr_queued[0] || tg->nr_queued[1]) | 799 | if (tg->nr_queued[0] || tg->nr_queued[1]) |
797 | tg_update_disptime(td, tg); | 800 | tg_update_disptime(sq, tg); |
798 | 801 | ||
799 | if (nr_disp >= throtl_quantum) | 802 | if (nr_disp >= throtl_quantum) |
800 | break; | 803 | break; |
@@ -822,7 +825,7 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work) | |||
822 | td->nr_queued[READ] + td->nr_queued[WRITE], | 825 | td->nr_queued[READ] + td->nr_queued[WRITE], |
823 | td->nr_queued[READ], td->nr_queued[WRITE]); | 826 | td->nr_queued[READ], td->nr_queued[WRITE]); |
824 | 827 | ||
825 | nr_disp = throtl_select_dispatch(td, &bio_list_on_stack); | 828 | nr_disp = throtl_select_dispatch(&td->service_queue, &bio_list_on_stack); |
826 | 829 | ||
827 | if (nr_disp) | 830 | if (nr_disp) |
828 | throtl_log(td, "bios disp=%u", nr_disp); | 831 | throtl_log(td, "bios disp=%u", nr_disp); |
@@ -949,7 +952,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf, | |||
949 | throtl_start_new_slice(tg, 1); | 952 | throtl_start_new_slice(tg, 1); |
950 | 953 | ||
951 | if (tg->flags & THROTL_TG_PENDING) { | 954 | if (tg->flags & THROTL_TG_PENDING) { |
952 | tg_update_disptime(td, tg); | 955 | tg_update_disptime(&td->service_queue, tg); |
953 | throtl_schedule_next_dispatch(td); | 956 | throtl_schedule_next_dispatch(td); |
954 | } | 957 | } |
955 | 958 | ||
@@ -1103,11 +1106,11 @@ queue_bio: | |||
1103 | tg->nr_queued[READ], tg->nr_queued[WRITE]); | 1106 | tg->nr_queued[READ], tg->nr_queued[WRITE]); |
1104 | 1107 | ||
1105 | bio_associate_current(bio); | 1108 | bio_associate_current(bio); |
1106 | throtl_add_bio_tg(q->td, tg, bio); | 1109 | throtl_add_bio_tg(&q->td->service_queue, tg, bio); |
1107 | throttled = true; | 1110 | throttled = true; |
1108 | 1111 | ||
1109 | if (update_disptime) { | 1112 | if (update_disptime) { |
1110 | tg_update_disptime(td, tg); | 1113 | tg_update_disptime(&td->service_queue, tg); |
1111 | throtl_schedule_next_dispatch(td); | 1114 | throtl_schedule_next_dispatch(td); |
1112 | } | 1115 | } |
1113 | 1116 | ||
@@ -1139,7 +1142,7 @@ void blk_throtl_drain(struct request_queue *q) | |||
1139 | bio_list_init(&bl); | 1142 | bio_list_init(&bl); |
1140 | 1143 | ||
1141 | while ((tg = throtl_rb_first(sq))) { | 1144 | while ((tg = throtl_rb_first(sq))) { |
1142 | throtl_dequeue_tg(td, tg); | 1145 | throtl_dequeue_tg(sq, tg); |
1143 | 1146 | ||
1144 | while ((bio = bio_list_peek(&tg->bio_lists[READ]))) | 1147 | while ((bio = bio_list_peek(&tg->bio_lists[READ]))) |
1145 | tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl); | 1148 | tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl); |