author		Tejun Heo <tj@kernel.org>	2013-05-14 16:52:37 -0400
committer	Tejun Heo <tj@kernel.org>	2013-05-14 16:52:37 -0400
commit		7f52f98c2a83339b89a27d01296354e5dbb90ad0 (patch)
tree		90306fe5385ad770db8be8b1242ee5a3f114ca84 /block/blk-throttle.c
parent		69df0ab030c94e851b77991c2f5e00bcf5294edc (diff)
blk-throttle: implement dispatch looping
throtl_select_dispatch() only dispatches throtl_quantum bios on each
invocation.  blk_throtl_dispatch_work_fn() in turn depends on
throtl_schedule_next_dispatch() scheduling the next dispatch window
immediately so that undue delays aren't incurred.  This effectively
chains multiple dispatch work item executions back-to-back when there
are more than throtl_quantum bios to dispatch on a given tick.

There is no reason to finish the current work item just to repeat it
immediately.  This patch makes throtl_schedule_next_dispatch() return
%false without doing anything if the current dispatch window is still
open, and updates blk_throtl_dispatch_work_fn() to repeat dispatching
after cpu_relax() on a %false return.

This change will help implement hierarchy support, as dispatching will
be done from pending_timer and immediate rescheduling of a timer
function isn't supported and doesn't make much sense.

While this patch changes how dispatch behaves when there are more than
throtl_quantum bios to dispatch on a single tick, the behavior change
is immaterial.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
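[Editor's note: the control flow described above can be summarized with a small
stand-alone sketch.  This is ordinary user-space C, not kernel code; the helper
bodies are made-up stubs standing in for throtl_select_dispatch() and
throtl_schedule_next_dispatch(), and only the loop shape mirrors the patch.]

#include <stdbool.h>
#include <stdio.h>

static int pending = 25;		/* pretend bios queued on the service_queue */
static const int quantum = 8;		/* stands in for throtl_quantum */

/* stub for throtl_select_dispatch(): dispatch at most one quantum */
static int select_dispatch(void)
{
	int nr = pending < quantum ? pending : quantum;
	pending -= nr;
	return nr;
}

/* stub for throtl_schedule_next_dispatch(sq, false): true once the timer
 * would be armed or nothing is pending, false while the current dispatch
 * window is still open */
static bool schedule_next_dispatch(void)
{
	return pending == 0;
}

int main(void)
{
	bool dispatched = false;

	while (true) {
		int ret = select_dispatch();
		if (ret) {
			printf("bios disp=%d\n", ret);
			dispatched = true;
		}
		if (schedule_next_dispatch())
			break;		/* timer armed or queue drained */
		/* window still open: the kernel drops queue_lock, calls
		 * cpu_relax() and loops instead of requeueing the work item */
	}

	if (dispatched)
		printf("issue the collected bios\n");
	return 0;
}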
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c	82
1 file changed, 56 insertions(+), 26 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a8d23f0cf357..8ee8e4e0a2ba 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -467,24 +467,41 @@ static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
 		   expires - jiffies, jiffies);
 }
 
-static void throtl_schedule_next_dispatch(struct throtl_service_queue *sq)
+/**
+ * throtl_schedule_next_dispatch - schedule the next dispatch cycle
+ * @sq: the service_queue to schedule dispatch for
+ * @force: force scheduling
+ *
+ * Arm @sq->pending_timer so that the next dispatch cycle starts on the
+ * dispatch time of the first pending child.  Returns %true if either timer
+ * is armed or there's no pending child left.  %false if the current
+ * dispatch window is still open and the caller should continue
+ * dispatching.
+ *
+ * If @force is %true, the dispatch timer is always scheduled and this
+ * function is guaranteed to return %true.  This is to be used when the
+ * caller can't dispatch itself and needs to invoke pending_timer
+ * unconditionally.  Note that forced scheduling is likely to induce short
+ * delay before dispatch starts even if @sq->first_pending_disptime is not
+ * in the future and thus shouldn't be used in hot paths.
+ */
+static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
+					  bool force)
 {
-	struct throtl_data *td = sq_to_td(sq);
-
 	/* any pending children left? */
 	if (!sq->nr_pending)
-		return;
+		return true;
 
 	update_min_dispatch_time(sq);
 
 	/* is the next dispatch time in the future? */
-	if (time_after(sq->first_pending_disptime, jiffies)) {
+	if (force || time_after(sq->first_pending_disptime, jiffies)) {
 		throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
-		return;
+		return true;
 	}
 
-	/* kick immediate execution */
-	queue_work(kthrotld_workqueue, &td->dispatch_work);
+	/* tell the caller to continue dispatching */
+	return false;
 }
 
 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
@@ -930,39 +947,47 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
 					      dispatch_work);
 	struct throtl_service_queue *sq = &td->service_queue;
 	struct request_queue *q = td->queue;
-	unsigned int nr_disp = 0;
 	struct bio_list bio_list_on_stack;
 	struct bio *bio;
 	struct blk_plug plug;
-	int rw;
+	bool dispatched = false;
+	int rw, ret;
 
 	spin_lock_irq(q->queue_lock);
 
 	bio_list_init(&bio_list_on_stack);
 
-	throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
-		   td->nr_queued[READ] + td->nr_queued[WRITE],
-		   td->nr_queued[READ], td->nr_queued[WRITE]);
+	while (true) {
+		throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
+			   td->nr_queued[READ] + td->nr_queued[WRITE],
+			   td->nr_queued[READ], td->nr_queued[WRITE]);
+
+		ret = throtl_select_dispatch(sq);
+		if (ret) {
+			for (rw = READ; rw <= WRITE; rw++) {
+				bio_list_merge(&bio_list_on_stack, &sq->bio_lists[rw]);
+				bio_list_init(&sq->bio_lists[rw]);
+			}
+			throtl_log(sq, "bios disp=%u", ret);
+			dispatched = true;
+		}
 
-	nr_disp = throtl_select_dispatch(sq);
+		if (throtl_schedule_next_dispatch(sq, false))
+			break;
 
-	if (nr_disp) {
-		for (rw = READ; rw <= WRITE; rw++) {
-			bio_list_merge(&bio_list_on_stack, &sq->bio_lists[rw]);
-			bio_list_init(&sq->bio_lists[rw]);
-		}
-		throtl_log(sq, "bios disp=%u", nr_disp);
+		/* this dispatch windows is still open, relax and repeat */
+		spin_unlock_irq(q->queue_lock);
+		cpu_relax();
+		spin_lock_irq(q->queue_lock);
 	}
 
-	throtl_schedule_next_dispatch(sq);
-
 	spin_unlock_irq(q->queue_lock);
 
 	/*
 	 * If we dispatched some requests, unplug the queue to make sure
 	 * immediate dispatch
 	 */
-	if (nr_disp) {
+	if (dispatched) {
 		blk_start_plug(&plug);
 		while((bio = bio_list_pop(&bio_list_on_stack)))
 			generic_make_request(bio);
@@ -1078,7 +1103,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 
 	if (tg->flags & THROTL_TG_PENDING) {
 		tg_update_disptime(tg);
-		throtl_schedule_next_dispatch(sq->parent_sq);
+		throtl_schedule_next_dispatch(sq->parent_sq, true);
 	}
 
 	blkg_conf_finish(&ctx);
@@ -1229,10 +1254,15 @@ queue_bio:
 	throtl_add_bio_tg(bio, tg);
 	throttled = true;
 
-	/* update @tg's dispatch time if @tg was empty before @bio */
+	/*
+	 * Update @tg's dispatch time and force schedule dispatch if @tg
+	 * was empty before @bio.  The forced scheduling isn't likely to
+	 * cause undue delay as @bio is likely to be dispatched directly if
+	 * its @tg's disptime is not in the future.
+	 */
 	if (tg->flags & THROTL_TG_WAS_EMPTY) {
 		tg_update_disptime(tg);
-		throtl_schedule_next_dispatch(tg->service_queue.parent_sq);
+		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
 	}
 
 out_unlock: