about summary refs log tree commit diff stats
path: root/block/blk-throttle.c
diff options
context:
space:
mode:
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--  block/blk-throttle.c  29
1 files changed, 7 insertions, 22 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 507b1c608941..dbeef303f27b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -166,11 +166,6 @@ THROTL_TG_FNS(on_rr);
 #define throtl_log(td, fmt, args...)	\
 	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
 
-static inline unsigned int total_nr_queued(struct throtl_data *td)
-{
-	return td->nr_queued[0] + td->nr_queued[1];
-}
-
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
  * system_wq once there are some groups on the alloc_list waiting for
@@ -402,25 +397,18 @@ static void throtl_schedule_delayed_work(struct throtl_data *td,
 {
 	struct delayed_work *dwork = &td->dispatch_work;
 
-	if (total_nr_queued(td)) {
-		mod_delayed_work(kthrotld_workqueue, dwork, delay);
-		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
-			   delay, jiffies);
-	}
+	mod_delayed_work(kthrotld_workqueue, dwork, delay);
+	throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
 }
 
 static void throtl_schedule_next_dispatch(struct throtl_data *td)
 {
 	struct throtl_rb_root *st = &td->tg_service_tree;
 
-	/*
-	 * If there are more bios pending, schedule more work.
-	 */
-	if (!total_nr_queued(td))
+	/* any pending children left? */
+	if (!st->count)
 		return;
 
-	BUG_ON(!st->count);
-
 	update_min_dispatch_time(st);
 
 	if (time_before_eq(st->min_disptime, jiffies))
@@ -844,14 +832,11 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (!total_nr_queued(td))
-		goto out;
-
 	bio_list_init(&bio_list_on_stack);
 
 	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
-		   total_nr_queued(td), td->nr_queued[READ],
-		   td->nr_queued[WRITE]);
+		   td->nr_queued[READ] + td->nr_queued[WRITE],
+		   td->nr_queued[READ], td->nr_queued[WRITE]);
 
 	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
 
857 842
@@ -859,7 +844,7 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
 	throtl_log(td, "bios disp=%u", nr_disp);
 
 	throtl_schedule_next_dispatch(td);
-out:
+
 	spin_unlock_irq(q->queue_lock);
 
 	/*