| author | Tejun Heo <tj@kernel.org> | 2013-05-14 16:52:32 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2013-05-14 16:52:32 -0400 |
| commit | 6a525600ffeb9e0d6cbbebda49eb89d6d3408c2b | |
| tree | f4ced3b39162c54ecf12f0171b781e60d54e4a7c /block | |
| parent | a9131a27e2a3272df2207277a2be90377ce75fc6 | |
blk-throttle: remove pointless total_nr_queued() optimizations
total_nr_queued() is used in several places to avoid performing
certain operations when the throtl_data is empty. This is usually
pointless, as those paths aren't normally traveled when no bio is
queued.
* throtl_schedule_delayed_work() skips scheduling the dispatch work
  item if @td doesn't have any bios queued; however, the only path
  that can call it with an empty @td is tg_set_conf(), which isn't
  something we should be optimizing for.
* throtl_schedule_next_dispatch() takes a quick exit if @td is empty;
  however, right after that it hits a BUG_ON() if the service tree is
  empty. The two conditions are equivalent, so it can simply test
  @st->count for the quick exit (see the sketch after this list).
* blk_throtl_dispatch_work_fn() skips dispatch if @td is empty. This
  work function isn't normally invoked when @td is empty. The only
  possibility is from tg_set_conf(), and in that case the normal
  dispatch path handles an empty @td just fine. There is no need for
  a special skip path.
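To illustrate the second point, here is a minimal, self-contained
user-space model (not kernel code; the names model_td, enqueue_bio and
dispatch_bio are purely illustrative) of the invariant the patch relies
on: a group sits on the service tree exactly while it has bios queued,
so "no bios queued in @td" and "empty service tree" are the same
condition.

```c
#include <assert.h>
#include <stdio.h>

/* Toy stand-in for throtl_data: bios queued per direction plus the
 * number of groups currently on the service tree. */
struct model_td {
	unsigned int nr_queued[2];	/* [0] = READ, [1] = WRITE */
	unsigned int st_count;		/* groups on the service tree */
};

/* Queueing the first bio puts the (single, modeled) group on the tree. */
static void enqueue_bio(struct model_td *td, int rw)
{
	if (td->nr_queued[0] + td->nr_queued[1] == 0)
		td->st_count++;
	td->nr_queued[rw]++;
}

/* Dispatching the last bio takes the group back off the tree. */
static void dispatch_bio(struct model_td *td, int rw)
{
	td->nr_queued[rw]--;
	if (td->nr_queued[0] + td->nr_queued[1] == 0)
		td->st_count--;
}

int main(void)
{
	struct model_td td = { {0, 0}, 0 };

	enqueue_bio(&td, 0);
	enqueue_bio(&td, 1);
	assert(td.st_count > 0);	/* bios queued -> tree non-empty */

	dispatch_bio(&td, 0);
	dispatch_bio(&td, 1);
	assert(td.st_count == 0);	/* td empty -> tree empty */

	/* "no bios queued" and "empty service tree" always coincide, so
	 * testing st_count alone is as good as summing nr_queued[]. */
	printf("nr_queued=%u st_count=%u\n",
	       td.nr_queued[0] + td.nr_queued[1], td.st_count);
	return 0;
}
```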
This patch removes the above three unnecessary optimizations, which
leaves the throtl_log() call in blk_throtl_dispatch_work_fn() as the
only user of total_nr_queued(). Remove total_nr_queued() and open-code
it in throtl_log(). I don't think we need td->nr_queued[] at all;
maybe we can remove it later.
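Concretely, the helper that goes away and the open-coded form that
replaces its last use look like this (excerpted from the hunks below):

```c
/* Removed helper: */
static inline unsigned int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}

/* Its last use, now open-coded in blk_throtl_dispatch_work_fn(): */
throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
	   td->nr_queued[READ] + td->nr_queued[WRITE],
	   td->nr_queued[READ], td->nr_queued[WRITE]);
```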
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-throttle.c | 29 |
1 file changed, 7 insertions(+), 22 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 507b1c608941..dbeef303f27b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -166,11 +166,6 @@ THROTL_TG_FNS(on_rr);
 #define throtl_log(td, fmt, args...)	\
 	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
 
-static inline unsigned int total_nr_queued(struct throtl_data *td)
-{
-	return td->nr_queued[0] + td->nr_queued[1];
-}
-
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
  * system_wq once there are some groups on the alloc_list waiting for
@@ -402,25 +397,18 @@ static void throtl_schedule_delayed_work(struct throtl_data *td,
 {
 	struct delayed_work *dwork = &td->dispatch_work;
 
-	if (total_nr_queued(td)) {
-		mod_delayed_work(kthrotld_workqueue, dwork, delay);
-		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
-				delay, jiffies);
-	}
+	mod_delayed_work(kthrotld_workqueue, dwork, delay);
+	throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
 }
 
 static void throtl_schedule_next_dispatch(struct throtl_data *td)
 {
 	struct throtl_rb_root *st = &td->tg_service_tree;
 
-	/*
-	 * If there are more bios pending, schedule more work.
-	 */
-	if (!total_nr_queued(td))
+	/* any pending children left? */
+	if (!st->count)
 		return;
 
-	BUG_ON(!st->count);
-
 	update_min_dispatch_time(st);
 
 	if (time_before_eq(st->min_disptime, jiffies))
@@ -844,14 +832,11 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
 
 	spin_lock_irq(q->queue_lock);
 
-	if (!total_nr_queued(td))
-		goto out;
-
 	bio_list_init(&bio_list_on_stack);
 
 	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
-			total_nr_queued(td), td->nr_queued[READ],
-			td->nr_queued[WRITE]);
+		   td->nr_queued[READ] + td->nr_queued[WRITE],
+		   td->nr_queued[READ], td->nr_queued[WRITE]);
 
 	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);
 
@@ -859,7 +844,7 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
 	throtl_log(td, "bios disp=%u", nr_disp);
 
 	throtl_schedule_next_dispatch(td);
-out:
+
 	spin_unlock_irq(q->queue_lock);
 
 	/*