diff options
Diffstat (limited to 'block/blk-throttle.c')
| -rw-r--r-- | block/blk-throttle.c | 41 |
1 files changed, 26 insertions, 15 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 56ad4531b412..381b09bb562b 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
| @@ -355,6 +355,12 @@ throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw) | |||
| 355 | tg->slice_end[rw], jiffies); | 355 | tg->slice_end[rw], jiffies); |
| 356 | } | 356 | } |
| 357 | 357 | ||
/*
 * Set the end of the current throttle slice for the given direction (rw),
 * rounding jiffy_end up to the next throtl_slice boundary so slice_end
 * always lands on a whole-slice edge.
 *
 * NOTE(review): @td is currently unused here; presumably kept so the
 * signature matches the sibling slice helpers (e.g. throtl_extend_slice)
 * — confirm against the rest of the file.
 */
static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}
| 363 | |||
| 358 | static inline void throtl_extend_slice(struct throtl_data *td, | 364 | static inline void throtl_extend_slice(struct throtl_data *td, |
| 359 | struct throtl_grp *tg, bool rw, unsigned long jiffy_end) | 365 | struct throtl_grp *tg, bool rw, unsigned long jiffy_end) |
| 360 | { | 366 | { |
| @@ -391,6 +397,16 @@ throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw) | |||
| 391 | if (throtl_slice_used(td, tg, rw)) | 397 | if (throtl_slice_used(td, tg, rw)) |
| 392 | return; | 398 | return; |
| 393 | 399 | ||
| 400 | /* | ||
| 401 | * A bio has been dispatched. Also adjust slice_end. It might happen | ||
| 402 | * that initially cgroup limit was very low resulting in high | ||
| 403 | * slice_end, but later limit was bumped up and bio was dispatched | ||
| 404 | * sooner, then we need to reduce slice_end. A high bogus slice_end | ||
| 405 | * is bad because it does not allow new slice to start. | ||
| 406 | */ | ||
| 407 | |||
| 408 | throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice); | ||
| 409 | |||
| 394 | time_elapsed = jiffies - tg->slice_start[rw]; | 410 | time_elapsed = jiffies - tg->slice_start[rw]; |
| 395 | 411 | ||
| 396 | nr_slices = time_elapsed / throtl_slice; | 412 | nr_slices = time_elapsed / throtl_slice; |
| @@ -645,7 +661,7 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg, | |||
| 645 | { | 661 | { |
| 646 | unsigned int nr_reads = 0, nr_writes = 0; | 662 | unsigned int nr_reads = 0, nr_writes = 0; |
| 647 | unsigned int max_nr_reads = throtl_grp_quantum*3/4; | 663 | unsigned int max_nr_reads = throtl_grp_quantum*3/4; |
| 648 | unsigned int max_nr_writes = throtl_grp_quantum - nr_reads; | 664 | unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads; |
| 649 | struct bio *bio; | 665 | struct bio *bio; |
| 650 | 666 | ||
| 651 | /* Try to dispatch 75% READS and 25% WRITES */ | 667 | /* Try to dispatch 75% READS and 25% WRITES */ |
| @@ -709,26 +725,21 @@ static void throtl_process_limit_change(struct throtl_data *td) | |||
| 709 | struct throtl_grp *tg; | 725 | struct throtl_grp *tg; |
| 710 | struct hlist_node *pos, *n; | 726 | struct hlist_node *pos, *n; |
| 711 | 727 | ||
| 712 | /* | ||
| 713 | * Make sure atomic_inc() effects from | ||
| 714 | * throtl_update_blkio_group_read_bps(), group of functions are | ||
| 715 | * visible. | ||
| 716 | * Is this required or smp_mb__after_atomic_inc() was suffcient | ||
| 717 | * after the atomic_inc(). | ||
| 718 | */ | ||
| 719 | smp_rmb(); | ||
| 720 | if (!atomic_read(&td->limits_changed)) | 728 | if (!atomic_read(&td->limits_changed)) |
| 721 | return; | 729 | return; |
| 722 | 730 | ||
| 723 | throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed)); | 731 | throtl_log(td, "limit changed =%d", atomic_read(&td->limits_changed)); |
| 724 | 732 | ||
| 725 | hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) { | 733 | /* |
| 726 | /* | 734 | * Make sure updates from throtl_update_blkio_group_read_bps() group |
| 727 | * Do I need an smp_rmb() here to make sure tg->limits_changed | 735 | * of functions to tg->limits_changed are visible. We do not |
| 728 | * update is visible. I am relying on smp_rmb() at the | 736 | * want update td->limits_changed to be visible but update to |
| 729 | * beginning of function and not putting a new one here. | 737 | * tg->limits_changed not being visible yet on this cpu. Hence |
| 730 | */ | 738 | * the read barrier. |
| 739 | */ | ||
| 740 | smp_rmb(); | ||
| 731 | 741 | ||
| 742 | hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) { | ||
| 732 | if (throtl_tg_on_rr(tg) && tg->limits_changed) { | 743 | if (throtl_tg_on_rr(tg) && tg->limits_changed) { |
| 733 | throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu" | 744 | throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu" |
| 734 | " riops=%u wiops=%u", tg->bps[READ], | 745 | " riops=%u wiops=%u", tg->bps[READ], |
