author     Tejun Heo <tj@kernel.org>    2013-05-14 16:52:36 -0400
committer  Tejun Heo <tj@kernel.org>    2013-05-14 16:52:36 -0400
commit     fda6f272c77a7acd798bb247fadc4791574e698b
tree       6814838dcd260197342db0c411cf0ad9173a0daa /block
parent     77216b0484817817a18aaa6b089dffe49070f7c1
blk-throttle: implement sq_to_tg(), sq_to_td() and throtl_log()
Now that both throtl_data and throtl_grp embed throtl_service_queue,
we can unify throtl_log() and throtl_log_tg().
* sq_to_tg() is added. This returns the throtl_grp a service_queue is
embedded in. If the service_queue is the top-level one embedded in
throtl_data, NULL is returned.
* sq_to_td() is added. A service_queue is always associated with a
throtl_data. This function finds the associated td and returns it.
* throtl_log() is updated to take throtl_service_queue instead of
throtl_data. If the service_queue is one embedded in throtl_grp, it
prints the same header as throtl_log_tg() did. If it's one embedded
in throtl_data, it behaves the same as before. This renders
throtl_log_tg() unnecessary. Removed.
This change is necessary for hierarchy support as we're going to be
using the same code paths to dispatch bios to intermediate
service_queues embedded in throtl_grps and the top-level service_queue
embedded in throtl_data.
This patch doesn't make any behavior changes.
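For reference, here is a minimal userspace sketch (not the kernel code;
struct layouts are reduced to the fields the pattern needs, and
container_of() is open-coded) of how sq_to_tg() and sq_to_td() recover
the enclosing object from an embedded service_queue, with parent_sq ==
NULL marking the top level:

/* Userspace sketch only: simplified from block/blk-throttle.c. */
#include <stddef.h>
#include <stdio.h>

/* Open-coded equivalent of the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct throtl_service_queue {
        struct throtl_service_queue *parent_sq; /* NULL for the top-level sq */
};

struct throtl_data {
        struct throtl_service_queue service_queue; /* top-level sq */
};

struct throtl_grp {
        struct throtl_service_queue service_queue; /* per-group sq */
        struct throtl_data *td;                    /* back-pointer to the td */
};

/* Only group-embedded sqs have a parent; the top-level one maps to NULL. */
static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
{
        if (sq && sq->parent_sq)
                return container_of(sq, struct throtl_grp, service_queue);
        return NULL;
}

/* Hop through the group's back-pointer, or unwrap throtl_data directly. */
static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
{
        struct throtl_grp *tg = sq_to_tg(sq);

        return tg ? tg->td : container_of(sq, struct throtl_data, service_queue);
}

int main(void)
{
        struct throtl_data td = { .service_queue = { .parent_sq = NULL } };
        struct throtl_grp tg = {
                .service_queue = { .parent_sq = &td.service_queue },
                .td = &td,
        };

        /* Top-level sq maps to no group; both sqs resolve to the same td. */
        printf("sq_to_tg(top-level) == NULL: %d\n",
               sq_to_tg(&td.service_queue) == NULL);
        printf("sq_to_td(group sq) == &td:   %d\n",
               sq_to_td(&tg.service_queue) == &td);
        return 0;
}

The helpers in the patch below use exactly this parent_sq test and
container_of() unwrapping.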
v2: throtl_log() didn't print a space after the blkg path. Updated so
    that it prints a space after the throtl_grp path. Spotted by Vivek.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-throttle.c | 110
1 file changed, 81 insertions(+), 29 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 00cfdd05f98f..2875ff66e1b9 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -151,16 +151,65 @@ static inline struct throtl_grp *td_root_tg(struct throtl_data *td)
         return blkg_to_tg(td->queue->root_blkg);
 }
 
-#define throtl_log_tg(tg, fmt, args...) do {                            \
-        char __pbuf[128];                                               \
+/**
+ * sq_to_tg - return the throtl_grp the specified service queue belongs to
+ * @sq: the throtl_service_queue of interest
+ *
+ * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
+ * embedded in throtl_data, %NULL is returned.
+ */
+static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
+{
+        if (sq && sq->parent_sq)
+                return container_of(sq, struct throtl_grp, service_queue);
+        else
+                return NULL;
+}
+
+/**
+ * sq_to_td - return throtl_data the specified service queue belongs to
+ * @sq: the throtl_service_queue of interest
+ *
+ * A service_queue can be embedded in either a throtl_grp or throtl_data.
+ * Determine the associated throtl_data accordingly and return it.
+ */
+static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
+{
+        struct throtl_grp *tg = sq_to_tg(sq);
+
+        if (tg)
+                return tg->td;
+        else
+                return container_of(sq, struct throtl_data, service_queue);
+}
+
+/**
+ * throtl_log - log debug message via blktrace
+ * @sq: the service_queue being reported
+ * @fmt: printf format string
+ * @args: printf args
+ *
+ * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
+ * throtl_grp; otherwise, just "throtl".
+ *
+ * TODO: this should be made a function and name formatting should happen
+ * after testing whether blktrace is enabled.
+ */
+#define throtl_log(sq, fmt, args...) do {                               \
+        struct throtl_grp *__tg = sq_to_tg((sq));                       \
+        struct throtl_data *__td = sq_to_td((sq));                      \
+                                                                        \
+        (void)__td;                                                     \
+        if ((__tg)) {                                                   \
+                char __pbuf[128];                                       \
                                                                         \
-        blkg_path(tg_to_blkg(tg), __pbuf, sizeof(__pbuf));              \
-        blk_add_trace_msg((tg)->td->queue, "throtl %s " fmt, __pbuf, ##args); \
+                blkg_path(tg_to_blkg(__tg), __pbuf, sizeof(__pbuf));    \
+                blk_add_trace_msg(__td->queue, "throtl %s " fmt, __pbuf, ##args); \
+        } else {                                                        \
+                blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
+        }                                                               \
 } while (0)
 
-#define throtl_log(td, fmt, args...)    \
-        blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
-
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
  * system_wq once there are some groups on the alloc_list waiting for
@@ -402,9 +451,10 @@ static void throtl_schedule_delayed_work(struct throtl_data *td,
                                           unsigned long delay)
 {
         struct delayed_work *dwork = &td->dispatch_work;
+        struct throtl_service_queue *sq = &td->service_queue;
 
         mod_delayed_work(kthrotld_workqueue, dwork, delay);
-        throtl_log(td, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
+        throtl_log(sq, "schedule work. delay=%lu jiffies=%lu", delay, jiffies);
 }
 
 static void throtl_schedule_next_dispatch(struct throtl_data *td)
@@ -429,9 +479,10 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
         tg->io_disp[rw] = 0;
         tg->slice_start[rw] = jiffies;
         tg->slice_end[rw] = jiffies + throtl_slice;
-        throtl_log_tg(tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
-                      rw == READ ? 'R' : 'W', tg->slice_start[rw],
-                      tg->slice_end[rw], jiffies);
+        throtl_log(&tg->service_queue,
+                   "[%c] new slice start=%lu end=%lu jiffies=%lu",
+                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
+                   tg->slice_end[rw], jiffies);
 }
 
 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
@@ -444,9 +495,10 @@ static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
                                         unsigned long jiffy_end)
 {
         tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
-        throtl_log_tg(tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
-                      rw == READ ? 'R' : 'W', tg->slice_start[rw],
-                      tg->slice_end[rw], jiffies);
+        throtl_log(&tg->service_queue,
+                   "[%c] extend slice start=%lu end=%lu jiffies=%lu",
+                   rw == READ ? 'R' : 'W', tg->slice_start[rw],
+                   tg->slice_end[rw], jiffies);
 }
 
 /* Determine if previously allocated or extended slice is complete or not */
@@ -511,10 +563,10 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 
         tg->slice_start[rw] += nr_slices * throtl_slice;
 
-        throtl_log_tg(tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
-                        " start=%lu end=%lu jiffies=%lu",
-                        rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
-                        tg->slice_start[rw], tg->slice_end[rw], jiffies);
+        throtl_log(&tg->service_queue,
+                   "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
+                   rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
+                   tg->slice_start[rw], tg->slice_end[rw], jiffies);
 }
 
 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
@@ -852,7 +904,7 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
 
         bio_list_init(&bio_list_on_stack);
 
-        throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
+        throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
                    td->nr_queued[READ] + td->nr_queued[WRITE],
                    td->nr_queued[READ], td->nr_queued[WRITE]);
 
@@ -863,7 +915,7 @@ void blk_throtl_dispatch_work_fn(struct work_struct *work)
                         bio_list_merge(&bio_list_on_stack, &sq->bio_lists[rw]);
                         bio_list_init(&sq->bio_lists[rw]);
                 }
-                throtl_log(td, "bios disp=%u", nr_disp);
+                throtl_log(sq, "bios disp=%u", nr_disp);
         }
 
         throtl_schedule_next_dispatch(td);
@@ -972,9 +1024,10 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
         else
                 *(unsigned int *)((void *)tg + cft->private) = ctx.v;
 
-        throtl_log_tg(tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
-                      tg->bps[READ], tg->bps[WRITE],
-                      tg->iops[READ], tg->iops[WRITE]);
+        throtl_log(&tg->service_queue,
+                   "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
+                   tg->bps[READ], tg->bps[WRITE],
+                   tg->iops[READ], tg->iops[WRITE]);
 
         /*
          * We're already holding queue_lock and know @tg is valid.  Let's
@@ -1131,12 +1184,11 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
         }
 
 queue_bio:
-        throtl_log_tg(tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
-                        " iodisp=%u iops=%u queued=%d/%d",
-                        rw == READ ? 'R' : 'W',
-                        tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
-                        tg->io_disp[rw], tg->iops[rw],
-                        sq->nr_queued[READ], sq->nr_queued[WRITE]);
+        throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
+                   rw == READ ? 'R' : 'W',
+                   tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+                   tg->io_disp[rw], tg->iops[rw],
+                   sq->nr_queued[READ], sq->nr_queued[WRITE]);
 
         bio_associate_current(bio);
         throtl_add_bio_tg(bio, tg);
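Note on the resulting trace output: with the unified macro, the message
shape is selected purely by which service_queue a call site passes. A
service_queue embedded in a throtl_grp yields a message prefixed with
the group's blkg path, while the top-level one embedded in throtl_data
yields the bare "throtl" prefix. Illustrative examples (paths and
values are made up, not captured output):

throtl /grp_a [R] new slice start=4294937296 end=4294937396 jiffies=4294937296
throtl schedule work. delay=0 jiffies=4294937296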