author		Tejun Heo <tj@kernel.org>	2013-05-14 16:52:32 -0400
committer	Tejun Heo <tj@kernel.org>	2013-05-14 16:52:32 -0400
commit		c9e0332e877c1a1ccfe4ba315a437c7a8cf6e575 (patch)
tree		d5bab892cad9080643d619684e9d3dc59f320022 /block
parent		6a525600ffeb9e0d6cbbebda49eb89d6d3408c2b (diff)
blk-throttle: rename throtl_rb_root to throtl_service_queue
throtl_rb_root will be expanded to cover more roles for hierarchy
support. Rename it to throtl_service_queue and make its fields more
descriptive.
* rb -> pending_tree
* left -> first_pending
* count -> nr_pending
* min_disptime -> first_pending_disptime
This patch is purely cosmetic.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
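For quick reference, a minimal sketch of the renamed structure and its initializer as they stand after this patch, lifted from the first hunk of the diff below; the rbtree include is added here only so the snippet stands alone.

#include <linux/rbtree.h>	/* rb_root, rb_node, RB_ROOT (assumed for a standalone snippet) */

/* Renamed from throtl_rb_root; fields follow the mapping listed above. */
struct throtl_service_queue {
	struct rb_root		pending_tree;		/* was rb: RB tree of active tgs */
	struct rb_node		*first_pending;		/* was left: first node in the tree */
	unsigned int		nr_pending;		/* was count: # queued in the tree */
	unsigned long		first_pending_disptime;	/* was min_disptime: disptime of the first tg */
};

/* Replaces THROTL_RB_ROOT; only pending_tree needs an explicit initializer. */
#define THROTL_SERVICE_QUEUE_INITIALIZER				\
	(struct throtl_service_queue){ .pending_tree = RB_ROOT }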
Diffstat (limited to 'block')
-rw-r--r--	block/blk-throttle.c	84
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index dbeef303f27b..b279110ba287 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -26,15 +26,15 @@ static struct blkcg_policy blkcg_policy_throtl;
 /* A workqueue to queue throttle related work */
 static struct workqueue_struct *kthrotld_workqueue;
 
-struct throtl_rb_root {
-	struct rb_root rb;
-	struct rb_node *left;
-	unsigned int count;
-	unsigned long min_disptime;
+struct throtl_service_queue {
+	struct rb_root		pending_tree;	/* RB tree of active tgs */
+	struct rb_node		*first_pending;	/* first node in the tree */
+	unsigned int		nr_pending;	/* # queued in the tree */
+	unsigned long		first_pending_disptime;	/* disptime of the first tg */
 };
 
-#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
-			.count = 0, .min_disptime = 0}
+#define THROTL_SERVICE_QUEUE_INITIALIZER				\
+	(struct throtl_service_queue){ .pending_tree = RB_ROOT }
 
 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
 
@@ -50,7 +50,7 @@ struct throtl_grp {
 	/* must be the first member */
 	struct blkg_policy_data pd;
 
-	/* active throtl group service_tree member */
+	/* active throtl group service_queue member */
 	struct rb_node rb_node;
 
 	/*
@@ -93,7 +93,7 @@ struct throtl_grp {
 struct throtl_data
 {
 	/* service tree for active throtl groups */
-	struct throtl_rb_root tg_service_tree;
+	struct throtl_service_queue service_queue;
 
 	struct request_queue *queue;
 
@@ -296,17 +296,17 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
 	return tg;
 }
 
-static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
+static struct throtl_grp *throtl_rb_first(struct throtl_service_queue *sq)
 {
 	/* Service tree is empty */
-	if (!root->count)
+	if (!sq->nr_pending)
 		return NULL;
 
-	if (!root->left)
-		root->left = rb_first(&root->rb);
+	if (!sq->first_pending)
+		sq->first_pending = rb_first(&sq->pending_tree);
 
-	if (root->left)
-		return rb_entry_tg(root->left);
+	if (sq->first_pending)
+		return rb_entry_tg(sq->first_pending);
 
 	return NULL;
 }
@@ -317,29 +317,29 @@ static void rb_erase_init(struct rb_node *n, struct rb_root *root)
 	RB_CLEAR_NODE(n);
 }
 
-static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
+static void throtl_rb_erase(struct rb_node *n, struct throtl_service_queue *sq)
 {
-	if (root->left == n)
-		root->left = NULL;
-	rb_erase_init(n, &root->rb);
-	--root->count;
+	if (sq->first_pending == n)
+		sq->first_pending = NULL;
+	rb_erase_init(n, &sq->pending_tree);
+	--sq->nr_pending;
 }
 
-static void update_min_dispatch_time(struct throtl_rb_root *st)
+static void update_min_dispatch_time(struct throtl_service_queue *sq)
 {
 	struct throtl_grp *tg;
 
-	tg = throtl_rb_first(st);
+	tg = throtl_rb_first(sq);
 	if (!tg)
 		return;
 
-	st->min_disptime = tg->disptime;
+	sq->first_pending_disptime = tg->disptime;
 }
 
-static void
-tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
+static void tg_service_queue_add(struct throtl_service_queue *sq,
+				 struct throtl_grp *tg)
 {
-	struct rb_node **node = &st->rb.rb_node;
+	struct rb_node **node = &sq->pending_tree.rb_node;
 	struct rb_node *parent = NULL;
 	struct throtl_grp *__tg;
 	unsigned long key = tg->disptime;
@@ -358,19 +358,19 @@ tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
 	}
 
 	if (left)
-		st->left = &tg->rb_node;
+		sq->first_pending = &tg->rb_node;
 
 	rb_link_node(&tg->rb_node, parent, node);
-	rb_insert_color(&tg->rb_node, &st->rb);
+	rb_insert_color(&tg->rb_node, &sq->pending_tree);
 }
 
 static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
 {
-	struct throtl_rb_root *st = &td->tg_service_tree;
+	struct throtl_service_queue *sq = &td->service_queue;
 
-	tg_service_tree_add(st, tg);
+	tg_service_queue_add(sq, tg);
 	throtl_mark_tg_on_rr(tg);
-	st->count++;
+	sq->nr_pending++;
 }
 
 static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -381,7 +381,7 @@ static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
 
 static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
 {
-	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
+	throtl_rb_erase(&tg->rb_node, &td->service_queue);
 	throtl_clear_tg_on_rr(tg);
 }
 
@@ -403,18 +403,18 @@ static void throtl_schedule_delayed_work(struct throtl_data *td,
 
 static void throtl_schedule_next_dispatch(struct throtl_data *td)
 {
-	struct throtl_rb_root *st = &td->tg_service_tree;
+	struct throtl_service_queue *sq = &td->service_queue;
 
 	/* any pending children left? */
-	if (!st->count)
+	if (!sq->nr_pending)
 		return;
 
-	update_min_dispatch_time(st);
+	update_min_dispatch_time(sq);
 
-	if (time_before_eq(st->min_disptime, jiffies))
+	if (time_before_eq(sq->first_pending_disptime, jiffies))
 		throtl_schedule_delayed_work(td, 0);
 	else
-		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
+		throtl_schedule_delayed_work(td, sq->first_pending_disptime - jiffies);
 }
 
 static inline void
@@ -794,10 +794,10 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
 {
 	unsigned int nr_disp = 0;
 	struct throtl_grp *tg;
-	struct throtl_rb_root *st = &td->tg_service_tree;
+	struct throtl_service_queue *sq = &td->service_queue;
 
 	while (1) {
-		tg = throtl_rb_first(st);
+		tg = throtl_rb_first(sq);
 
 		if (!tg)
 			break;
@@ -1145,7 +1145,7 @@ void blk_throtl_drain(struct request_queue *q)
 	__releases(q->queue_lock) __acquires(q->queue_lock)
 {
 	struct throtl_data *td = q->td;
-	struct throtl_rb_root *st = &td->tg_service_tree;
+	struct throtl_service_queue *sq = &td->service_queue;
 	struct throtl_grp *tg;
 	struct bio_list bl;
 	struct bio *bio;
@@ -1154,7 +1154,7 @@ void blk_throtl_drain(struct request_queue *q)
 
 	bio_list_init(&bl);
 
-	while ((tg = throtl_rb_first(st))) {
+	while ((tg = throtl_rb_first(sq))) {
 		throtl_dequeue_tg(td, tg);
 
 		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
@@ -1179,7 +1179,7 @@ int blk_throtl_init(struct request_queue *q)
 	if (!td)
 		return -ENOMEM;
 
-	td->tg_service_tree = THROTL_RB_ROOT;
+	td->service_queue = THROTL_SERVICE_QUEUE_INITIALIZER;
 	INIT_DELAYED_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
 
 	q->td = td;