author     Tejun Heo <tj@kernel.org>  2013-05-14 16:52:33 -0400
committer  Tejun Heo <tj@kernel.org>  2013-05-14 16:52:33 -0400
commit     0049af73bb4b74d1407db59caefc5fe057ee434a
tree       e903a60168c0f41d3613028bf711db0c9d4183ab  /block/blk-throttle.c
parent     e2d57e60195a65e2f161ac1229ec9c91935e0240
blk-throttle: reorganize throtl_service_queue passed around as argument

throtl_service_queue will be the building block of hierarchy support
and will form a tree.  This patch updates its usages as arguments to
reduce confusion.

* When a service queue is used as the parent role - the host of the
  rbtree - use @parent_sq instead of @sq.

* For functions taking both @tg and @parent_sq, reorder them so that
  the order is (@tg, @parent_sq) not the other way around.  This makes
  the code follow the usual convention of specifying the primary
  target of the operation as the first argument.

This patch doesn't make any functional differences.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
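[Editor's note: illustrative sketch, not part of the commit.] The argument-order convention the message describes - the primary target (@tg) first, the parent service queue hosting the rbtree (@parent_sq) second - can be shown with simplified stand-in types. The struct and function names below are hypothetical; only the field names (nr_pending, first_pending_disptime, disptime) mirror members the diff actually touches, and the real kernel definitions are richer than this.

/* sketch.c -- illustrative stand-ins only, not the kernel types */
#include <stdio.h>

struct sq_sketch {                            /* stands in for throtl_service_queue */
        unsigned int  nr_pending;             /* number of groups queued here */
        unsigned long first_pending_disptime; /* earliest dispatch time in the tree */
};

struct tg_sketch {                            /* stands in for throtl_grp */
        unsigned long disptime;               /* sort key within the parent's rbtree */
};

/* New convention: the group being operated on (@tg) comes first,
 * the parent service queue that hosts the rbtree (@parent_sq) second. */
static void enqueue_tg_sketch(struct tg_sketch *tg, struct sq_sketch *parent_sq)
{
        /* a real implementation would link @tg into @parent_sq's pending tree,
         * keyed by tg->disptime; here we only track the bookkeeping fields */
        parent_sq->nr_pending++;
        if (parent_sq->nr_pending == 1)
                parent_sq->first_pending_disptime = tg->disptime;
}

int main(void)
{
        struct sq_sketch parent_sq = { 0, 0 };
        struct tg_sketch tg = { .disptime = 42 };

        enqueue_tg_sketch(&tg, &parent_sq);   /* (@tg, @parent_sq) order */
        printf("pending=%u disptime=%lu\n",
               parent_sq.nr_pending, parent_sq.first_pending_disptime);
        return 0;
}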
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--  block/blk-throttle.c  100
1 file changed, 51 insertions(+), 49 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 9660ec8d0375..ebaaaa9f57d6 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -284,17 +284,18 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
         return tg;
 }
 
-static struct throtl_grp *throtl_rb_first(struct throtl_service_queue *sq)
+static struct throtl_grp *
+throtl_rb_first(struct throtl_service_queue *parent_sq)
 {
         /* Service tree is empty */
-        if (!sq->nr_pending)
+        if (!parent_sq->nr_pending)
                 return NULL;
 
-        if (!sq->first_pending)
-                sq->first_pending = rb_first(&sq->pending_tree);
+        if (!parent_sq->first_pending)
+                parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
 
-        if (sq->first_pending)
-                return rb_entry_tg(sq->first_pending);
+        if (parent_sq->first_pending)
+                return rb_entry_tg(parent_sq->first_pending);
 
         return NULL;
 }
@@ -305,29 +306,30 @@ static void rb_erase_init(struct rb_node *n, struct rb_root *root)
         RB_CLEAR_NODE(n);
 }
 
-static void throtl_rb_erase(struct rb_node *n, struct throtl_service_queue *sq)
+static void throtl_rb_erase(struct rb_node *n,
+                            struct throtl_service_queue *parent_sq)
 {
-        if (sq->first_pending == n)
-                sq->first_pending = NULL;
-        rb_erase_init(n, &sq->pending_tree);
-        --sq->nr_pending;
+        if (parent_sq->first_pending == n)
+                parent_sq->first_pending = NULL;
+        rb_erase_init(n, &parent_sq->pending_tree);
+        --parent_sq->nr_pending;
 }
 
-static void update_min_dispatch_time(struct throtl_service_queue *sq)
+static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
 {
         struct throtl_grp *tg;
 
-        tg = throtl_rb_first(sq);
+        tg = throtl_rb_first(parent_sq);
         if (!tg)
                 return;
 
-        sq->first_pending_disptime = tg->disptime;
+        parent_sq->first_pending_disptime = tg->disptime;
 }
 
-static void tg_service_queue_add(struct throtl_service_queue *sq,
-                                 struct throtl_grp *tg)
+static void tg_service_queue_add(struct throtl_grp *tg,
+                                 struct throtl_service_queue *parent_sq)
 {
-        struct rb_node **node = &sq->pending_tree.rb_node;
+        struct rb_node **node = &parent_sq->pending_tree.rb_node;
         struct rb_node *parent = NULL;
         struct throtl_grp *__tg;
         unsigned long key = tg->disptime;
@@ -346,39 +348,39 @@ static void tg_service_queue_add(struct throtl_service_queue *sq,
         }
 
         if (left)
-                sq->first_pending = &tg->rb_node;
+                parent_sq->first_pending = &tg->rb_node;
 
         rb_link_node(&tg->rb_node, parent, node);
-        rb_insert_color(&tg->rb_node, &sq->pending_tree);
+        rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
 }
 
-static void __throtl_enqueue_tg(struct throtl_service_queue *sq,
-                                struct throtl_grp *tg)
+static void __throtl_enqueue_tg(struct throtl_grp *tg,
+                                struct throtl_service_queue *parent_sq)
 {
-        tg_service_queue_add(sq, tg);
+        tg_service_queue_add(tg, parent_sq);
         tg->flags |= THROTL_TG_PENDING;
-        sq->nr_pending++;
+        parent_sq->nr_pending++;
 }
 
-static void throtl_enqueue_tg(struct throtl_service_queue *sq,
-                              struct throtl_grp *tg)
+static void throtl_enqueue_tg(struct throtl_grp *tg,
+                              struct throtl_service_queue *parent_sq)
 {
         if (!(tg->flags & THROTL_TG_PENDING))
-                __throtl_enqueue_tg(sq, tg);
+                __throtl_enqueue_tg(tg, parent_sq);
 }
 
-static void __throtl_dequeue_tg(struct throtl_service_queue *sq,
-                                struct throtl_grp *tg)
+static void __throtl_dequeue_tg(struct throtl_grp *tg,
+                                struct throtl_service_queue *parent_sq)
 {
-        throtl_rb_erase(&tg->rb_node, sq);
+        throtl_rb_erase(&tg->rb_node, parent_sq);
         tg->flags &= ~THROTL_TG_PENDING;
 }
 
-static void throtl_dequeue_tg(struct throtl_service_queue *sq,
-                              struct throtl_grp *tg)
+static void throtl_dequeue_tg(struct throtl_grp *tg,
+                              struct throtl_service_queue *parent_sq)
 {
         if (tg->flags & THROTL_TG_PENDING)
-                __throtl_dequeue_tg(sq, tg);
+                __throtl_dequeue_tg(tg, parent_sq);
 }
 
 /* Call with queue lock held */
@@ -691,8 +693,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
         throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
 }
 
-static void throtl_add_bio_tg(struct throtl_service_queue *sq,
-                              struct throtl_grp *tg, struct bio *bio)
+static void throtl_add_bio_tg(struct bio *bio, struct throtl_grp *tg,
+                              struct throtl_service_queue *parent_sq)
 {
         bool rw = bio_data_dir(bio);
 
@@ -701,11 +703,11 @@ static void throtl_add_bio_tg(struct throtl_service_queue *sq,
         blkg_get(tg_to_blkg(tg));
         tg->nr_queued[rw]++;
         tg->td->nr_queued[rw]++;
-        throtl_enqueue_tg(sq, tg);
+        throtl_enqueue_tg(tg, parent_sq);
 }
 
-static void tg_update_disptime(struct throtl_service_queue *sq,
-                               struct throtl_grp *tg)
+static void tg_update_disptime(struct throtl_grp *tg,
+                               struct throtl_service_queue *parent_sq)
 {
         unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
         struct bio *bio;
@@ -720,9 +722,9 @@ static void tg_update_disptime(struct throtl_service_queue *sq,
         disptime = jiffies + min_wait;
 
         /* Update dispatch time */
-        throtl_dequeue_tg(sq, tg);
+        throtl_dequeue_tg(tg, parent_sq);
         tg->disptime = disptime;
-        throtl_enqueue_tg(sq, tg);
+        throtl_enqueue_tg(tg, parent_sq);
 }
 
 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw,
@@ -777,14 +779,14 @@ static int throtl_dispatch_tg(struct throtl_grp *tg, struct bio_list *bl)
         return nr_reads + nr_writes;
 }
 
-static int throtl_select_dispatch(struct throtl_service_queue *sq,
+static int throtl_select_dispatch(struct throtl_service_queue *parent_sq,
                                   struct bio_list *bl)
 {
         unsigned int nr_disp = 0;
         struct throtl_grp *tg;
 
         while (1) {
-                tg = throtl_rb_first(sq);
+                tg = throtl_rb_first(parent_sq);
 
                 if (!tg)
                         break;
@@ -792,12 +794,12 @@ static int throtl_select_dispatch(struct throtl_service_queue *sq,
                 if (time_before(jiffies, tg->disptime))
                         break;
 
-                throtl_dequeue_tg(sq, tg);
+                throtl_dequeue_tg(tg, parent_sq);
 
                 nr_disp += throtl_dispatch_tg(tg, bl);
 
                 if (tg->nr_queued[0] || tg->nr_queued[1])
-                        tg_update_disptime(sq, tg);
+                        tg_update_disptime(tg, parent_sq);
 
                 if (nr_disp >= throtl_quantum)
                         break;
@@ -952,7 +954,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
         throtl_start_new_slice(tg, 1);
 
         if (tg->flags & THROTL_TG_PENDING) {
-                tg_update_disptime(&td->service_queue, tg);
+                tg_update_disptime(tg, &td->service_queue);
                 throtl_schedule_next_dispatch(td);
         }
 
@@ -1106,11 +1108,11 @@ queue_bio:
                       tg->nr_queued[READ], tg->nr_queued[WRITE]);
 
         bio_associate_current(bio);
-        throtl_add_bio_tg(&q->td->service_queue, tg, bio);
+        throtl_add_bio_tg(bio, tg, &q->td->service_queue);
         throttled = true;
 
         if (update_disptime) {
-                tg_update_disptime(&td->service_queue, tg);
+                tg_update_disptime(tg, &td->service_queue);
                 throtl_schedule_next_dispatch(td);
         }
 
@@ -1132,7 +1134,7 @@ void blk_throtl_drain(struct request_queue *q)
         __releases(q->queue_lock) __acquires(q->queue_lock)
 {
         struct throtl_data *td = q->td;
-        struct throtl_service_queue *sq = &td->service_queue;
+        struct throtl_service_queue *parent_sq = &td->service_queue;
         struct throtl_grp *tg;
         struct bio_list bl;
         struct bio *bio;
@@ -1141,8 +1143,8 @@ void blk_throtl_drain(struct request_queue *q)
 
         bio_list_init(&bl);
 
-        while ((tg = throtl_rb_first(sq))) {
-                throtl_dequeue_tg(sq, tg);
+        while ((tg = throtl_rb_first(parent_sq))) {
+                throtl_dequeue_tg(tg, parent_sq);
 
                 while ((bio = bio_list_peek(&tg->bio_lists[READ])))
                         tg_dispatch_one_bio(tg, bio_data_dir(bio), &bl);