author     Vivek Goyal <vgoyal@redhat.com>     2010-10-01 08:49:48 -0400
committer  Jens Axboe <jaxboe@fusionio.com>    2010-10-01 08:49:48 -0400
commit     02977e4af7ed3b478c505e50491ffdf3e1314cf4 (patch)
tree       5adb947a5c8567cbbff79459e9feaccf354fd81f
parent     61014e96e6ed55b8db0af31574eec2a75d4e8755 (diff)
blkio: Add root group to td->tg_list
o Currently all the dynamically allocated groups, except the root group,
  are added to td->tg_list. This has not been a problem so far, but in the
  next patch I will traverse td->tg_list to process any updates of limits
  on the groups. If the root group is not in tg_list, then the root
  group's updates are not processed.
o It is better to add the root group to tg_list as well, instead of doing
  special processing for it during limit updates.
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
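
To make the motivation concrete, here is a minimal userspace sketch of the
traversal the next patch will perform: every group, including the root
group, must sit on one list so a single walk picks up pending limit
updates. The helper name process_limit_change() and the limits_changed
flag are illustrative stand-ins, not code from this patch, and a plain
singly linked list stands in for the kernel's hlist.

#include <stdbool.h>
#include <stdio.h>

struct throtl_grp {
	struct throtl_grp *next;	/* stand-in for hlist_node tg_node */
	const char *name;
	bool limits_changed;		/* set when a limit is updated */
};

struct throtl_data {
	struct throtl_grp *tg_list;	/* all undestroyed groups, incl. root */
};

/* Walk td->tg_list and apply any pending limit updates. */
static void process_limit_change(struct throtl_data *td)
{
	for (struct throtl_grp *tg = td->tg_list; tg; tg = tg->next) {
		if (tg->limits_changed) {
			printf("applying new limits to %s\n", tg->name);
			tg->limits_changed = false;
		}
	}
}

int main(void)
{
	struct throtl_grp root = { .name = "root", .limits_changed = true };
	struct throtl_data td = { .tg_list = &root };

	/* Root group is on the list, so its update is not skipped. */
	process_limit_change(&td);
	return 0;
}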
-rw-r--r--   block/blk-throttle.c | 17 +++++++++++++----
1 file changed, 13 insertions(+), 4 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index af53f37c1b13..bc2936b80add 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -87,7 +87,7 @@ struct throtl_data
 	unsigned int nr_queued[2];
 
 	/*
-	 * number of total undestroyed groups (excluding root group)
+	 * number of total undestroyed groups
 	 */
 	unsigned int nr_undestroyed_grps;
 
@@ -940,7 +940,17 @@ int blk_throtl_init(struct request_queue *q)
 	/* Practically unlimited BW */
 	tg->bps[0] = tg->bps[1] = -1;
 	tg->iops[0] = tg->iops[1] = -1;
-	atomic_set(&tg->ref, 1);
+
+	/*
+	 * Set root group reference to 2. One reference will be dropped when
+	 * all groups on tg_list are being deleted during queue exit. Other
+	 * reference will remain there as we don't want to delete this group
+	 * as it is statically allocated and gets destroyed when throtl_data
+	 * goes away.
+	 */
+	atomic_set(&tg->ref, 2);
+	hlist_add_head(&tg->tg_node, &td->tg_list);
+	td->nr_undestroyed_grps++;
 
 	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
 
@@ -966,10 +976,9 @@ void blk_throtl_exit(struct request_queue *q)
 
 	spin_lock_irq(q->queue_lock);
 	throtl_release_tgs(td);
-	blkiocg_del_blkio_group(&td->root_tg.blkg);
 
 	/* If there are other groups */
-	if (td->nr_undestroyed_grps >= 1)
+	if (td->nr_undestroyed_grps > 0)
 		wait = true;
 
 	spin_unlock_irq(q->queue_lock);
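
For reference, the following standalone C model shows the reference-count
lifecycle the new comment in blk_throtl_init() describes: each group on
tg_list holds one reference that is dropped during queue exit, dynamically
allocated groups are freed when their count reaches zero, and the root
group starts at 2 because it is embedded in throtl_data and must never hit
the free path. The name throtl_put_tg() follows the kernel helper; the
rest is a simplified model, not kernel code.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct throtl_grp {
	int ref;
	bool statically_allocated;	/* true only for the root group */
};

static void throtl_put_tg(struct throtl_grp *tg)
{
	assert(tg->ref > 0);
	if (--tg->ref == 0) {
		/* Only dynamically allocated groups reach this point. */
		assert(!tg->statically_allocated);
		free(tg);
	}
}

int main(void)
{
	struct throtl_grp root = { .ref = 2, .statically_allocated = true };

	/* Queue exit: dropping the tg_list reference ... */
	throtl_put_tg(&root);

	/* ... still leaves the embedded root group alive (ref == 1). */
	printf("root ref after list teardown: %d\n", root.ref);
	return 0;
}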