diff options
author | Tejun Heo <tj@kernel.org> | 2012-03-05 16:15:03 -0500 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2012-03-06 15:27:22 -0500 |
commit | ca32aefc7f2539ed88d42763330d54ee3e61769a (patch) | |
tree | 791d2ac1c11b738ce34629653090b6e971fc11b5 /block/blk-throttle.c | |
parent | 0a5a7d0e32be6643b881f0e7cd9d0d06fadde27a (diff) |
blkcg: use q and plid instead of opaque void * for blkio_group association
blkio_group is association between a block cgroup and a queue for a
given policy. Using opaque void * for association makes things
confusing and hinders factoring of common code. Use request_queue *
and, if necessary, policy id instead.
This will help block cgroup API cleanup.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r-- | block/blk-throttle.c | 50 |
1 files changed, 23 insertions, 27 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index c252df9169db..6613de78e364 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -252,7 +252,7 @@ static void throtl_init_add_tg_lists(struct throtl_data *td, | |||
252 | __throtl_tg_fill_dev_details(td, tg); | 252 | __throtl_tg_fill_dev_details(td, tg); |
253 | 253 | ||
254 | /* Add group onto cgroup list */ | 254 | /* Add group onto cgroup list */ |
255 | blkiocg_add_blkio_group(blkcg, &tg->blkg, (void *)td, | 255 | blkiocg_add_blkio_group(blkcg, &tg->blkg, td->queue, |
256 | tg->blkg.dev, BLKIO_POLICY_THROTL); | 256 | tg->blkg.dev, BLKIO_POLICY_THROTL); |
257 | 257 | ||
258 | tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev); | 258 | tg->bps[READ] = blkcg_get_read_bps(blkcg, tg->blkg.dev); |
@@ -288,7 +288,6 @@ static struct | |||
288 | throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg) | 288 | throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg) |
289 | { | 289 | { |
290 | struct throtl_grp *tg = NULL; | 290 | struct throtl_grp *tg = NULL; |
291 | void *key = td; | ||
292 | 291 | ||
293 | /* | 292 | /* |
294 | * This is the common case when there are no blkio cgroups. | 293 | * This is the common case when there are no blkio cgroups. |
@@ -297,7 +296,8 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg) | |||
297 | if (blkcg == &blkio_root_cgroup) | 296 | if (blkcg == &blkio_root_cgroup) |
298 | tg = td->root_tg; | 297 | tg = td->root_tg; |
299 | else | 298 | else |
300 | tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key)); | 299 | tg = tg_of_blkg(blkiocg_lookup_group(blkcg, td->queue, |
300 | BLKIO_POLICY_THROTL)); | ||
301 | 301 | ||
302 | __throtl_tg_fill_dev_details(td, tg); | 302 | __throtl_tg_fill_dev_details(td, tg); |
303 | return tg; | 303 | return tg; |
@@ -1012,22 +1012,22 @@ static bool throtl_release_tgs(struct throtl_data *td, bool release_root) | |||
1012 | * no new IO will come in this group. So get rid of this group as soon as | 1012 | * no new IO will come in this group. So get rid of this group as soon as |
1013 | * any pending IO in the group is finished. | 1013 | * any pending IO in the group is finished. |
1014 | * | 1014 | * |
1015 | * This function is called under rcu_read_lock(). key is the rcu protected | 1015 | * This function is called under rcu_read_lock(). @q is the rcu protected |
1016 | * pointer. That means "key" is a valid throtl_data pointer as long as we are | 1016 | * pointer. That means @q is a valid request_queue pointer as long as we |
1017 | * rcu read lock. | 1017 | * are rcu read lock. |
1018 | * | 1018 | * |
1019 | * "key" was fetched from blkio_group under blkio_cgroup->lock. That means | 1019 | * @q was fetched from blkio_group under blkio_cgroup->lock. That means |
1020 | it should not be NULL as even if queue was going away, cgroup deletion | 1020 | it should not be NULL as even if queue was going away, cgroup deletion |
1021 | * path got to it first. | 1021 | * path got to it first. |
1022 | */ | 1022 | */ |
1023 | void throtl_unlink_blkio_group(void *key, struct blkio_group *blkg) | 1023 | void throtl_unlink_blkio_group(struct request_queue *q, |
1024 | struct blkio_group *blkg) | ||
1024 | { | 1025 | { |
1025 | unsigned long flags; | 1026 | unsigned long flags; |
1026 | struct throtl_data *td = key; | ||
1027 | 1027 | ||
1028 | spin_lock_irqsave(td->queue->queue_lock, flags); | 1028 | spin_lock_irqsave(q->queue_lock, flags); |
1029 | throtl_destroy_tg(td, tg_of_blkg(blkg)); | 1029 | throtl_destroy_tg(q->td, tg_of_blkg(blkg)); |
1030 | spin_unlock_irqrestore(td->queue->queue_lock, flags); | 1030 | spin_unlock_irqrestore(q->queue_lock, flags); |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | static bool throtl_clear_queue(struct request_queue *q) | 1033 | static bool throtl_clear_queue(struct request_queue *q) |
@@ -1054,52 +1054,48 @@ static void throtl_update_blkio_group_common(struct throtl_data *td, | |||
1054 | } | 1054 | } |
1055 | 1055 | ||
1056 | /* | 1056 | /* |
1057 | * For all update functions, key should be a valid pointer because these | 1057 | * For all update functions, @q should be a valid pointer because these |
1058 | * update functions are called under blkcg_lock, that means, blkg is | 1058 | * update functions are called under blkcg_lock, that means, blkg is |
1059 | * valid and in turn key is valid. queue exit path can not race because | 1059 | * valid and in turn @q is valid. queue exit path can not race because |
1060 | * of blkcg_lock | 1060 | * of blkcg_lock |
1061 | * | 1061 | * |
1062 | * Can not take queue lock in update functions as queue lock under blkcg_lock | 1062 | * Can not take queue lock in update functions as queue lock under blkcg_lock |
1063 | * is not allowed. Under other paths we take blkcg_lock under queue_lock. | 1063 | * is not allowed. Under other paths we take blkcg_lock under queue_lock. |
1064 | */ | 1064 | */ |
1065 | static void throtl_update_blkio_group_read_bps(void *key, | 1065 | static void throtl_update_blkio_group_read_bps(struct request_queue *q, |
1066 | struct blkio_group *blkg, u64 read_bps) | 1066 | struct blkio_group *blkg, u64 read_bps) |
1067 | { | 1067 | { |
1068 | struct throtl_data *td = key; | ||
1069 | struct throtl_grp *tg = tg_of_blkg(blkg); | 1068 | struct throtl_grp *tg = tg_of_blkg(blkg); |
1070 | 1069 | ||
1071 | tg->bps[READ] = read_bps; | 1070 | tg->bps[READ] = read_bps; |
1072 | throtl_update_blkio_group_common(td, tg); | 1071 | throtl_update_blkio_group_common(q->td, tg); |
1073 | } | 1072 | } |
1074 | 1073 | ||
1075 | static void throtl_update_blkio_group_write_bps(void *key, | 1074 | static void throtl_update_blkio_group_write_bps(struct request_queue *q, |
1076 | struct blkio_group *blkg, u64 write_bps) | 1075 | struct blkio_group *blkg, u64 write_bps) |
1077 | { | 1076 | { |
1078 | struct throtl_data *td = key; | ||
1079 | struct throtl_grp *tg = tg_of_blkg(blkg); | 1077 | struct throtl_grp *tg = tg_of_blkg(blkg); |
1080 | 1078 | ||
1081 | tg->bps[WRITE] = write_bps; | 1079 | tg->bps[WRITE] = write_bps; |
1082 | throtl_update_blkio_group_common(td, tg); | 1080 | throtl_update_blkio_group_common(q->td, tg); |
1083 | } | 1081 | } |
1084 | 1082 | ||
1085 | static void throtl_update_blkio_group_read_iops(void *key, | 1083 | static void throtl_update_blkio_group_read_iops(struct request_queue *q, |
1086 | struct blkio_group *blkg, unsigned int read_iops) | 1084 | struct blkio_group *blkg, unsigned int read_iops) |
1087 | { | 1085 | { |
1088 | struct throtl_data *td = key; | ||
1089 | struct throtl_grp *tg = tg_of_blkg(blkg); | 1086 | struct throtl_grp *tg = tg_of_blkg(blkg); |
1090 | 1087 | ||
1091 | tg->iops[READ] = read_iops; | 1088 | tg->iops[READ] = read_iops; |
1092 | throtl_update_blkio_group_common(td, tg); | 1089 | throtl_update_blkio_group_common(q->td, tg); |
1093 | } | 1090 | } |
1094 | 1091 | ||
1095 | static void throtl_update_blkio_group_write_iops(void *key, | 1092 | static void throtl_update_blkio_group_write_iops(struct request_queue *q, |
1096 | struct blkio_group *blkg, unsigned int write_iops) | 1093 | struct blkio_group *blkg, unsigned int write_iops) |
1097 | { | 1094 | { |
1098 | struct throtl_data *td = key; | ||
1099 | struct throtl_grp *tg = tg_of_blkg(blkg); | 1095 | struct throtl_grp *tg = tg_of_blkg(blkg); |
1100 | 1096 | ||
1101 | tg->iops[WRITE] = write_iops; | 1097 | tg->iops[WRITE] = write_iops; |
1102 | throtl_update_blkio_group_common(td, tg); | 1098 | throtl_update_blkio_group_common(q->td, tg); |
1103 | } | 1099 | } |
1104 | 1100 | ||
1105 | static void throtl_shutdown_wq(struct request_queue *q) | 1101 | static void throtl_shutdown_wq(struct request_queue *q) |
@@ -1306,7 +1302,7 @@ void blk_throtl_exit(struct request_queue *q) | |||
1306 | spin_unlock_irq(q->queue_lock); | 1302 | spin_unlock_irq(q->queue_lock); |
1307 | 1303 | ||
1308 | /* | 1304 | /* |
1309 | * Wait for tg->blkg->key accessors to exit their grace periods. | 1305 | * Wait for tg->blkg->q accessors to exit their grace periods. |
1310 | * Do this wait only if there are other undestroyed groups out | 1306 | * Do this wait only if there are other undestroyed groups out |
1311 | * there (other than root group). This can happen if cgroup deletion | 1307 | * there (other than root group). This can happen if cgroup deletion |
1312 | * path claimed the responsibility of cleaning up a group before | 1308 | * path claimed the responsibility of cleaning up a group before |