author	Tejun Heo <tj@kernel.org>	2012-03-05 16:15:09 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-03-06 15:27:22 -0500
commit	7a4dd281ec66224f802093962d1d903d86b09560 (patch)
tree	c38c5c40464d4d7eb429cc14348a435176832ef2
parent	4bfd482e73b30284cb21e10834ce729fa81aa256 (diff)
blkcg: kill the mind-bending blkg->dev
blkg->dev is a dev_t recording the device number of the block device for
the associated request_queue. It is used to identify the associated
block device when printing out configuration or stats.
This is redundant to begin with. A blkg is an association between a
cgroup and a request_queue; the request_queue is of course reachable
from the blkg, and synchronization conventions are in place for safe q
dereferencing, so the field shouldn't have been necessary in the first
place. Furthermore, it is initialized by sscanf()ing the device name of
the backing_dev_info. The mind boggles.
Anyway, if a blkg is visible under the RCU lock, we *know* that the
associated request_queue hasn't gone away yet and that its bdi is
registered and alive - a blkg can't be created for a request_queue
which hasn't been fully initialized, and the queue can't go away before
the blkg is removed.
Let the stat and conf read functions get the device name from
blkg->q->backing_dev_info.dev and pass it down to the printing
functions, and remove blkg->dev.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
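
In short, wherever the old code read blkg->dev, the new code derives the name on the fly. A minimal sketch of that pattern, for illustration only (the helper name blkg_dev_name() is hypothetical and is not added by this patch):

	/*
	 * Illustrative sketch: derive the device name straight from the
	 * blkg's request_queue.  blkg_dev_name() is a hypothetical helper;
	 * callers are assumed to run under rcu_read_lock(), so blkg->q and
	 * its registered bdi cannot go away underneath them.
	 */
	static const char *blkg_dev_name(struct blkio_group *blkg)
	{
		return dev_name(blkg->q->backing_dev_info.dev);
	}

This is exactly the expression the stat and conf read paths below switch to, instead of carrying a cached dev_t in the blkg.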
-rw-r--r--	block/blk-cgroup.c	86
-rw-r--r--	block/blk-cgroup.h	2
-rw-r--r--	block/blk-throttle.c	51
-rw-r--r--	block/cfq-iosched.c	21
4 files changed, 47 insertions, 113 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index adf61c99258c..8742af3be84b 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -662,10 +662,10 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 	return 0;
 }
 
-static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
-			       int chars_left, bool diskname_only)
+static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
+			       char *str, int chars_left, bool diskname_only)
 {
-	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
+	snprintf(str, chars_left, "%s", dname);
 	chars_left -= strlen(str);
 	if (chars_left <= 0) {
 		printk(KERN_WARNING
@@ -696,9 +696,9 @@ static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
 }
 
 static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
-				struct cgroup_map_cb *cb, dev_t dev)
+				struct cgroup_map_cb *cb, const char *dname)
 {
-	blkio_get_key_name(0, dev, str, chars_left, true);
+	blkio_get_key_name(0, dname, str, chars_left, true);
 	cb->fill(cb, str, val);
 	return val;
 }
@@ -730,7 +730,8 @@ static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
 }
 
 static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
-		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
+				   struct cgroup_map_cb *cb, const char *dname,
+				   enum stat_type_cpu type)
 {
 	uint64_t disk_total, val;
 	char key_str[MAX_KEY_LEN];
@@ -738,12 +739,14 @@ static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
 
 	if (type == BLKIO_STAT_CPU_SECTORS) {
 		val = blkio_read_stat_cpu(blkg, type, 0);
-		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
+				       dname);
 	}
 
 	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
 			sub_type++) {
-		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
+		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
+				   false);
 		val = blkio_read_stat_cpu(blkg, type, sub_type);
 		cb->fill(cb, key_str, val);
 	}
@@ -751,14 +754,16 @@ static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
 	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
 			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);
 
-	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
+	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
+			   false);
 	cb->fill(cb, key_str, disk_total);
 	return disk_total;
 }
 
 /* This should be called with blkg->stats_lock held */
 static uint64_t blkio_get_stat(struct blkio_group *blkg,
-		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
+			       struct cgroup_map_cb *cb, const char *dname,
+			       enum stat_type type)
 {
 	uint64_t disk_total;
 	char key_str[MAX_KEY_LEN];
@@ -766,11 +771,11 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
 
 	if (type == BLKIO_STAT_TIME)
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-					blkg->stats.time, cb, dev);
+					blkg->stats.time, cb, dname);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-				blkg->stats.unaccounted_time, cb, dev);
+				blkg->stats.unaccounted_time, cb, dname);
 	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
 		uint64_t sum = blkg->stats.avg_queue_size_sum;
 		uint64_t samples = blkg->stats.avg_queue_size_samples;
@@ -778,30 +783,33 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
 			do_div(sum, samples);
 		else
 			sum = 0;
-		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
+		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
+				       sum, cb, dname);
 	}
 	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-				blkg->stats.group_wait_time, cb, dev);
+				blkg->stats.group_wait_time, cb, dname);
 	if (type == BLKIO_STAT_IDLE_TIME)
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-				blkg->stats.idle_time, cb, dev);
+				blkg->stats.idle_time, cb, dname);
 	if (type == BLKIO_STAT_EMPTY_TIME)
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-				blkg->stats.empty_time, cb, dev);
+				blkg->stats.empty_time, cb, dname);
 	if (type == BLKIO_STAT_DEQUEUE)
 		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
-				blkg->stats.dequeue, cb, dev);
+				blkg->stats.dequeue, cb, dname);
 #endif
 
 	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
 			sub_type++) {
-		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
+		blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
+				   false);
 		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
 	}
 	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
 			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
-	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
+	blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
+			   false);
 	cb->fill(cb, key_str, disk_total);
 	return disk_total;
 }
@@ -946,14 +954,15 @@ static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
 static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
 				   struct seq_file *m)
 {
+	const char *dname = dev_name(blkg->q->backing_dev_info.dev);
 	int fileid = BLKIOFILE_ATTR(cft->private);
 	int rw = WRITE;
 
 	switch (blkg->plid) {
 	case BLKIO_POLICY_PROP:
 		if (blkg->conf.weight)
-			seq_printf(m, "%u:%u\t%u\n", MAJOR(blkg->dev),
-				   MINOR(blkg->dev), blkg->conf.weight);
+			seq_printf(m, "%s\t%u\n",
+				   dname, blkg->conf.weight);
 		break;
 	case BLKIO_POLICY_THROTL:
 		switch (fileid) {
@@ -961,19 +970,15 @@ static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
 			rw = READ;
 		case BLKIO_THROTL_write_bps_device:
 			if (blkg->conf.bps[rw])
-				seq_printf(m, "%u:%u\t%llu\n",
-					   MAJOR(blkg->dev),
-					   MINOR(blkg->dev),
-					   blkg->conf.bps[rw]);
+				seq_printf(m, "%s\t%llu\n",
+					   dname, blkg->conf.bps[rw]);
 			break;
 		case BLKIO_THROTL_read_iops_device:
 			rw = READ;
 		case BLKIO_THROTL_write_iops_device:
 			if (blkg->conf.iops[rw])
-				seq_printf(m, "%u:%u\t%u\n",
-					   MAJOR(blkg->dev),
-					   MINOR(blkg->dev),
-					   blkg->conf.iops[rw]);
+				seq_printf(m, "%s\t%u\n",
+					   dname, blkg->conf.iops[rw]);
 			break;
 		}
 		break;
@@ -1044,18 +1049,17 @@ static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		if (blkg->dev) {
-			if (BLKIOFILE_POLICY(cft->private) != blkg->plid)
-				continue;
-			if (pcpu)
-				cgroup_total += blkio_get_stat_cpu(blkg, cb,
-						blkg->dev, type);
-			else {
-				spin_lock_irq(&blkg->stats_lock);
-				cgroup_total += blkio_get_stat(blkg, cb,
-						blkg->dev, type);
-				spin_unlock_irq(&blkg->stats_lock);
-			}
+		const char *dname = dev_name(blkg->q->backing_dev_info.dev);
+
+		if (BLKIOFILE_POLICY(cft->private) != blkg->plid)
+			continue;
+		if (pcpu)
+			cgroup_total += blkio_get_stat_cpu(blkg, cb, dname,
+							   type);
+		else {
+			spin_lock_irq(&blkg->stats_lock);
+			cgroup_total += blkio_get_stat(blkg, cb, dname, type);
+			spin_unlock_irq(&blkg->stats_lock);
 		}
 	}
 	if (show_total)
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 9a5c68d7cc92..7ebecf6ea8f1 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -166,8 +166,6 @@ struct blkio_group {
 	unsigned short blkcg_id;
 	/* Store cgroup path */
 	char path[128];
-	/* The device MKDEV(major, minor), this group has been created for */
-	dev_t dev;
 	/* policy which owns this blk group */
 	enum blkio_policy_id plid;
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 791b10719e43..52a429397d3b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -212,50 +212,12 @@ static struct blkio_group *throtl_alloc_blkio_group(struct request_queue *q,
 	return &tg->blkg;
 }
 
-static void
-__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
-{
-	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
-	unsigned int major, minor;
-
-	if (!tg || tg->blkg.dev)
-		return;
-
-	/*
-	 * Fill in device details for a group which might not have been
-	 * filled at group creation time as queue was being instantiated
-	 * and driver had not attached a device yet
-	 */
-	if (bdi->dev && dev_name(bdi->dev)) {
-		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-		tg->blkg.dev = MKDEV(major, minor);
-	}
-}
-
-/*
- * Should be called with without queue lock held. Here queue lock will be
- * taken rarely. It will be taken only once during life time of a group
- * if need be
- */
-static void
-throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
-{
-	if (!tg || tg->blkg.dev)
-		return;
-
-	spin_lock_irq(td->queue->queue_lock);
-	__throtl_tg_fill_dev_details(td, tg);
-	spin_unlock_irq(td->queue->queue_lock);
-}
-
 static void throtl_link_blkio_group(struct request_queue *q,
 				    struct blkio_group *blkg)
 {
 	struct throtl_data *td = q->td;
 	struct throtl_grp *tg = tg_of_blkg(blkg);
 
-	__throtl_tg_fill_dev_details(td, tg);
-
 	hlist_add_head(&tg->tg_node, &td->tg_list);
 	td->nr_undestroyed_grps++;
 }
@@ -263,20 +225,14 @@ static void throtl_link_blkio_group(struct request_queue *q,
 static struct
 throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 {
-	struct throtl_grp *tg = NULL;
-
 	/*
 	 * This is the common case when there are no blkio cgroups.
 	 * Avoid lookup in this case
 	 */
 	if (blkcg == &blkio_root_cgroup)
-		tg = td->root_tg;
-	else
-		tg = tg_of_blkg(blkg_lookup(blkcg, td->queue,
-					    BLKIO_POLICY_THROTL));
+		return td->root_tg;
 
-	__throtl_tg_fill_dev_details(td, tg);
-	return tg;
+	return tg_of_blkg(blkg_lookup(blkcg, td->queue, BLKIO_POLICY_THROTL));
 }
 
 static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
@@ -303,7 +259,6 @@ static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
 		tg = td->root_tg;
 	}
 
-	__throtl_tg_fill_dev_details(td, tg);
 	return tg;
 }
 
@@ -1090,8 +1045,6 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	blkcg = task_blkio_cgroup(current);
 	tg = throtl_lookup_tg(td, blkcg);
 	if (tg) {
-		throtl_tg_fill_dev_details(td, tg);
-
 		if (tg_no_rule_group(tg, rw)) {
 			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
 					rw, rw_is_sync(bio->bi_rw));
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 08d4fdd188fa..f67d109eb974 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1052,20 +1052,7 @@ static void cfq_link_blkio_group(struct request_queue *q,
 				 struct blkio_group *blkg)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct backing_dev_info *bdi = &q->backing_dev_info;
 	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
-	unsigned int major, minor;
-
-	/*
-	 * Add group onto cgroup list. It might happen that bdi->dev is
-	 * not initialized yet. Initialize this new group without major
-	 * and minor info and this info will be filled in once a new thread
-	 * comes for IO.
-	 */
-	if (bdi->dev) {
-		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-		blkg->dev = MKDEV(major, minor);
-	}
 
 	cfqd->nr_blkcg_linked_grps++;
 
@@ -1104,7 +1091,6 @@ static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
 					     struct blkio_cgroup *blkcg)
 {
 	struct request_queue *q = cfqd->queue;
-	struct backing_dev_info *bdi = &q->backing_dev_info;
 	struct cfq_group *cfqg = NULL;
 
 	/* avoid lookup for the common case where there's no blkio cgroup */
@@ -1118,13 +1104,6 @@ static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
 		cfqg = cfqg_of_blkg(blkg);
 	}
 
-	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
-		unsigned int major, minor;
-
-		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
-		cfqg->blkg.dev = MKDEV(major, minor);
-	}
-
 	return cfqg;
 }
 