about summary refs log tree commit diff stats
path: root/block/cfq-iosched.c
diff options
context:
space:
mode:
author	Tejun Heo <tj@kernel.org>	2012-03-05 16:15:05 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-03-06 15:27:22 -0500
commit	f51b802c17e2a21926b29911493f5e7ddf6eee87 (patch)
tree	c32d9ea2a61201b0c6bf59b349300af04dbc3686 /block/cfq-iosched.c
parent	035d10b2fa7e5f7e9bf9465dbc39c35affd5ac32 (diff)
blkcg: use the usual get blkg path for root blkio_group
For root blkg, blk_throtl_init() was using throtl_alloc_tg() explicitly and cfq_init_queue() was manually initializing embedded cfqd->root_group, adding unnecessarily different code paths to blkg handling. Make both use the usual blkio_group get functions - throtl_get_tg() and cfq_get_cfqg() - for the root blkio_group too.

Note that blk_throtl_init() callsite is pushed downwards in blk_alloc_queue_node() so that @q is sufficiently initialized for throtl_get_tg().

This simplifies root blkg handling noticeably for cfq and will allow further modularization of blkcg API.

-v2: Vivek pointed out that using cfq_get_cfqg() won't work if CONFIG_CFQ_GROUP_IOSCHED is disabled. Fix it by factoring out initialization of base part of cfqg into cfq_init_cfqg_base() and alloc/init/free explicitly if !CONFIG_CFQ_GROUP_IOSCHED.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--block/cfq-iosched.c105
1 file changed, 53 insertions(+), 52 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 37e2da9cbb09..1c3f41b9d5dd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -229,7 +229,7 @@ struct cfq_data {
229 struct request_queue *queue; 229 struct request_queue *queue;
230 /* Root service tree for cfq_groups */ 230 /* Root service tree for cfq_groups */
231 struct cfq_rb_root grp_service_tree; 231 struct cfq_rb_root grp_service_tree;
232 struct cfq_group root_group; 232 struct cfq_group *root_group;
233 233
234 /* 234 /*
235 * The priority currently being served 235 * The priority currently being served
@@ -1012,6 +1012,25 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
1012 cfq_blkiocg_set_start_empty_time(&cfqg->blkg); 1012 cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
1013} 1013}
1014 1014
1015/**
1016 * cfq_init_cfqg_base - initialize base part of a cfq_group
1017 * @cfqg: cfq_group to initialize
1018 *
1019 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
1020 * is enabled or not.
1021 */
1022static void cfq_init_cfqg_base(struct cfq_group *cfqg)
1023{
1024 struct cfq_rb_root *st;
1025 int i, j;
1026
1027 for_each_cfqg_st(cfqg, i, j, st)
1028 *st = CFQ_RB_ROOT;
1029 RB_CLEAR_NODE(&cfqg->rb_node);
1030
1031 cfqg->ttime.last_end_request = jiffies;
1032}
1033
1015#ifdef CONFIG_CFQ_GROUP_IOSCHED 1034#ifdef CONFIG_CFQ_GROUP_IOSCHED
1016static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg) 1035static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
1017{ 1036{
@@ -1063,19 +1082,14 @@ static void cfq_init_add_cfqg_lists(struct cfq_data *cfqd,
1063 */ 1082 */
1064static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd) 1083static struct cfq_group * cfq_alloc_cfqg(struct cfq_data *cfqd)
1065{ 1084{
1066 struct cfq_group *cfqg = NULL; 1085 struct cfq_group *cfqg;
1067 int i, j, ret; 1086 int ret;
1068 struct cfq_rb_root *st;
1069 1087
1070 cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node); 1088 cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
1071 if (!cfqg) 1089 if (!cfqg)
1072 return NULL; 1090 return NULL;
1073 1091
1074 for_each_cfqg_st(cfqg, i, j, st) 1092 cfq_init_cfqg_base(cfqg);
1075 *st = CFQ_RB_ROOT;
1076 RB_CLEAR_NODE(&cfqg->rb_node);
1077
1078 cfqg->ttime.last_end_request = jiffies;
1079 1093
1080 /* 1094 /*
1081 * Take the initial reference that will be released on destroy 1095 * Take the initial reference that will be released on destroy
@@ -1106,7 +1120,7 @@ cfq_find_cfqg(struct cfq_data *cfqd, struct blkio_cgroup *blkcg)
1106 * Avoid lookup in this case 1120 * Avoid lookup in this case
1107 */ 1121 */
1108 if (blkcg == &blkio_root_cgroup) 1122 if (blkcg == &blkio_root_cgroup)
1109 cfqg = &cfqd->root_group; 1123 cfqg = cfqd->root_group;
1110 else 1124 else
1111 cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, cfqd->queue, 1125 cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, cfqd->queue,
1112 BLKIO_POLICY_PROP)); 1126 BLKIO_POLICY_PROP));
@@ -1166,7 +1180,7 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd,
1166 } 1180 }
1167 1181
1168 if (!cfqg) 1182 if (!cfqg)
1169 cfqg = &cfqd->root_group; 1183 cfqg = cfqd->root_group;
1170 1184
1171 cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg); 1185 cfq_init_add_cfqg_lists(cfqd, cfqg, blkcg);
1172 return cfqg; 1186 return cfqg;
@@ -1182,7 +1196,7 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1182{ 1196{
1183 /* Currently, all async queues are mapped to root group */ 1197 /* Currently, all async queues are mapped to root group */
1184 if (!cfq_cfqq_sync(cfqq)) 1198 if (!cfq_cfqq_sync(cfqq))
1185 cfqg = &cfqq->cfqd->root_group; 1199 cfqg = cfqq->cfqd->root_group;
1186 1200
1187 cfqq->cfqg = cfqg; 1201 cfqq->cfqg = cfqg;
1188 /* cfqq reference on cfqg */ 1202 /* cfqq reference on cfqg */
@@ -1283,7 +1297,7 @@ static bool cfq_clear_queue(struct request_queue *q)
1283static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, 1297static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd,
1284 struct blkio_cgroup *blkcg) 1298 struct blkio_cgroup *blkcg)
1285{ 1299{
1286 return &cfqd->root_group; 1300 return cfqd->root_group;
1287} 1301}
1288 1302
1289static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg) 1303static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
@@ -3671,9 +3685,8 @@ static void cfq_exit_queue(struct elevator_queue *e)
3671 if (wait) 3685 if (wait)
3672 synchronize_rcu(); 3686 synchronize_rcu();
3673 3687
3674#ifdef CONFIG_CFQ_GROUP_IOSCHED 3688#ifndef CONFIG_CFQ_GROUP_IOSCHED
3675 /* Free up per cpu stats for root group */ 3689 kfree(cfqd->root_group);
3676 free_percpu(cfqd->root_group.blkg.stats_cpu);
3677#endif 3690#endif
3678 kfree(cfqd); 3691 kfree(cfqd);
3679} 3692}
@@ -3681,52 +3694,40 @@ static void cfq_exit_queue(struct elevator_queue *e)
3681static int cfq_init_queue(struct request_queue *q) 3694static int cfq_init_queue(struct request_queue *q)
3682{ 3695{
3683 struct cfq_data *cfqd; 3696 struct cfq_data *cfqd;
3684 int i, j; 3697 int i;
3685 struct cfq_group *cfqg;
3686 struct cfq_rb_root *st;
3687 3698
3688 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); 3699 cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3689 if (!cfqd) 3700 if (!cfqd)
3690 return -ENOMEM; 3701 return -ENOMEM;
3691 3702
3703 cfqd->queue = q;
3704 q->elevator->elevator_data = cfqd;
3705
3692 /* Init root service tree */ 3706 /* Init root service tree */
3693 cfqd->grp_service_tree = CFQ_RB_ROOT; 3707 cfqd->grp_service_tree = CFQ_RB_ROOT;
3694 3708
3695 /* Init root group */ 3709 /* Init root group and prefer root group over other groups by default */
3696 cfqg = &cfqd->root_group;
3697 for_each_cfqg_st(cfqg, i, j, st)
3698 *st = CFQ_RB_ROOT;
3699 RB_CLEAR_NODE(&cfqg->rb_node);
3700
3701 /* Give preference to root group over other groups */
3702 cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3703
3704#ifdef CONFIG_CFQ_GROUP_IOSCHED 3710#ifdef CONFIG_CFQ_GROUP_IOSCHED
3705 /* 3711 rcu_read_lock();
3706 * Set root group reference to 2. One reference will be dropped when 3712 spin_lock_irq(q->queue_lock);
3707 * all groups on cfqd->cfqg_list are being deleted during queue exit.
3708 * Other reference will remain there as we don't want to delete this
3709 * group as it is statically allocated and gets destroyed when
3710 * throtl_data goes away.
3711 */
3712 cfqg->ref = 2;
3713 3713
3714 if (blkio_alloc_blkg_stats(&cfqg->blkg)) { 3714 cfqd->root_group = cfq_get_cfqg(cfqd, &blkio_root_cgroup);
3715 kfree(cfqg); 3715
3716 spin_unlock_irq(q->queue_lock);
3717 rcu_read_unlock();
3718#else
3719 cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
3720 GFP_KERNEL, cfqd->queue->node);
3721 if (cfqd->root_group)
3722 cfq_init_cfqg_base(cfqd->root_group);
3723#endif
3724 if (!cfqd->root_group) {
3716 kfree(cfqd); 3725 kfree(cfqd);
3717 return -ENOMEM; 3726 return -ENOMEM;
3718 } 3727 }
3719 3728
3720 rcu_read_lock(); 3729 cfqd->root_group->weight = 2*BLKIO_WEIGHT_DEFAULT;
3721 3730
3722 cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
3723 cfqd->queue, 0);
3724 rcu_read_unlock();
3725 cfqd->nr_blkcg_linked_grps++;
3726
3727 /* Add group on cfqd->cfqg_list */
3728 hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
3729#endif
3730 /* 3731 /*
3731 * Not strictly needed (since RB_ROOT just clears the node and we 3732 * Not strictly needed (since RB_ROOT just clears the node and we
3732 * zeroed cfqd on alloc), but better be safe in case someone decides 3733 * zeroed cfqd on alloc), but better be safe in case someone decides
@@ -3738,14 +3739,14 @@ static int cfq_init_queue(struct request_queue *q)
3738 /* 3739 /*
3739 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues. 3740 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3740 * Grab a permanent reference to it, so that the normal code flow 3741 * Grab a permanent reference to it, so that the normal code flow
3741 * will not attempt to free it. 3742 * will not attempt to free it. oom_cfqq is linked to root_group
3743 * but shouldn't hold a reference as it'll never be unlinked. Lose
3744 * the reference from linking right away.
3742 */ 3745 */
3743 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); 3746 cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3744 cfqd->oom_cfqq.ref++; 3747 cfqd->oom_cfqq.ref++;
3745 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group); 3748 cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
3746 3749 cfq_put_cfqg(cfqd->root_group);
3747 cfqd->queue = q;
3748 q->elevator->elevator_data = cfqd;
3749 3750
3750 init_timer(&cfqd->idle_slice_timer); 3751 init_timer(&cfqd->idle_slice_timer);
3751 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; 3752 cfqd->idle_slice_timer.function = cfq_idle_slice_timer;