author	Tejun Heo <tj@kernel.org>	2012-03-05 16:15:18 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-03-06 15:27:23 -0500
commit	4eef3049986e8397d5003916aed8cad6567a5e02 (patch)
tree	5e36431986907b9fa728a6fbeb819a2738a64c82 /block
parent	c1768268f9424410761da57ea71107acae7b03cc (diff)
blkcg: move per-queue blkg list heads and counters to queue and blkg
Currently, specific policy implementations are responsible for
maintaining list and number of blkgs. This duplicates code
unnecessarily, and hinders factoring common code and providing blkcg
API with better defined semantics.

After this patch, request_queue hosts list heads and counters and blkg
has list nodes for both policies. This patch only relocates the
necessary fields and the next patch will actually move management code
into blkcg core.

Note that request_queue->blkg_list[] and ->nr_blkgs[] are hardcoded to
have 2 elements. This is to avoid include dependency and will be
removed by the next patch.

This patch doesn't introduce any behavior change.

-v2: Now unnecessary conditional on CONFIG_BLK_CGROUP_MODULE removed
     as pointed out by Vivek.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
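For orientation, the relocated fields end up shaped roughly as below. The
blkio_group side is taken from the blk-cgroup.h hunk in this diff; the
request_queue side lives in include/linux/blkdev.h, which this diffstat
(limited to 'block') does not show, so its exact declaration here is a
sketch rather than part of this patch:

	/* blkcg side, per the block/blk-cgroup.h hunk below */
	struct blkio_group {
		struct request_queue __rcu *q;
		/* one queue-side list node per policy (BLKIO_NR_POLICIES == 2) */
		struct list_head q_node[BLKIO_NR_POLICIES];
		/* ... remaining fields unchanged ... */
	};

	/* queue side; presumed shape of the blkdev.h change, not shown here */
	struct request_queue {
		/* ... */
	#ifdef CONFIG_BLK_CGROUP
		/* hardcoded to 2 elements to dodge the include dependency noted above */
		struct list_head blkg_list[2];
		int nr_blkgs[2];
	#endif
		/* ... */
	};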
Diffstat (limited to 'block')
-rw-r--r--	block/blk-cgroup.c	2
-rw-r--r--	block/blk-cgroup.h	1
-rw-r--r--	block/blk-core.c	4
-rw-r--r--	block/blk-throttle.c	49
-rw-r--r--	block/cfq-iosched.c	47
5 files changed, 49 insertions(+), 54 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 91f9824be5cc..e940972ccd66 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -499,6 +499,8 @@ static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
 
 	spin_lock_init(&blkg->stats_lock);
 	rcu_assign_pointer(blkg->q, q);
+	INIT_LIST_HEAD(&blkg->q_node[0]);
+	INIT_LIST_HEAD(&blkg->q_node[1]);
 	blkg->blkcg = blkcg;
 	blkg->plid = pol->plid;
 	blkg->refcnt = 1;
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 60e96b4be4ce..ae96f196d469 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -178,6 +178,7 @@ struct blkg_policy_data {
 struct blkio_group {
 	/* Pointer to the associated request_queue, RCU protected */
 	struct request_queue __rcu *q;
+	struct list_head q_node[BLKIO_NR_POLICIES];
 	struct hlist_node blkcg_node;
 	struct blkio_cgroup *blkcg;
 	/* Store cgroup path */
diff --git a/block/blk-core.c b/block/blk-core.c
index c3434c6395b9..83a47fcf5946 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -547,6 +547,10 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	INIT_LIST_HEAD(&q->queue_head);
 	INIT_LIST_HEAD(&q->timeout_list);
 	INIT_LIST_HEAD(&q->icq_list);
+#ifdef CONFIG_BLK_CGROUP
+	INIT_LIST_HEAD(&q->blkg_list[0]);
+	INIT_LIST_HEAD(&q->blkg_list[1]);
+#endif
 	INIT_LIST_HEAD(&q->flush_queue[0]);
 	INIT_LIST_HEAD(&q->flush_queue[1]);
 	INIT_LIST_HEAD(&q->flush_data_in_flight);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index b2fddaf20b98..c15d38307e1d 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -41,9 +41,6 @@ struct throtl_rb_root {
 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
 
 struct throtl_grp {
-	/* List of throtl groups on the request queue*/
-	struct hlist_node tg_node;
-
 	/* active throtl group service_tree member */
 	struct rb_node rb_node;
 
@@ -83,9 +80,6 @@ struct throtl_grp {
 
 struct throtl_data
 {
-	/* List of throtl groups */
-	struct hlist_head tg_list;
-
 	/* service tree for active throtl groups */
 	struct throtl_rb_root tg_service_tree;
 
@@ -152,7 +146,6 @@ static void throtl_init_blkio_group(struct blkio_group *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 
-	INIT_HLIST_NODE(&tg->tg_node);
 	RB_CLEAR_NODE(&tg->rb_node);
 	bio_list_init(&tg->bio_lists[0]);
 	bio_list_init(&tg->bio_lists[1]);
@@ -167,11 +160,9 @@ static void throtl_init_blkio_group(struct blkio_group *blkg)
 static void throtl_link_blkio_group(struct request_queue *q,
 				    struct blkio_group *blkg)
 {
-	struct throtl_data *td = q->td;
-	struct throtl_grp *tg = blkg_to_tg(blkg);
-
-	hlist_add_head(&tg->tg_node, &td->tg_list);
-	td->nr_undestroyed_grps++;
+	list_add(&blkg->q_node[BLKIO_POLICY_THROTL],
+		 &q->blkg_list[BLKIO_POLICY_THROTL]);
+	q->nr_blkgs[BLKIO_POLICY_THROTL]++;
 }
 
 static struct
@@ -711,8 +702,8 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
 
 static void throtl_process_limit_change(struct throtl_data *td)
 {
-	struct throtl_grp *tg;
-	struct hlist_node *pos, *n;
+	struct request_queue *q = td->queue;
+	struct blkio_group *blkg, *n;
 
 	if (!td->limits_changed)
 		return;
@@ -721,7 +712,10 @@ static void throtl_process_limit_change(struct throtl_data *td)
 
 	throtl_log(td, "limits changed");
 
-	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+	list_for_each_entry_safe(blkg, n, &q->blkg_list[BLKIO_POLICY_THROTL],
+				 q_node[BLKIO_POLICY_THROTL]) {
+		struct throtl_grp *tg = blkg_to_tg(blkg);
+
 		if (!tg->limits_changed)
 			continue;
 
@@ -822,26 +816,31 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 static void
 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
 {
+	struct blkio_group *blkg = tg_to_blkg(tg);
+
 	/* Something wrong if we are trying to remove same group twice */
-	BUG_ON(hlist_unhashed(&tg->tg_node));
+	WARN_ON_ONCE(list_empty(&blkg->q_node[BLKIO_POLICY_THROTL]));
 
-	hlist_del_init(&tg->tg_node);
+	list_del_init(&blkg->q_node[BLKIO_POLICY_THROTL]);
 
 	/*
 	 * Put the reference taken at the time of creation so that when all
 	 * queues are gone, group can be destroyed.
 	 */
 	blkg_put(tg_to_blkg(tg));
-	td->nr_undestroyed_grps--;
+	td->queue->nr_blkgs[BLKIO_POLICY_THROTL]--;
 }
 
 static bool throtl_release_tgs(struct throtl_data *td, bool release_root)
 {
-	struct hlist_node *pos, *n;
-	struct throtl_grp *tg;
+	struct request_queue *q = td->queue;
+	struct blkio_group *blkg, *n;
 	bool empty = true;
 
-	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+	list_for_each_entry_safe(blkg, n, &q->blkg_list[BLKIO_POLICY_THROTL],
+				 q_node[BLKIO_POLICY_THROTL]) {
+		struct throtl_grp *tg = blkg_to_tg(blkg);
+
 		/* skip root? */
 		if (!release_root && tg == td->root_tg)
 			continue;
@@ -851,7 +850,7 @@ static bool throtl_release_tgs(struct throtl_data *td, bool release_root)
 		 * it from cgroup list, then it will take care of destroying
 		 * cfqg also.
 		 */
-		if (!blkiocg_del_blkio_group(tg_to_blkg(tg)))
+		if (!blkiocg_del_blkio_group(blkg))
 			throtl_destroy_tg(td, tg);
 		else
 			empty = false;
@@ -1114,7 +1113,6 @@ int blk_throtl_init(struct request_queue *q)
 	if (!td)
 		return -ENOMEM;
 
-	INIT_HLIST_HEAD(&td->tg_list);
 	td->tg_service_tree = THROTL_RB_ROOT;
 	td->limits_changed = false;
 	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
@@ -1144,7 +1142,7 @@ int blk_throtl_init(struct request_queue *q)
 void blk_throtl_exit(struct request_queue *q)
 {
 	struct throtl_data *td = q->td;
-	bool wait = false;
+	bool wait;
 
 	BUG_ON(!td);
 
@@ -1154,8 +1152,7 @@ void blk_throtl_exit(struct request_queue *q)
 	throtl_release_tgs(td, true);
 
 	/* If there are other groups */
-	if (td->nr_undestroyed_grps > 0)
-		wait = true;
+	wait = q->nr_blkgs[BLKIO_POLICY_THROTL];
 
 	spin_unlock_irq(q->queue_lock);
 
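At this point the throttle hunks above and the cfq hunks below differ only
in the policy index (BLKIO_POLICY_THROTL vs BLKIO_POLICY_PROP). The
duplicated bookkeeping the next patch can hoist into blkcg core is
essentially the following; blkg_link()/blkg_unlink() are hypothetical
names used for illustration, not functions introduced by this patch:

	static void blkg_link(struct request_queue *q, struct blkio_group *blkg,
			      int plid)
	{
		/* same as throtl_link_blkio_group()/cfq_link_blkio_group() */
		list_add(&blkg->q_node[plid], &q->blkg_list[plid]);
		q->nr_blkgs[plid]++;
	}

	static void blkg_unlink(struct request_queue *q, struct blkio_group *blkg,
				int plid)
	{
		/* something is wrong if the same group is removed twice */
		WARN_ON_ONCE(list_empty(&blkg->q_node[plid]));
		list_del_init(&blkg->q_node[plid]);
		q->nr_blkgs[plid]--;
	}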
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 11dd9d7f2edb..e846803280a6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -208,9 +208,7 @@ struct cfq_group {
 	unsigned long saved_workload_slice;
 	enum wl_type_t saved_workload;
 	enum wl_prio_t saved_serving_prio;
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-	struct hlist_node cfqd_node;
-#endif
+
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
 	struct cfq_ttime ttime;
@@ -302,12 +300,6 @@ struct cfq_data {
 	struct cfq_queue oom_cfqq;
 
 	unsigned long last_delayed_sync;
-
-	/* List of cfq groups being managed on this device*/
-	struct hlist_head cfqg_list;
-
-	/* Number of groups which are on blkcg->blkg_list */
-	unsigned int nr_blkcg_linked_grps;
 };
 
 static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
@@ -1056,13 +1048,9 @@ static void cfq_update_blkio_group_weight(struct request_queue *q,
 static void cfq_link_blkio_group(struct request_queue *q,
 				 struct blkio_group *blkg)
 {
-	struct cfq_data *cfqd = q->elevator->elevator_data;
-	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
-
-	cfqd->nr_blkcg_linked_grps++;
-
-	/* Add group on cfqd list */
-	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
+	list_add(&blkg->q_node[BLKIO_POLICY_PROP],
+		 &q->blkg_list[BLKIO_POLICY_PROP]);
+	q->nr_blkgs[BLKIO_POLICY_PROP]++;
 }
 
 static void cfq_init_blkio_group(struct blkio_group *blkg)
@@ -1110,13 +1098,15 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 
 static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
+	struct blkio_group *blkg = cfqg_to_blkg(cfqg);
+
 	/* Something wrong if we are trying to remove same group twice */
-	BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
+	BUG_ON(list_empty(&blkg->q_node[BLKIO_POLICY_PROP]));
 
-	hlist_del_init(&cfqg->cfqd_node);
+	list_del_init(&blkg->q_node[BLKIO_POLICY_PROP]);
 
-	BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
-	cfqd->nr_blkcg_linked_grps--;
+	BUG_ON(cfqd->queue->nr_blkgs[BLKIO_POLICY_PROP] <= 0);
+	cfqd->queue->nr_blkgs[BLKIO_POLICY_PROP]--;
 
 	/*
 	 * Put the reference taken at the time of creation so that when all
@@ -1127,18 +1117,19 @@ static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
 static bool cfq_release_cfq_groups(struct cfq_data *cfqd)
 {
-	struct hlist_node *pos, *n;
-	struct cfq_group *cfqg;
+	struct request_queue *q = cfqd->queue;
+	struct blkio_group *blkg, *n;
 	bool empty = true;
 
-	hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
+	list_for_each_entry_safe(blkg, n, &q->blkg_list[BLKIO_POLICY_PROP],
+				 q_node[BLKIO_POLICY_PROP]) {
 		/*
 		 * If cgroup removal path got to blk_group first and removed
 		 * it from cgroup list, then it will take care of destroying
 		 * cfqg also.
 		 */
-		if (!cfq_blkiocg_del_blkio_group(cfqg_to_blkg(cfqg)))
-			cfq_destroy_cfqg(cfqd, cfqg);
+		if (!cfq_blkiocg_del_blkio_group(blkg))
+			cfq_destroy_cfqg(cfqd, blkg_to_cfqg(blkg));
 		else
 			empty = false;
 	}
@@ -3558,13 +3549,13 @@ static void cfq_exit_queue(struct elevator_queue *e)
 	cfq_put_async_queues(cfqd);
 	cfq_release_cfq_groups(cfqd);
 
+#ifdef CONFIG_BLK_CGROUP
 	/*
 	 * If there are groups which we could not unlink from blkcg list,
 	 * wait for a rcu period for them to be freed.
 	 */
-	if (cfqd->nr_blkcg_linked_grps)
-		wait = true;
-
+	wait = q->nr_blkgs[BLKIO_POLICY_PROP];
+#endif
 
 	spin_unlock_irq(q->queue_lock);
 
 	cfq_shutdown_timer_wq(cfqd);
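Both release paths also now share one traversal idiom. A minimal sketch,
with plid standing for either BLKIO_POLICY_PROP or BLKIO_POLICY_THROTL:

	struct blkio_group *blkg, *n;

	list_for_each_entry_safe(blkg, n, &q->blkg_list[plid], q_node[plid]) {
		/* per-policy data is recovered via blkg_to_tg()/blkg_to_cfqg() */
	}

The switch from hlist_unhashed() to list_empty() on the node works because
list_del_init() re-points the node at itself, so an unlinked q_node[]
entry looks like an empty list.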