path: root/block/blk-cgroup.c
author    Tejun Heo <tj@kernel.org>    2012-04-01 17:38:44 -0400
committer Tejun Heo <tj@kernel.org>    2012-04-01 17:38:44 -0400
commit    8a3d26151f24e2a2ffa550890144c3d54d2edb15 (patch)
tree      b99f5cf8db0abc02e39e5c5107f8d4342f81effa /block/blk-cgroup.c
parent    155fead9b6347ead90e0b0396cb108a6ba6126c6 (diff)
blkcg: move blkio_group_stats_cpu and friends to blk-throttle.c
blkio_group_stats_cpu is used only by blk-throtl and has no reason to
be defined in blkcg core.

* Move blkio_group_stats_cpu to blk-throttle.c and rename it to
  tg_stats_cpu.

* blkg_policy_data->stats_cpu is replaced with throtl_grp->stats_cpu.
  prfill functions updated accordingly.

* All related macros / functions are renamed so that they have tg_
  prefix and the unnecessary @pol arguments are dropped.

* Per-cpu stats allocation code is also moved from blk-cgroup.c to
  blk-throttle.c and gets simplified to only deal with
  BLKIO_POLICY_THROTL.  percpu stat free is performed by the exit
  method throtl_exit_blkio_group().

* throtl_reset_group_stats() implemented for blkio_reset_group_stats_fn
  method so that tg->stats_cpu can be reset.

Signed-off-by: Tejun Heo <tj@kernel.org>
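For orientation, the receiving side in blk-throttle.c would look roughly like
the sketch below: the per-cpu stat struct gains a tg_ prefix and the deferred
allocation worker only has to handle BLKIO_POLICY_THROTL, so it deals with a
single per-cpu area per group instead of looping over BLKIO_NR_POLICIES. This
is only a sketch reconstructed from the description above; the names
tg_stats_alloc_lock, tg_stats_alloc_list, tg_stats_alloc_fn and the throtl_grp
field stats_alloc_node are assumed counterparts of alloc_list_lock, alloc_list,
blkio_stat_alloc_fn and blkg->alloc_node removed below.

/* sketch: blkio_group_stats_cpu moved into blk-throttle.c and renamed */
struct tg_stats_cpu {
        struct blkg_rwstat      service_bytes;  /* total bytes transferred */
        struct blkg_rwstat      serviced;       /* total IOs serviced, post merge */
};

/* assumed counterparts of alloc_list / alloc_list_lock */
static DEFINE_SPINLOCK(tg_stats_alloc_lock);
static LIST_HEAD(tg_stats_alloc_list);

static void tg_stats_alloc_fn(struct work_struct *);
static DECLARE_DELAYED_WORK(tg_stats_alloc_work, tg_stats_alloc_fn);

/*
 * Worker for allocating per-cpu stats, now throttle-only: one allocation
 * per group rather than one per policy.
 */
static void tg_stats_alloc_fn(struct work_struct *work)
{
        static struct tg_stats_cpu *stats_cpu; /* carried over across retries */
        struct delayed_work *dwork = to_delayed_work(work);
        bool empty = false;

alloc_stats:
        if (!stats_cpu) {
                stats_cpu = alloc_percpu(struct tg_stats_cpu);
                if (!stats_cpu) {
                        /* allocation failed, retry after a short delay */
                        queue_delayed_work(system_nrt_wq, dwork,
                                           msecs_to_jiffies(10));
                        return;
                }
        }

        spin_lock_irq(&tg_stats_alloc_lock);

        /* hand the area to the first group still waiting, if any */
        if (!list_empty(&tg_stats_alloc_list)) {
                struct throtl_grp *tg = list_first_entry(&tg_stats_alloc_list,
                                                         struct throtl_grp,
                                                         stats_alloc_node);
                swap(tg->stats_cpu, stats_cpu);
                list_del_init(&tg->stats_alloc_node);
        }

        empty = list_empty(&tg_stats_alloc_list);
        spin_unlock_irq(&tg_stats_alloc_lock);

        if (!empty)
                goto alloc_stats;
}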
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--    block/blk-cgroup.c    98
1 file changed, 1 insertion(+), 97 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index cfdda44f4a0b..16f6ee65a593 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -30,13 +30,6 @@ static LIST_HEAD(blkio_list);
 static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
 
-/* List of groups pending per cpu stats allocation */
-static DEFINE_SPINLOCK(alloc_list_lock);
-static LIST_HEAD(alloc_list);
-
-static void blkio_stat_alloc_fn(struct work_struct *);
-static DECLARE_DELAYED_WORK(blkio_stat_alloc_work, blkio_stat_alloc_fn);
-
 struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
@@ -63,60 +56,6 @@ struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
 }
 EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
 
-/*
- * Worker for allocating per cpu stat for blk groups. This is scheduled on
- * the system_nrt_wq once there are some groups on the alloc_list waiting
- * for allocation.
- */
-static void blkio_stat_alloc_fn(struct work_struct *work)
-{
-        static void *pcpu_stats[BLKIO_NR_POLICIES];
-        struct delayed_work *dwork = to_delayed_work(work);
-        struct blkio_group *blkg;
-        int i;
-        bool empty = false;
-
-alloc_stats:
-        for (i = 0; i < BLKIO_NR_POLICIES; i++) {
-                if (pcpu_stats[i] != NULL)
-                        continue;
-
-                pcpu_stats[i] = alloc_percpu(struct blkio_group_stats_cpu);
-
-                /* Allocation failed. Try again after some time. */
-                if (pcpu_stats[i] == NULL) {
-                        queue_delayed_work(system_nrt_wq, dwork,
-                                           msecs_to_jiffies(10));
-                        return;
-                }
-        }
-
-        spin_lock_irq(&blkio_list_lock);
-        spin_lock(&alloc_list_lock);
-
-        /* cgroup got deleted or queue exited. */
-        if (!list_empty(&alloc_list)) {
-                blkg = list_first_entry(&alloc_list, struct blkio_group,
-                                        alloc_node);
-                for (i = 0; i < BLKIO_NR_POLICIES; i++) {
-                        struct blkg_policy_data *pd = blkg->pd[i];
-
-                        if (blkio_policy[i] && pd && !pd->stats_cpu)
-                                swap(pd->stats_cpu, pcpu_stats[i]);
-                }
-
-                list_del_init(&blkg->alloc_node);
-        }
-
-        empty = list_empty(&alloc_list);
-
-        spin_unlock(&alloc_list_lock);
-        spin_unlock_irq(&blkio_list_lock);
-
-        if (!empty)
-                goto alloc_stats;
-}
-
 /**
  * blkg_free - free a blkg
  * @blkg: blkg to free
@@ -140,7 +79,6 @@ static void blkg_free(struct blkio_group *blkg)
                 if (pol && pol->ops.blkio_exit_group_fn)
                         pol->ops.blkio_exit_group_fn(blkg);
 
-                free_percpu(pd->stats_cpu);
                 kfree(pd);
         }
 
@@ -167,7 +105,6 @@ static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
 
         blkg->q = q;
         INIT_LIST_HEAD(&blkg->q_node);
-        INIT_LIST_HEAD(&blkg->alloc_node);
         blkg->blkcg = blkcg;
         blkg->refcnt = 1;
         cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
@@ -245,12 +182,6 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
         hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
         list_add(&blkg->q_node, &q->blkg_list);
         spin_unlock(&blkcg->lock);
-
-        spin_lock(&alloc_list_lock);
-        list_add(&blkg->alloc_node, &alloc_list);
-        /* Queue per cpu stat allocation from worker thread. */
-        queue_delayed_work(system_nrt_wq, &blkio_stat_alloc_work, 0);
-        spin_unlock(&alloc_list_lock);
 out:
         return blkg;
 }
@@ -284,10 +215,6 @@ static void blkg_destroy(struct blkio_group *blkg)
         list_del_init(&blkg->q_node);
         hlist_del_init_rcu(&blkg->blkcg_node);
 
-        spin_lock(&alloc_list_lock);
-        list_del_init(&blkg->alloc_node);
-        spin_unlock(&alloc_list_lock);
-
         /*
          * Put the reference taken at the time of creation so that when all
          * queues are gone, group can be destroyed.
@@ -319,9 +246,6 @@ void update_root_blkg_pd(struct request_queue *q, enum blkio_policy_id plid)
         pd = kzalloc(sizeof(*pd) + pol->pdata_size, GFP_KERNEL);
         WARN_ON_ONCE(!pd);
 
-        pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
-        WARN_ON_ONCE(!pd->stats_cpu);
-
         blkg->pd[plid] = pd;
         pd->blkg = blkg;
         pol->ops.blkio_init_group_fn(blkg);
@@ -381,23 +305,6 @@ void __blkg_release(struct blkio_group *blkg)
 }
 EXPORT_SYMBOL_GPL(__blkg_release);
 
-static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
-{
-        struct blkg_policy_data *pd = blkg->pd[plid];
-        int cpu;
-
-        if (pd->stats_cpu == NULL)
-                return;
-
-        for_each_possible_cpu(cpu) {
-                struct blkio_group_stats_cpu *sc =
-                        per_cpu_ptr(pd->stats_cpu, cpu);
-
-                blkg_rwstat_reset(&sc->service_bytes);
-                blkg_rwstat_reset(&sc->serviced);
-        }
-}
-
 static int
 blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 {
@@ -416,12 +323,9 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
         hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                 struct blkio_policy_type *pol;
 
-                list_for_each_entry(pol, &blkio_list, list) {
-                        blkio_reset_stats_cpu(blkg, pol->plid);
-
+                list_for_each_entry(pol, &blkio_list, list)
                         if (pol->ops.blkio_reset_group_stats_fn)
                                 pol->ops.blkio_reset_group_stats_fn(blkg);
-                }
         }
 
         spin_unlock_irq(&blkcg->lock);
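
The per-cpu reset removed above moves behind the blkio_reset_group_stats_fn
method. A sketch of the blk-throttle.c counterpart, assuming blkg_to_tg() as
the usual blkg-to-throtl_grp accessor there:

/* sketch: per-policy reset hook replacing blkio_reset_stats_cpu() */
static void throtl_reset_group_stats(struct blkio_group *blkg)
{
        struct throtl_grp *tg = blkg_to_tg(blkg);
        int cpu;

        /* stats_cpu may still be pending allocation from the worker */
        if (tg->stats_cpu == NULL)
                return;

        for_each_possible_cpu(cpu) {
                struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

                blkg_rwstat_reset(&sc->service_bytes);
                blkg_rwstat_reset(&sc->serviced);
        }
}

This would be wired up as the throttle policy's blkio_reset_group_stats_fn,
alongside the throtl_exit_blkio_group() exit method that now frees the
per-cpu area.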