aboutsummaryrefslogtreecommitdiffstats
path: root/block/blk-cgroup.h
diff options
context:
space:
mode:
authorVivek Goyal <vgoyal@redhat.com>2011-05-19 15:38:28 -0400
committerJens Axboe <jaxboe@fusionio.com>2011-05-20 14:34:52 -0400
commit5624a4e445e2ec27582984b068d7bf7f127cee10 (patch)
tree0827c83c6e5f5fa83bd0dadc1bc395c0f0657dae /block/blk-cgroup.h
parent4843c69d496a8d2e4caab6182fe016b9a79136e0 (diff)
blk-throttle: Make dispatch stats per cpu
Currently we take the blkg_stat lock even just to update the stats. So even if a group has no throttling rules (the common case for the root group), we end up taking blkg_lock to update the stats. Make the dispatch stats per-cpu so that they can be updated without taking the blkg lock. If a cpu goes offline, these stats simply disappear; no protection has been provided for that yet. Do we really need anything for that? Signed-off-by: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/blk-cgroup.h')
-rw-r--r--block/blk-cgroup.h27
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 63f1ef4450d7..fd730a24b491 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -36,10 +36,6 @@ enum stat_type {
36 * request completion for IOs doen by this cgroup. This may not be 36 * request completion for IOs doen by this cgroup. This may not be
37 * accurate when NCQ is turned on. */ 37 * accurate when NCQ is turned on. */
38 BLKIO_STAT_SERVICE_TIME = 0, 38 BLKIO_STAT_SERVICE_TIME = 0,
39 /* Total bytes transferred */
40 BLKIO_STAT_SERVICE_BYTES,
41 /* Total IOs serviced, post merge */
42 BLKIO_STAT_SERVICED,
43 /* Total time spent waiting in scheduler queue in ns */ 39 /* Total time spent waiting in scheduler queue in ns */
44 BLKIO_STAT_WAIT_TIME, 40 BLKIO_STAT_WAIT_TIME,
45 /* Number of IOs merged */ 41 /* Number of IOs merged */
@@ -48,7 +44,6 @@ enum stat_type {
48 BLKIO_STAT_QUEUED, 44 BLKIO_STAT_QUEUED,
49 /* All the single valued stats go below this */ 45 /* All the single valued stats go below this */
50 BLKIO_STAT_TIME, 46 BLKIO_STAT_TIME,
51 BLKIO_STAT_SECTORS,
52#ifdef CONFIG_DEBUG_BLK_CGROUP 47#ifdef CONFIG_DEBUG_BLK_CGROUP
53 /* Time not charged to this cgroup */ 48 /* Time not charged to this cgroup */
54 BLKIO_STAT_UNACCOUNTED_TIME, 49 BLKIO_STAT_UNACCOUNTED_TIME,
@@ -60,6 +55,16 @@ enum stat_type {
60#endif 55#endif
61}; 56};
62 57
58/* Per cpu stats */
59enum stat_type_cpu {
60 BLKIO_STAT_CPU_SECTORS,
61 /* Total bytes transferred */
62 BLKIO_STAT_CPU_SERVICE_BYTES,
63 /* Total IOs serviced, post merge */
64 BLKIO_STAT_CPU_SERVICED,
65 BLKIO_STAT_CPU_NR
66};
67
63enum stat_sub_type { 68enum stat_sub_type {
64 BLKIO_STAT_READ = 0, 69 BLKIO_STAT_READ = 0,
65 BLKIO_STAT_WRITE, 70 BLKIO_STAT_WRITE,
@@ -116,7 +121,6 @@ struct blkio_cgroup {
116struct blkio_group_stats { 121struct blkio_group_stats {
117 /* total disk time and nr sectors dispatched by this group */ 122 /* total disk time and nr sectors dispatched by this group */
118 uint64_t time; 123 uint64_t time;
119 uint64_t sectors;
120 uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL]; 124 uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
121#ifdef CONFIG_DEBUG_BLK_CGROUP 125#ifdef CONFIG_DEBUG_BLK_CGROUP
122 /* Time not charged to this cgroup */ 126 /* Time not charged to this cgroup */
@@ -146,6 +150,12 @@ struct blkio_group_stats {
146#endif 150#endif
147}; 151};
148 152
153/* Per cpu blkio group stats */
154struct blkio_group_stats_cpu {
155 uint64_t sectors;
156 uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
157};
158
149struct blkio_group { 159struct blkio_group {
150 /* An rcu protected unique identifier for the group */ 160 /* An rcu protected unique identifier for the group */
151 void *key; 161 void *key;
@@ -161,6 +171,8 @@ struct blkio_group {
161 /* Need to serialize the stats in the case of reset/update */ 171 /* Need to serialize the stats in the case of reset/update */
162 spinlock_t stats_lock; 172 spinlock_t stats_lock;
163 struct blkio_group_stats stats; 173 struct blkio_group_stats stats;
174 /* Per cpu stats pointer */
175 struct blkio_group_stats_cpu __percpu *stats_cpu;
164}; 176};
165 177
166struct blkio_policy_node { 178struct blkio_policy_node {
@@ -296,6 +308,7 @@ extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
296extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, 308extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
297 struct blkio_group *blkg, void *key, dev_t dev, 309 struct blkio_group *blkg, void *key, dev_t dev,
298 enum blkio_policy_id plid); 310 enum blkio_policy_id plid);
311extern int blkio_alloc_blkg_stats(struct blkio_group *blkg);
299extern int blkiocg_del_blkio_group(struct blkio_group *blkg); 312extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
300extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, 313extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
301 void *key); 314 void *key);
@@ -323,6 +336,8 @@ static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
323 struct blkio_group *blkg, void *key, dev_t dev, 336 struct blkio_group *blkg, void *key, dev_t dev,
324 enum blkio_policy_id plid) {} 337 enum blkio_policy_id plid) {}
325 338
339static inline int blkio_alloc_blkg_stats(struct blkio_group *blkg) { return 0; }
340
326static inline int 341static inline int
327blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; } 342blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }
328 343