Diffstat (limited to 'block')
-rw-r--r--   block/blk-cgroup.c   | 245
-rw-r--r--   block/blk-cgroup.h   |  94
-rw-r--r--   block/blk-throttle.c |  37
-rw-r--r--   block/cfq-iosched.c  | 280
4 files changed, 259 insertions(+), 397 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b963fb4b3995..821a0a393e85 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -63,251 +63,6 @@ struct blkio_cgroup *bio_blkio_cgroup(struct bio *bio)
 }
 EXPORT_SYMBOL_GPL(bio_blkio_cgroup);
 
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-/* This should be called with the queue_lock held. */
-static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
-					    struct blkio_policy_type *pol,
-					    struct blkio_group *curr_blkg)
-{
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-
-	if (blkio_blkg_waiting(&pd->stats))
-		return;
-	if (blkg == curr_blkg)
-		return;
-	pd->stats.start_group_wait_time = sched_clock();
-	blkio_mark_blkg_waiting(&pd->stats);
-}
-
-/* This should be called with the queue_lock held. */
-static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
-{
-	unsigned long long now;
-
-	if (!blkio_blkg_waiting(stats))
-		return;
-
-	now = sched_clock();
-	if (time_after64(now, stats->start_group_wait_time))
-		blkg_stat_add(&stats->group_wait_time,
-			      now - stats->start_group_wait_time);
-	blkio_clear_blkg_waiting(stats);
-}
-
-/* This should be called with the queue_lock held. */
-static void blkio_end_empty_time(struct blkio_group_stats *stats)
-{
-	unsigned long long now;
-
-	if (!blkio_blkg_empty(stats))
-		return;
-
-	now = sched_clock();
-	if (time_after64(now, stats->start_empty_time))
-		blkg_stat_add(&stats->empty_time,
-			      now - stats->start_empty_time);
-	blkio_clear_blkg_empty(stats);
-}
-
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
-					struct blkio_policy_type *pol)
-{
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
-	lockdep_assert_held(blkg->q->queue_lock);
-	BUG_ON(blkio_blkg_idling(stats));
-
-	stats->start_idle_time = sched_clock();
-	blkio_mark_blkg_idling(stats);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
-
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
-				    struct blkio_policy_type *pol)
-{
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	if (blkio_blkg_idling(stats)) {
-		unsigned long long now = sched_clock();
-
-		if (time_after64(now, stats->start_idle_time))
-			blkg_stat_add(&stats->idle_time,
-				      now - stats->start_idle_time);
-		blkio_clear_blkg_idling(stats);
-	}
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
-
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
-					 struct blkio_policy_type *pol)
-{
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	blkg_stat_add(&stats->avg_queue_size_sum,
-		      blkg_rwstat_sum(&stats->queued));
-	blkg_stat_add(&stats->avg_queue_size_samples, 1);
-	blkio_update_group_wait_time(stats);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
-
-void blkiocg_set_start_empty_time(struct blkio_group *blkg,
-				  struct blkio_policy_type *pol)
-{
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	if (blkg_rwstat_sum(&stats->queued))
-		return;
-
-	/*
-	 * group is already marked empty. This can happen if cfqq got new
-	 * request in parent group and moved to this group while being added
-	 * to service tree. Just ignore the event and move on.
-	 */
-	if (blkio_blkg_empty(stats))
-		return;
-
-	stats->start_empty_time = sched_clock();
-	blkio_mark_blkg_empty(stats);
-}
-EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
-
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-				  struct blkio_policy_type *pol,
-				  unsigned long dequeue)
-{
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	blkg_stat_add(&pd->stats.dequeue, dequeue);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
-#else
-static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
-					struct blkio_policy_type *pol,
-					struct blkio_group *curr_blkg) { }
-static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
-#endif
-
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
-				 struct blkio_policy_type *pol,
-				 struct blkio_group *curr_blkg, bool direction,
-				 bool sync)
-{
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	blkg_rwstat_add(&stats->queued, rw, 1);
-	blkio_end_empty_time(stats);
-	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
-
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-				    struct blkio_policy_type *pol,
-				    bool direction, bool sync)
-{
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	blkg_rwstat_add(&stats->queued, rw, -1);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
-
-void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-				   struct blkio_policy_type *pol,
-				   unsigned long time,
-				   unsigned long unaccounted_time)
-{
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	blkg_stat_add(&stats->time, time);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-	blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
-#endif
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
-
-/*
- * should be called under rcu read lock or queue lock to make sure blkg pointer
- * is valid.
- */
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-				   struct blkio_policy_type *pol,
-				   uint64_t bytes, bool direction, bool sync)
-{
-	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-	struct blkio_group_stats_cpu *stats_cpu;
-	unsigned long flags;
-
-	/* If per cpu stats are not allocated yet, don't do any accounting. */
-	if (pd->stats_cpu == NULL)
-		return;
-
-	/*
-	 * Disabling interrupts to provide mutual exclusion between two
-	 * writes on same cpu. It probably is not needed for 64bit. Not
-	 * optimizing that case yet.
-	 */
-	local_irq_save(flags);
-
-	stats_cpu = this_cpu_ptr(pd->stats_cpu);
-
-	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
-	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
-	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
-
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
-
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
-				     struct blkio_policy_type *pol,
-				     uint64_t start_time,
-				     uint64_t io_start_time, bool direction,
-				     bool sync)
-{
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-	unsigned long long now = sched_clock();
-	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	if (time_after64(now, io_start_time))
-		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
-	if (time_after64(io_start_time, start_time))
-		blkg_rwstat_add(&stats->wait_time, rw,
-				io_start_time - start_time);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
-
-/* Merged stats are per cpu. */
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-				    struct blkio_policy_type *pol,
-				    bool direction, bool sync)
-{
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	blkg_rwstat_add(&stats->merged, rw, 1);
-}
-EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
-
 /*
  * Worker for allocating per cpu stat for blk groups. This is scheduled on
  * the system_nrt_wq once there are some groups on the alloc_list waiting
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index ba64b2857571..0b0a176ee007 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -44,13 +44,6 @@ enum blkg_rwstat_type {
 	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
 };
 
-/* blkg state flags */
-enum blkg_state_flags {
-	BLKG_waiting = 0,
-	BLKG_idling,
-	BLKG_empty,
-};
-
 struct blkio_cgroup {
 	struct cgroup_subsys_state css;
 	unsigned int weight;
@@ -416,52 +409,6 @@ static inline void blkg_put(struct blkio_group *blkg) { }
 #define BLKIO_WEIGHT_MAX	1000
 #define BLKIO_WEIGHT_DEFAULT	500
 
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
-					 struct blkio_policy_type *pol);
-void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-				  struct blkio_policy_type *pol,
-				  unsigned long dequeue);
-void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
-					struct blkio_policy_type *pol);
-void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
-				    struct blkio_policy_type *pol);
-void blkiocg_set_start_empty_time(struct blkio_group *blkg,
-				  struct blkio_policy_type *pol);
-
-#define BLKG_FLAG_FNS(name) \
-static inline void blkio_mark_blkg_##name( \
-		struct blkio_group_stats *stats) \
-{ \
-	stats->flags |= (1 << BLKG_##name); \
-} \
-static inline void blkio_clear_blkg_##name( \
-		struct blkio_group_stats *stats) \
-{ \
-	stats->flags &= ~(1 << BLKG_##name); \
-} \
-static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \
-{ \
-	return (stats->flags & (1 << BLKG_##name)) != 0; \
-} \
-
-BLKG_FLAG_FNS(waiting)
-BLKG_FLAG_FNS(idling)
-BLKG_FLAG_FNS(empty)
-#undef BLKG_FLAG_FNS
-#else
-static inline void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol) { }
-static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, unsigned long dequeue) { }
-static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol) { }
-static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol) { }
-static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg,
-			struct blkio_policy_type *pol) { }
-#endif
-
 #ifdef CONFIG_BLK_CGROUP
 extern struct blkio_cgroup blkio_root_cgroup;
 extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
@@ -471,28 +418,6 @@ extern struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
 struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 				       struct request_queue *q,
 				       bool for_root);
-void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-				   struct blkio_policy_type *pol,
-				   unsigned long time,
-				   unsigned long unaccounted_time);
-void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-				   struct blkio_policy_type *pol,
-				   uint64_t bytes, bool direction, bool sync);
-void blkiocg_update_completion_stats(struct blkio_group *blkg,
-				     struct blkio_policy_type *pol,
-				     uint64_t start_time,
-				     uint64_t io_start_time, bool direction,
-				     bool sync);
-void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-				    struct blkio_policy_type *pol,
-				    bool direction, bool sync);
-void blkiocg_update_io_add_stats(struct blkio_group *blkg,
-				 struct blkio_policy_type *pol,
-				 struct blkio_group *curr_blkg, bool direction,
-				 bool sync);
-void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-				    struct blkio_policy_type *pol,
-				    bool direction, bool sync);
 #else
 struct cgroup;
 static inline struct blkio_cgroup *
@@ -502,24 +427,5 @@ bio_blkio_cgroup(struct bio *bio) { return NULL; }
 
 static inline struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
 					      void *key) { return NULL; }
-static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, unsigned long time,
-			unsigned long unaccounted_time) { }
-static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, uint64_t bytes,
-			bool direction, bool sync) { }
-static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, uint64_t start_time,
-			uint64_t io_start_time, bool direction, bool sync) { }
-static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, bool direction,
-			bool sync) { }
-static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol,
-			struct blkio_group *curr_blkg, bool direction,
-			bool sync) { }
-static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, bool direction,
-			bool sync) { }
 #endif
 #endif	/* _BLK_CGROUP_H */
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index fb6f25778fb2..5d647edc02a1 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -562,17 +562,42 @@ static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
 	return 0;
 }
 
+static void throtl_update_dispatch_stats(struct blkio_group *blkg, u64 bytes,
+					 int rw)
+{
+	struct blkg_policy_data *pd = blkg->pd[BLKIO_POLICY_THROTL];
+	struct blkio_group_stats_cpu *stats_cpu;
+	unsigned long flags;
+
+	/* If per cpu stats are not allocated yet, don't do any accounting. */
+	if (pd->stats_cpu == NULL)
+		return;
+
+	/*
+	 * Disabling interrupts to provide mutual exclusion between two
+	 * writes on same cpu. It probably is not needed for 64bit. Not
+	 * optimizing that case yet.
+	 */
+	local_irq_save(flags);
+
+	stats_cpu = this_cpu_ptr(pd->stats_cpu);
+
+	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
+	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
+	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+
+	local_irq_restore(flags);
+}
+
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
-	bool sync = rw_is_sync(bio->bi_rw);
 
 	/* Charge the bio to the group */
 	tg->bytes_disp[rw] += bio->bi_size;
 	tg->io_disp[rw]++;
 
-	blkiocg_update_dispatch_stats(tg_to_blkg(tg), &blkio_policy_throtl,
-				      bio->bi_size, rw, sync);
+	throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, bio->bi_rw);
 }
 
 static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
@@ -1012,10 +1037,8 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	tg = throtl_lookup_tg(td, blkcg);
 	if (tg) {
 		if (tg_no_rule_group(tg, rw)) {
-			blkiocg_update_dispatch_stats(tg_to_blkg(tg),
-						      &blkio_policy_throtl,
-						      bio->bi_size, rw,
-						      rw_is_sync(bio->bi_rw));
+			throtl_update_dispatch_stats(tg_to_blkg(tg),
+						     bio->bi_size, bio->bi_rw);
 			goto out_unlock_rcu;
 		}
 	}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 2e13e9e689bd..49913804e8dd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -15,6 +15,7 @@
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
 #include "blk.h"
+#include "blk-cgroup.h"
 
 static struct blkio_policy_type blkio_policy_cfq;
 
@@ -365,9 +366,177 @@ CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
 
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
+#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 
-#include "blk-cgroup.h"
+/* blkg state flags */
+enum blkg_state_flags {
+	BLKG_waiting = 0,
+	BLKG_idling,
+	BLKG_empty,
+};
+
+#define BLKG_FLAG_FNS(name) \
+static inline void blkio_mark_blkg_##name( \
+		struct blkio_group_stats *stats) \
+{ \
+	stats->flags |= (1 << BLKG_##name); \
+} \
+static inline void blkio_clear_blkg_##name( \
+		struct blkio_group_stats *stats) \
+{ \
+	stats->flags &= ~(1 << BLKG_##name); \
+} \
+static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \
+{ \
+	return (stats->flags & (1 << BLKG_##name)) != 0; \
+} \
+
+BLKG_FLAG_FNS(waiting)
+BLKG_FLAG_FNS(idling)
+BLKG_FLAG_FNS(empty)
+#undef BLKG_FLAG_FNS
+
+/* This should be called with the queue_lock held. */
+static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
+{
+	unsigned long long now;
+
+	if (!blkio_blkg_waiting(stats))
+		return;
+
+	now = sched_clock();
+	if (time_after64(now, stats->start_group_wait_time))
+		blkg_stat_add(&stats->group_wait_time,
+			      now - stats->start_group_wait_time);
+	blkio_clear_blkg_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+					    struct blkio_policy_type *pol,
+					    struct blkio_group *curr_blkg)
+{
+	struct blkg_policy_data *pd = blkg->pd[pol->plid];
+
+	if (blkio_blkg_waiting(&pd->stats))
+		return;
+	if (blkg == curr_blkg)
+		return;
+	pd->stats.start_group_wait_time = sched_clock();
+	blkio_mark_blkg_waiting(&pd->stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void blkio_end_empty_time(struct blkio_group_stats *stats)
+{
+	unsigned long long now;
+
+	if (!blkio_blkg_empty(stats))
+		return;
+
+	now = sched_clock();
+	if (time_after64(now, stats->start_empty_time))
+		blkg_stat_add(&stats->empty_time,
+			      now - stats->start_empty_time);
+	blkio_clear_blkg_empty(stats);
+}
+
+static void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+					     struct blkio_policy_type *pol,
+					     unsigned long dequeue)
+{
+	struct blkg_policy_data *pd = blkg->pd[pol->plid];
+
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	blkg_stat_add(&pd->stats.dequeue, dequeue);
+}
+
+static void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
+					     struct blkio_policy_type *pol)
+{
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	if (blkg_rwstat_sum(&stats->queued))
+		return;
+
+	/*
+	 * group is already marked empty. This can happen if cfqq got new
+	 * request in parent group and moved to this group while being added
+	 * to service tree. Just ignore the event and move on.
+	 */
+	if (blkio_blkg_empty(stats))
+		return;
+
+	stats->start_empty_time = sched_clock();
+	blkio_mark_blkg_empty(stats);
+}
+
+static void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+					       struct blkio_policy_type *pol)
+{
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	if (blkio_blkg_idling(stats)) {
+		unsigned long long now = sched_clock();
+
+		if (time_after64(now, stats->start_idle_time))
+			blkg_stat_add(&stats->idle_time,
+				      now - stats->start_idle_time);
+		blkio_clear_blkg_idling(stats);
+	}
+}
+
+static void cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+						   struct blkio_policy_type *pol)
+{
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+	lockdep_assert_held(blkg->q->queue_lock);
+	BUG_ON(blkio_blkg_idling(stats));
+
+	stats->start_idle_time = sched_clock();
+	blkio_mark_blkg_idling(stats);
+}
+
+static void cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+						    struct blkio_policy_type *pol)
+{
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	blkg_stat_add(&stats->avg_queue_size_sum,
+		      blkg_rwstat_sum(&stats->queued));
+	blkg_stat_add(&stats->avg_queue_size_samples, 1);
+	blkio_update_group_wait_time(stats);
+}
+
+#else /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+					    struct blkio_policy_type *pol,
+					    struct blkio_group *curr_blkg) { }
+static void blkio_end_empty_time(struct blkio_group_stats *stats) { }
+static void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+					     struct blkio_policy_type *pol,
+					     unsigned long dequeue) { }
+static void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
+					     struct blkio_policy_type *pol) { }
+static void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+					       struct blkio_policy_type *pol) { }
+static void cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+						   struct blkio_policy_type *pol) { }
+static void cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+						    struct blkio_policy_type *pol) { }
+
+#endif /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
 
 static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
 {
@@ -403,75 +572,98 @@ static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
 					    struct blkio_group *curr_blkg,
 					    bool direction, bool sync)
 {
-	blkiocg_update_io_add_stats(blkg, pol, curr_blkg, direction, sync);
-}
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
 
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-		struct blkio_policy_type *pol, unsigned long dequeue)
-{
-	blkiocg_update_dequeue_stats(blkg, pol, dequeue);
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	blkg_rwstat_add(&stats->queued, rw, 1);
+	blkio_end_empty_time(stats);
+	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
 }
 
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
 		struct blkio_policy_type *pol, unsigned long time,
 		unsigned long unaccounted_time)
 {
-	blkiocg_update_timeslice_used(blkg, pol, time, unaccounted_time);
-}
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
 
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
-		struct blkio_policy_type *pol)
-{
-	blkiocg_set_start_empty_time(blkg, pol);
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	blkg_stat_add(&stats->time, time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
+#endif
 }
 
 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
 		struct blkio_policy_type *pol, bool direction,
 		bool sync)
 {
-	blkiocg_update_io_remove_stats(blkg, pol, direction, sync);
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
+
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	blkg_rwstat_add(&stats->queued, rw, -1);
 }
 
 static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
 		struct blkio_policy_type *pol, bool direction,
 		bool sync)
 {
-	blkiocg_update_io_merged_stats(blkg, pol, direction, sync);
-}
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
 
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
-		struct blkio_policy_type *pol)
-{
-	blkiocg_update_idle_time_stats(blkg, pol);
-}
+	lockdep_assert_held(blkg->q->queue_lock);
 
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
-		struct blkio_policy_type *pol)
-{
-	blkiocg_update_avg_queue_size_stats(blkg, pol);
-}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
-		struct blkio_policy_type *pol)
-{
-	blkiocg_update_set_idle_time_stats(blkg, pol);
+	blkg_rwstat_add(&stats->merged, rw, 1);
 }
 
 static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
 		struct blkio_policy_type *pol, uint64_t bytes,
 		bool direction, bool sync)
 {
-	blkiocg_update_dispatch_stats(blkg, pol, bytes, direction, sync);
+	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
+	struct blkg_policy_data *pd = blkg->pd[pol->plid];
+	struct blkio_group_stats_cpu *stats_cpu;
+	unsigned long flags;
+
+	/* If per cpu stats are not allocated yet, don't do any accounting. */
+	if (pd->stats_cpu == NULL)
+		return;
+
+	/*
+	 * Disabling interrupts to provide mutual exclusion between two
+	 * writes on same cpu. It probably is not needed for 64bit. Not
+	 * optimizing that case yet.
+	 */
+	local_irq_save(flags);
+
+	stats_cpu = this_cpu_ptr(pd->stats_cpu);
+
+	blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
+	blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
+	blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+
+	local_irq_restore(flags);
 }
 
 static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
 		struct blkio_policy_type *pol, uint64_t start_time,
 		uint64_t io_start_time, bool direction, bool sync)
 {
-	blkiocg_update_completion_stats(blkg, pol, start_time, io_start_time,
-					direction, sync);
+	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+	unsigned long long now = sched_clock();
+	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
+
+	lockdep_assert_held(blkg->q->queue_lock);
+
+	if (time_after64(now, io_start_time))
+		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
+	if (time_after64(io_start_time, start_time))
+		blkg_rwstat_add(&stats->wait_time, rw,
+				io_start_time - start_time);
 }
 
 #else /* CONFIG_CFQ_GROUP_IOSCHED */
@@ -489,29 +681,15 @@ static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
 		struct blkio_policy_type *pol,
 		struct blkio_group *curr_blkg, bool direction,
 		bool sync) { }
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-		struct blkio_policy_type *pol, unsigned long dequeue) { }
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
 		struct blkio_policy_type *pol, unsigned long time,
 		unsigned long unaccounted_time) { }
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
-		struct blkio_policy_type *pol) { }
 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
 		struct blkio_policy_type *pol, bool direction,
 		bool sync) { }
 static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
 		struct blkio_policy_type *pol, bool direction,
 		bool sync) { }
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
-		struct blkio_policy_type *pol) { }
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
-		struct blkio_policy_type *pol) { }
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
-		struct blkio_policy_type *pol) { }
-
 static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
 		struct blkio_policy_type *pol, uint64_t bytes,
 		bool direction, bool sync) { }