author		Tejun Heo <tj@kernel.org>	2012-04-01 17:38:44 -0400
committer	Tejun Heo <tj@kernel.org>	2012-04-01 17:38:44 -0400
commit		155fead9b6347ead90e0b0396cb108a6ba6126c6 (patch)
tree		7e14bb87d942561aa5f44ac8a2d09d6e546c6ccb
parent		9ade5ea4ce57d3596eaee6a57cd212a483674058 (diff)
blkcg: move blkio_group_stats to cfq-iosched.c

blkio_group_stats contains only fields used by cfq and has no reason
to be defined in blkcg core.

* Move blkio_group_stats to cfq-iosched.c and rename it to cfqg_stats.

* blkg_policy_data->stats is replaced with cfq_group->stats.
  blkg_prfill_[rw]stat() are updated to use offset against pd->pdata
  instead.

* All related macros / functions are renamed so that they have cfqg_
  prefix and the unnecessary @pol arguments are dropped.

* All stat functions now take cfq_group * instead of blkio_group *.

* lockdep assertion on queue lock dropped. Elevator runs under queue
  lock by default. There isn't much to be gained by adding lockdep
  assertions at stat function level.

* cfqg_stats_reset() implemented for blkio_reset_group_stats_fn method
  so that cfqg->stats can be reset.

Signed-off-by: Tejun Heo <tj@kernel.org>
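
The shape of the conversion is easiest to see at a call site. A minimal
before/after sketch, lifted from the cfq_insert_request() hunk below
(illustrative only, not an additional change): the blkio_group/@pol
indirection disappears, and the direction/sync bool pair collapses into
the request's REQ_WRITE/REQ_SYNC flag bits passed as "int rw":

	/* before: blkg + explicit policy + direction/sync bools */
	cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
					&blkio_policy_cfq,
					cfqg_to_blkg(cfqd->serving_group),
					rq_data_dir(rq), rq_is_sync(rq));

	/* after: cfq_group + the request's REQ_WRITE/REQ_SYNC bits */
	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
				 rq->cmd_flags);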
-rw-r--r--	block/blk-cgroup.c	|  23
-rw-r--r--	block/blk-cgroup.h	|  41
-rw-r--r--	block/cfq-iosched.c	| 407
3 files changed, 193 insertions, 278 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 2e6fb7d91805..cfdda44f4a0b 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -417,25 +417,6 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 		struct blkio_policy_type *pol;
 
 		list_for_each_entry(pol, &blkio_list, list) {
-			struct blkg_policy_data *pd = blkg->pd[pol->plid];
-			struct blkio_group_stats *stats = &pd->stats;
-
-			/* queued stats shouldn't be cleared */
-			blkg_rwstat_reset(&stats->service_bytes);
-			blkg_rwstat_reset(&stats->serviced);
-			blkg_rwstat_reset(&stats->merged);
-			blkg_rwstat_reset(&stats->service_time);
-			blkg_rwstat_reset(&stats->wait_time);
-			blkg_stat_reset(&stats->time);
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-			blkg_stat_reset(&stats->unaccounted_time);
-			blkg_stat_reset(&stats->avg_queue_size_sum);
-			blkg_stat_reset(&stats->avg_queue_size_samples);
-			blkg_stat_reset(&stats->dequeue);
-			blkg_stat_reset(&stats->group_wait_time);
-			blkg_stat_reset(&stats->idle_time);
-			blkg_stat_reset(&stats->empty_time);
-#endif
 			blkio_reset_stats_cpu(blkg, pol->plid);
 
 			if (pol->ops.blkio_reset_group_stats_fn)
@@ -549,13 +530,13 @@ static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
 			    int off)
 {
 	return __blkg_prfill_u64(sf, pd,
-				 blkg_stat_read((void *)&pd->stats + off));
+				 blkg_stat_read((void *)pd->pdata + off));
 }
 
 static u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
 			      int off)
 {
-	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)&pd->stats + off);
+	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->pdata + off);
 
 	return __blkg_prfill_rwstat(sf, pd, &rwstat);
 }
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index d0ee649e8bbb..791570394e87 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -64,46 +64,6 @@ struct blkg_rwstat {
 	uint64_t			cnt[BLKG_RWSTAT_NR];
 };
 
-struct blkio_group_stats {
-	/* total bytes transferred */
-	struct blkg_rwstat		service_bytes;
-	/* total IOs serviced, post merge */
-	struct blkg_rwstat		serviced;
-	/* number of ios merged */
-	struct blkg_rwstat		merged;
-	/* total time spent on device in ns, may not be accurate w/ queueing */
-	struct blkg_rwstat		service_time;
-	/* total time spent waiting in scheduler queue in ns */
-	struct blkg_rwstat		wait_time;
-	/* number of IOs queued up */
-	struct blkg_rwstat		queued;
-	/* total sectors transferred */
-	struct blkg_stat		sectors;
-	/* total disk time and nr sectors dispatched by this group */
-	struct blkg_stat		time;
-#ifdef CONFIG_DEBUG_BLK_CGROUP
-	/* time not charged to this cgroup */
-	struct blkg_stat		unaccounted_time;
-	/* sum of number of ios queued across all samples */
-	struct blkg_stat		avg_queue_size_sum;
-	/* count of samples taken for average */
-	struct blkg_stat		avg_queue_size_samples;
-	/* how many times this group has been removed from service tree */
-	struct blkg_stat		dequeue;
-	/* total time spent waiting for it to be assigned a timeslice. */
-	struct blkg_stat		group_wait_time;
-	/* time spent idling for this blkio_group */
-	struct blkg_stat		idle_time;
-	/* total time with empty current active q with other requests queued */
-	struct blkg_stat		empty_time;
-	/* fields after this shouldn't be cleared on stat reset */
-	uint64_t			start_group_wait_time;
-	uint64_t			start_idle_time;
-	uint64_t			start_empty_time;
-	uint16_t			flags;
-#endif
-};
-
 /* Per cpu blkio group stats */
 struct blkio_group_stats_cpu {
 	/* total bytes transferred */
@@ -126,7 +86,6 @@ struct blkg_policy_data {
 	/* Configuration */
 	struct blkio_group_conf conf;
 
-	struct blkio_group_stats stats;
 	/* Per cpu stats pointer */
 	struct blkio_group_stats_cpu __percpu *stats_cpu;
 
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index effd89489506..a1f37dfd1b8b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -173,6 +173,48 @@ enum wl_type_t {
 	SYNC_WORKLOAD = 2
 };
 
+struct cfqg_stats {
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+	/* total bytes transferred */
+	struct blkg_rwstat		service_bytes;
+	/* total IOs serviced, post merge */
+	struct blkg_rwstat		serviced;
+	/* number of ios merged */
+	struct blkg_rwstat		merged;
+	/* total time spent on device in ns, may not be accurate w/ queueing */
+	struct blkg_rwstat		service_time;
+	/* total time spent waiting in scheduler queue in ns */
+	struct blkg_rwstat		wait_time;
+	/* number of IOs queued up */
+	struct blkg_rwstat		queued;
+	/* total sectors transferred */
+	struct blkg_stat		sectors;
+	/* total disk time and nr sectors dispatched by this group */
+	struct blkg_stat		time;
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	/* time not charged to this cgroup */
+	struct blkg_stat		unaccounted_time;
+	/* sum of number of ios queued across all samples */
+	struct blkg_stat		avg_queue_size_sum;
+	/* count of samples taken for average */
+	struct blkg_stat		avg_queue_size_samples;
+	/* how many times this group has been removed from service tree */
+	struct blkg_stat		dequeue;
+	/* total time spent waiting for it to be assigned a timeslice. */
+	struct blkg_stat		group_wait_time;
+	/* time spent idling for this blkio_group */
+	struct blkg_stat		idle_time;
+	/* total time with empty current active q with other requests queued */
+	struct blkg_stat		empty_time;
+	/* fields after this shouldn't be cleared on stat reset */
+	uint64_t			start_group_wait_time;
+	uint64_t			start_idle_time;
+	uint64_t			start_empty_time;
+	uint16_t			flags;
+#endif	/* CONFIG_DEBUG_BLK_CGROUP */
+#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
+};
+
 /* This is per cgroup per device grouping structure */
 struct cfq_group {
 	/* group service_tree member */
@@ -212,6 +254,7 @@ struct cfq_group {
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
 	struct cfq_ttime ttime;
+	struct cfqg_stats stats;
 };
 
 struct cfq_io_cq {
@@ -368,96 +411,84 @@ CFQ_CFQQ_FNS(wait_busy);
 
 #if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
 
-/* blkg state flags */
-enum blkg_state_flags {
-	BLKG_waiting = 0,
-	BLKG_idling,
-	BLKG_empty,
+/* cfqg stats flags */
+enum cfqg_stats_flags {
+	CFQG_stats_waiting = 0,
+	CFQG_stats_idling,
+	CFQG_stats_empty,
 };
 
-#define BLKG_FLAG_FNS(name)						\
-static inline void blkio_mark_blkg_##name(				\
-		struct blkio_group_stats *stats)			\
+#define CFQG_FLAG_FNS(name)						\
+static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
 {									\
-	stats->flags |= (1 << BLKG_##name);				\
+	stats->flags |= (1 << CFQG_stats_##name);			\
 }									\
-static inline void blkio_clear_blkg_##name(				\
-		struct blkio_group_stats *stats)			\
+static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
 {									\
-	stats->flags &= ~(1 << BLKG_##name);				\
+	stats->flags &= ~(1 << CFQG_stats_##name);			\
 }									\
-static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
+static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
 {									\
-	return (stats->flags & (1 << BLKG_##name)) != 0;		\
+	return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
 }									\
 
-BLKG_FLAG_FNS(waiting)
-BLKG_FLAG_FNS(idling)
-BLKG_FLAG_FNS(empty)
-#undef BLKG_FLAG_FNS
+CFQG_FLAG_FNS(waiting)
+CFQG_FLAG_FNS(idling)
+CFQG_FLAG_FNS(empty)
+#undef CFQG_FLAG_FNS
 
 /* This should be called with the queue_lock held. */
-static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
+static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
 {
 	unsigned long long now;
 
-	if (!blkio_blkg_waiting(stats))
+	if (!cfqg_stats_waiting(stats))
 		return;
 
 	now = sched_clock();
 	if (time_after64(now, stats->start_group_wait_time))
 		blkg_stat_add(&stats->group_wait_time,
 			      now - stats->start_group_wait_time);
-	blkio_clear_blkg_waiting(stats);
+	cfqg_stats_clear_waiting(stats);
 }
 
 /* This should be called with the queue_lock held. */
-static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
-					    struct blkio_policy_type *pol,
-					    struct blkio_group *curr_blkg)
+static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
+						 struct cfq_group *curr_cfqg)
 {
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
+	struct cfqg_stats *stats = &cfqg->stats;
 
-	if (blkio_blkg_waiting(&pd->stats))
+	if (cfqg_stats_waiting(stats))
 		return;
-	if (blkg == curr_blkg)
+	if (cfqg == curr_cfqg)
 		return;
-	pd->stats.start_group_wait_time = sched_clock();
-	blkio_mark_blkg_waiting(&pd->stats);
+	stats->start_group_wait_time = sched_clock();
+	cfqg_stats_mark_waiting(stats);
 }
 
 /* This should be called with the queue_lock held. */
-static void blkio_end_empty_time(struct blkio_group_stats *stats)
+static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
 {
 	unsigned long long now;
 
-	if (!blkio_blkg_empty(stats))
+	if (!cfqg_stats_empty(stats))
 		return;
 
 	now = sched_clock();
 	if (time_after64(now, stats->start_empty_time))
 		blkg_stat_add(&stats->empty_time,
 			      now - stats->start_empty_time);
-	blkio_clear_blkg_empty(stats);
+	cfqg_stats_clear_empty(stats);
 }
 
-static void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-					     struct blkio_policy_type *pol,
-					     unsigned long dequeue)
+static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
 {
-	struct blkg_policy_data *pd = blkg->pd[pol->plid];
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	blkg_stat_add(&pd->stats.dequeue, dequeue);
+	blkg_stat_add(&cfqg->stats.dequeue, 1);
 }
 
-static void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
-					     struct blkio_policy_type *pol)
+static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
 {
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
-	lockdep_assert_held(blkg->q->queue_lock);
+	struct cfqg_stats *stats = &cfqg->stats;
 
 	if (blkg_rwstat_sum(&stats->queued))
 		return;
@@ -467,72 +498,57 @@ static void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
 	 * request in parent group and moved to this group while being added
 	 * to service tree. Just ignore the event and move on.
 	 */
-	if (blkio_blkg_empty(stats))
+	if (cfqg_stats_empty(stats))
 		return;
 
 	stats->start_empty_time = sched_clock();
-	blkio_mark_blkg_empty(stats);
+	cfqg_stats_mark_empty(stats);
 }
 
-static void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
-					       struct blkio_policy_type *pol)
+static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
 {
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+	struct cfqg_stats *stats = &cfqg->stats;
 
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	if (blkio_blkg_idling(stats)) {
+	if (cfqg_stats_idling(stats)) {
 		unsigned long long now = sched_clock();
 
 		if (time_after64(now, stats->start_idle_time))
 			blkg_stat_add(&stats->idle_time,
 				      now - stats->start_idle_time);
-		blkio_clear_blkg_idling(stats);
+		cfqg_stats_clear_idling(stats);
 	}
 }
 
-static void cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
-						   struct blkio_policy_type *pol)
+static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
 {
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+	struct cfqg_stats *stats = &cfqg->stats;
 
-	lockdep_assert_held(blkg->q->queue_lock);
-	BUG_ON(blkio_blkg_idling(stats));
+	BUG_ON(cfqg_stats_idling(stats));
 
 	stats->start_idle_time = sched_clock();
-	blkio_mark_blkg_idling(stats);
+	cfqg_stats_mark_idling(stats);
 }
 
-static void cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
-						    struct blkio_policy_type *pol)
+static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
 {
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
-	lockdep_assert_held(blkg->q->queue_lock);
+	struct cfqg_stats *stats = &cfqg->stats;
 
 	blkg_stat_add(&stats->avg_queue_size_sum,
 		      blkg_rwstat_sum(&stats->queued));
 	blkg_stat_add(&stats->avg_queue_size_samples, 1);
-	blkio_update_group_wait_time(stats);
+	cfqg_stats_update_group_wait_time(stats);
 }
 
 #else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 
-static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
-			struct blkio_policy_type *pol,
-			struct blkio_group *curr_blkg) { }
-static void blkio_end_empty_time(struct blkio_group_stats *stats) { }
-static void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol,
-			unsigned long dequeue) { }
-static void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
-			struct blkio_policy_type *pol) { }
-static void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol) { }
-static void cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol) { }
-static void cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol) { }
+static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
+			struct cfq_group *curr_cfqg) { }
+static void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
+static void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
+static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
+static void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
+static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
+static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
 
 #endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
 
@@ -567,80 +583,46 @@ static inline void cfqg_put(struct cfq_group *cfqg)
 	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
 			blkg_path(cfqg_to_blkg((cfqg))), ##args)	\
 
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol,
-			struct blkio_group *curr_blkg,
-			bool direction, bool sync)
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+					    struct cfq_group *curr_cfqg, int rw)
 {
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	blkg_rwstat_add(&stats->queued, rw, 1);
-	blkio_end_empty_time(stats);
-	blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
+	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
+	cfqg_stats_end_empty_time(&cfqg->stats);
+	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
 }
 
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, unsigned long time,
-			unsigned long unaccounted_time)
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+			unsigned long time, unsigned long unaccounted_time)
 {
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	blkg_stat_add(&stats->time, time);
+	blkg_stat_add(&cfqg->stats.time, time);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-	blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
+	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
 #endif
 }
 
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, bool direction,
-			bool sync)
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
 {
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	blkg_rwstat_add(&stats->queued, rw, -1);
+	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
 }
 
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, bool direction,
-			bool sync)
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
 {
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
-	lockdep_assert_held(blkg->q->queue_lock);
-
-	blkg_rwstat_add(&stats->merged, rw, 1);
+	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
 }
 
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, uint64_t bytes,
-			bool direction, bool sync)
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+					      uint64_t bytes, int rw)
 {
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
-	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
-	blkg_stat_add(&stats->sectors, bytes >> 9);
-	blkg_rwstat_add(&stats->serviced, rw, 1);
-	blkg_rwstat_add(&stats->service_bytes, rw, bytes);
+	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
+	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
+	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
 }
 
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, uint64_t start_time,
-			uint64_t io_start_time, bool direction, bool sync)
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+			uint64_t start_time, uint64_t io_start_time, int rw)
 {
-	struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+	struct cfqg_stats *stats = &cfqg->stats;
 	unsigned long long now = sched_clock();
-	int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
-
-	lockdep_assert_held(blkg->q->queue_lock);
 
 	if (time_after64(now, io_start_time))
 		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
@@ -649,6 +631,29 @@ static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
 					io_start_time - start_time);
 }
 
+static void cfqg_stats_reset(struct blkio_group *blkg)
+{
+	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+	struct cfqg_stats *stats = &cfqg->stats;
+
+	/* queued stats shouldn't be cleared */
+	blkg_rwstat_reset(&stats->service_bytes);
+	blkg_rwstat_reset(&stats->serviced);
+	blkg_rwstat_reset(&stats->merged);
+	blkg_rwstat_reset(&stats->service_time);
+	blkg_rwstat_reset(&stats->wait_time);
+	blkg_stat_reset(&stats->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	blkg_stat_reset(&stats->unaccounted_time);
+	blkg_stat_reset(&stats->avg_queue_size_sum);
+	blkg_stat_reset(&stats->avg_queue_size_samples);
+	blkg_stat_reset(&stats->dequeue);
+	blkg_stat_reset(&stats->group_wait_time);
+	blkg_stat_reset(&stats->idle_time);
+	blkg_stat_reset(&stats->empty_time);
+#endif
+}
+
 #else	/* CONFIG_CFQ_GROUP_IOSCHED */
 
 static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg) { return NULL; }
@@ -660,25 +665,16 @@ static inline void cfqg_put(struct cfq_group *cfqg) { }
 	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
 
-static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol,
-			struct blkio_group *curr_blkg, bool direction,
-			bool sync) { }
-static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, unsigned long time,
-			unsigned long unaccounted_time) { }
-static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, bool direction,
-			bool sync) { }
-static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, bool direction,
-			bool sync) { }
-static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, uint64_t bytes,
-			bool direction, bool sync) { }
-static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
-			struct blkio_policy_type *pol, uint64_t start_time,
-			uint64_t io_start_time, bool direction, bool sync) { }
+static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
+			struct cfq_group *curr_cfqg, int rw) { }
+static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
+			unsigned long time, unsigned long unaccounted_time) { }
+static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
+static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
+			uint64_t bytes, int rw) { }
+static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
+			uint64_t start_time, uint64_t io_start_time, int rw) { }
 
 #endif	/* CONFIG_CFQ_GROUP_IOSCHED */
 
@@ -1233,8 +1229,7 @@ cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
 	cfq_group_service_tree_del(st, cfqg);
 	cfqg->saved_workload_slice = 0;
-	cfq_blkiocg_update_dequeue_stats(cfqg_to_blkg(cfqg),
-					 &blkio_policy_cfq, 1);
+	cfqg_stats_update_dequeue(cfqg);
 }
 
 static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
@@ -1306,9 +1301,8 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
 		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
 		     used_sl, cfqq->slice_dispatch, charge,
 		     iops_mode(cfqd), cfqq->nr_sectors);
-	cfq_blkiocg_update_timeslice_used(cfqg_to_blkg(cfqg), &blkio_policy_cfq,
-					  used_sl, unaccounted_sl);
-	cfq_blkiocg_set_start_empty_time(cfqg_to_blkg(cfqg), &blkio_policy_cfq);
+	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
+	cfqg_stats_set_start_empty_time(cfqg);
 }
 
 /**
@@ -1456,14 +1450,15 @@ static int blkcg_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
 }
 
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-static u64 blkg_prfill_avg_queue_size(struct seq_file *sf,
+static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
 				      struct blkg_policy_data *pd, int off)
 {
-	u64 samples = blkg_stat_read(&pd->stats.avg_queue_size_samples);
+	struct cfq_group *cfqg = (void *)pd->pdata;
+	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
 	u64 v = 0;
 
 	if (samples) {
-		v = blkg_stat_read(&pd->stats.avg_queue_size_sum);
+		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
 		do_div(v, samples);
 	}
 	__blkg_prfill_u64(sf, pd, v);
@@ -1471,12 +1466,12 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
 }
 
 /* print avg_queue_size */
-static int blkcg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
+static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
 				      struct seq_file *sf)
 {
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
 
-	blkcg_print_blkgs(sf, blkcg, blkg_prfill_avg_queue_size,
+	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
 			  BLKIO_POLICY_PROP, 0, false);
 	return 0;
 }
@@ -1497,84 +1492,84 @@ static struct cftype cfq_blkcg_files[] = {
 	{
 		.name = "time",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, time)),
+				offsetof(struct cfq_group, stats.time)),
 		.read_seq_string = blkcg_print_stat,
 	},
 	{
 		.name = "sectors",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, sectors)),
+				offsetof(struct cfq_group, stats.sectors)),
 		.read_seq_string = blkcg_print_stat,
 	},
 	{
 		.name = "io_service_bytes",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, service_bytes)),
+				offsetof(struct cfq_group, stats.service_bytes)),
 		.read_seq_string = blkcg_print_rwstat,
 	},
 	{
 		.name = "io_serviced",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, serviced)),
+				offsetof(struct cfq_group, stats.serviced)),
 		.read_seq_string = blkcg_print_rwstat,
 	},
 	{
 		.name = "io_service_time",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, service_time)),
+				offsetof(struct cfq_group, stats.service_time)),
 		.read_seq_string = blkcg_print_rwstat,
 	},
 	{
 		.name = "io_wait_time",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, wait_time)),
+				offsetof(struct cfq_group, stats.wait_time)),
 		.read_seq_string = blkcg_print_rwstat,
 	},
 	{
 		.name = "io_merged",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, merged)),
+				offsetof(struct cfq_group, stats.merged)),
 		.read_seq_string = blkcg_print_rwstat,
 	},
 	{
 		.name = "io_queued",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, queued)),
+				offsetof(struct cfq_group, stats.queued)),
 		.read_seq_string = blkcg_print_rwstat,
 	},
 #ifdef CONFIG_DEBUG_BLK_CGROUP
 	{
 		.name = "avg_queue_size",
-		.read_seq_string = blkcg_print_avg_queue_size,
+		.read_seq_string = cfqg_print_avg_queue_size,
 	},
 	{
 		.name = "group_wait_time",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, group_wait_time)),
+				offsetof(struct cfq_group, stats.group_wait_time)),
 		.read_seq_string = blkcg_print_stat,
 	},
 	{
 		.name = "idle_time",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, idle_time)),
+				offsetof(struct cfq_group, stats.idle_time)),
 		.read_seq_string = blkcg_print_stat,
 	},
 	{
 		.name = "empty_time",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, empty_time)),
+				offsetof(struct cfq_group, stats.empty_time)),
 		.read_seq_string = blkcg_print_stat,
 	},
 	{
 		.name = "dequeue",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, dequeue)),
+				offsetof(struct cfq_group, stats.dequeue)),
 		.read_seq_string = blkcg_print_stat,
 	},
 	{
 		.name = "unaccounted_time",
 		.private = BLKCG_STAT_PRIV(BLKIO_POLICY_PROP,
-				offsetof(struct blkio_group_stats, unaccounted_time)),
+				offsetof(struct cfq_group, stats.unaccounted_time)),
 		.read_seq_string = blkcg_print_stat,
 	},
 #endif	/* CONFIG_DEBUG_BLK_CGROUP */
@@ -1858,14 +1853,10 @@ static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
 {
 	elv_rb_del(&cfqq->sort_list, rq);
 	cfqq->queued[rq_is_sync(rq)]--;
-	cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
-					   &blkio_policy_cfq, rq_data_dir(rq),
-					   rq_is_sync(rq));
+	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
 	cfq_add_rq_rb(rq);
-	cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
-					&blkio_policy_cfq,
-					cfqg_to_blkg(cfqq->cfqd->serving_group),
-					rq_data_dir(rq), rq_is_sync(rq));
+	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
+				 rq->cmd_flags);
 }
 
 static struct request *
@@ -1921,9 +1912,7 @@ static void cfq_remove_request(struct request *rq)
 	cfq_del_rq_rb(rq);
 
 	cfqq->cfqd->rq_queued--;
-	cfq_blkiocg_update_io_remove_stats(cfqg_to_blkg(RQ_CFQG(rq)),
-					   &blkio_policy_cfq, rq_data_dir(rq),
-					   rq_is_sync(rq));
+	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
 	if (rq->cmd_flags & REQ_PRIO) {
 		WARN_ON(!cfqq->prio_pending);
 		cfqq->prio_pending--;
@@ -1958,9 +1947,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
 			   struct bio *bio)
 {
-	cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(req)),
-					   &blkio_policy_cfq, bio_data_dir(bio),
-					   cfq_bio_sync(bio));
+	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
 }
 
 static void
@@ -1982,9 +1969,7 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
 	if (cfqq->next_rq == next)
 		cfqq->next_rq = rq;
 	cfq_remove_request(next);
-	cfq_blkiocg_update_io_merged_stats(cfqg_to_blkg(RQ_CFQG(rq)),
-					   &blkio_policy_cfq, rq_data_dir(next),
-					   rq_is_sync(next));
+	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);
 
 	cfqq = RQ_CFQQ(next);
 	/*
@@ -2025,8 +2010,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
 static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	del_timer(&cfqd->idle_slice_timer);
-	cfq_blkiocg_update_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
-					   &blkio_policy_cfq);
+	cfqg_stats_update_idle_time(cfqq->cfqg);
 }
 
 static void __cfq_set_active_queue(struct cfq_data *cfqd,
@@ -2035,8 +2019,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
 				cfqd->serving_prio, cfqd->serving_type);
-		cfq_blkiocg_update_avg_queue_size_stats(cfqg_to_blkg(cfqq->cfqg),
-							&blkio_policy_cfq);
+		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -2384,8 +2367,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 		sl = cfqd->cfq_slice_idle;
 
 	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
-	cfq_blkiocg_update_set_idle_time_stats(cfqg_to_blkg(cfqq->cfqg),
-					       &blkio_policy_cfq);
+	cfqg_stats_set_start_idle_time(cfqq->cfqg);
 	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
 			group_idle ? 1 : 0);
 }
@@ -2408,9 +2390,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
 	cfqq->nr_sectors += blk_rq_sectors(rq);
-	cfq_blkiocg_update_dispatch_stats(cfqg_to_blkg(cfqq->cfqg),
-					  &blkio_policy_cfq, blk_rq_bytes(rq),
-					  rq_data_dir(rq), rq_is_sync(rq));
+	cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
 }
 
 /*
@@ -3513,9 +3493,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			cfq_clear_cfqq_wait_request(cfqq);
 			__blk_run_queue(cfqd->queue);
 		} else {
-			cfq_blkiocg_update_idle_time_stats(
-					cfqg_to_blkg(cfqq->cfqg),
-					&blkio_policy_cfq);
+			cfqg_stats_update_idle_time(cfqq->cfqg);
 			cfq_mark_cfqq_must_dispatch(cfqq);
 		}
 	}
@@ -3542,10 +3520,8 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)
 	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 	cfq_add_rq_rb(rq);
-	cfq_blkiocg_update_io_add_stats(cfqg_to_blkg(RQ_CFQG(rq)),
-					&blkio_policy_cfq,
-					cfqg_to_blkg(cfqd->serving_group),
-					rq_data_dir(rq), rq_is_sync(rq));
+	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
+				 rq->cmd_flags);
 	cfq_rq_enqueued(cfqd, cfqq, rq);
 }
 
@@ -3641,10 +3617,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
 	(RQ_CFQG(rq))->dispatched--;
-	cfq_blkiocg_update_completion_stats(cfqg_to_blkg(cfqq->cfqg),
-			&blkio_policy_cfq, rq_start_time_ns(rq),
-			rq_io_start_time_ns(rq), rq_data_dir(rq),
-			rq_is_sync(rq));
+	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
+				     rq_io_start_time_ns(rq), rq->cmd_flags);
 
 	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
@@ -4184,6 +4158,7 @@ static struct elevator_type iosched_cfq = {
 static struct blkio_policy_type blkio_policy_cfq = {
 	.ops = {
 		.blkio_init_group_fn = cfq_init_blkio_group,
+		.blkio_reset_group_stats_fn = cfqg_stats_reset,
 	},
 	.plid = BLKIO_POLICY_PROP,
 	.pdata_size = sizeof(struct cfq_group),