author     Tejun Heo <tj@kernel.org>   2012-04-01 17:38:44 -0400
committer  Tejun Heo <tj@kernel.org>   2012-04-01 17:38:44 -0400
commit     629ed0b10209ffc4e1d439e5508d52d5e3a090b8 (patch)
tree       69caa493e4ea0714aff247c8415c4fef7ebfe996 /block/cfq-iosched.c
parent     2ce4d50f9cfab40831eee5e51e950d5c4724994b (diff)
blkcg: move statistics update code to policies
As with the conf/stats file handling code, there's no reason for stat update code to live in blkcg core with policies calling in to update them. The current organization is both inflexible and complex.

This patch moves stat update code to the specific policies. All blkiocg_update_*_stats() functions which deal with BLKIO_POLICY_PROP stats are collapsed into their cfq_blkiocg_update_*_stats() counterparts. blkiocg_update_dispatch_stats() is used by both policies and is duplicated as throtl_update_dispatch_stats() and cfq_blkiocg_update_dispatch_stats(). This will be cleaned up later.

Signed-off-by: Tejun Heo <tj@kernel.org>
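The per-group state tracking that moves into cfq-iosched.c relies on a macro (BLKG_FLAG_FNS in the diff below) that stamps out mark/clear/test helpers for each flag bit. The following standalone sketch is not part of the patch; it is an illustrative user-space rendering of that pattern, with hypothetical names (group_stats, GRP_FLAG_FNS, mark_grp_*), assuming only standard C.

    #include <stdio.h>

    /* Simplified stand-in for struct blkio_group_stats; only the flags word matters here. */
    struct group_stats {
            unsigned int flags;
    };

    /* Illustrative flag bits, mirroring BLKG_waiting/BLKG_idling/BLKG_empty in the patch. */
    enum group_state_flags {
            GRP_waiting = 0,
            GRP_idling,
            GRP_empty,
    };

    /* One macro generates mark/clear/test helpers per flag, as BLKG_FLAG_FNS does. */
    #define GRP_FLAG_FNS(name)                                          \
    static inline void mark_grp_##name(struct group_stats *stats)      \
    {                                                                   \
            stats->flags |= (1U << GRP_##name);                         \
    }                                                                   \
    static inline void clear_grp_##name(struct group_stats *stats)     \
    {                                                                   \
            stats->flags &= ~(1U << GRP_##name);                        \
    }                                                                   \
    static inline int grp_##name(struct group_stats *stats)            \
    {                                                                   \
            return (stats->flags & (1U << GRP_##name)) != 0;            \
    }

    GRP_FLAG_FNS(waiting)
    GRP_FLAG_FNS(idling)
    GRP_FLAG_FNS(empty)
    #undef GRP_FLAG_FNS

    int main(void)
    {
            struct group_stats stats = { 0 };

            mark_grp_waiting(&stats);
            printf("waiting=%d idling=%d\n", grp_waiting(&stats), grp_idling(&stats));
            clear_grp_waiting(&stats);
            printf("waiting=%d\n", grp_waiting(&stats));
            return 0;
    }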
Diffstat (limited to 'block/cfq-iosched.c')
-rw-r--r--   block/cfq-iosched.c   280
1 file changed, 229 insertions(+), 51 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 2e13e9e689bd..49913804e8dd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -15,6 +15,7 @@
 #include <linux/ioprio.h>
 #include <linux/blktrace_api.h>
 #include "blk.h"
+#include "blk-cgroup.h"
 
 static struct blkio_policy_type blkio_policy_cfq;
 
@@ -365,9 +366,177 @@ CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
 
-#ifdef CONFIG_CFQ_GROUP_IOSCHED
-
-#include "blk-cgroup.h"
+#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)
+
+/* blkg state flags */
+enum blkg_state_flags {
+        BLKG_waiting = 0,
+        BLKG_idling,
+        BLKG_empty,
+};
+
+#define BLKG_FLAG_FNS(name)                                             \
+static inline void blkio_mark_blkg_##name(                              \
+                struct blkio_group_stats *stats)                        \
+{                                                                       \
+        stats->flags |= (1 << BLKG_##name);                             \
+}                                                                       \
+static inline void blkio_clear_blkg_##name(                             \
+                struct blkio_group_stats *stats)                        \
+{                                                                       \
+        stats->flags &= ~(1 << BLKG_##name);                            \
+}                                                                       \
+static inline int blkio_blkg_##name(struct blkio_group_stats *stats)    \
+{                                                                       \
+        return (stats->flags & (1 << BLKG_##name)) != 0;                \
+}                                                                       \
+
+BLKG_FLAG_FNS(waiting)
+BLKG_FLAG_FNS(idling)
+BLKG_FLAG_FNS(empty)
+#undef BLKG_FLAG_FNS
+
+/* This should be called with the queue_lock held. */
+static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
+{
+        unsigned long long now;
+
+        if (!blkio_blkg_waiting(stats))
+                return;
+
+        now = sched_clock();
+        if (time_after64(now, stats->start_group_wait_time))
+                blkg_stat_add(&stats->group_wait_time,
+                              now - stats->start_group_wait_time);
+        blkio_clear_blkg_waiting(stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+                                            struct blkio_policy_type *pol,
+                                            struct blkio_group *curr_blkg)
+{
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
+
+        if (blkio_blkg_waiting(&pd->stats))
+                return;
+        if (blkg == curr_blkg)
+                return;
+        pd->stats.start_group_wait_time = sched_clock();
+        blkio_mark_blkg_waiting(&pd->stats);
+}
+
+/* This should be called with the queue_lock held. */
+static void blkio_end_empty_time(struct blkio_group_stats *stats)
+{
+        unsigned long long now;
+
+        if (!blkio_blkg_empty(stats))
+                return;
+
+        now = sched_clock();
+        if (time_after64(now, stats->start_empty_time))
+                blkg_stat_add(&stats->empty_time,
+                              now - stats->start_empty_time);
+        blkio_clear_blkg_empty(stats);
+}
+
+static void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+                                             struct blkio_policy_type *pol,
+                                             unsigned long dequeue)
+{
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
+
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        blkg_stat_add(&pd->stats.dequeue, dequeue);
+}
+
+static void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
+                                             struct blkio_policy_type *pol)
+{
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        if (blkg_rwstat_sum(&stats->queued))
+                return;
+
+        /*
+         * group is already marked empty. This can happen if cfqq got new
+         * request in parent group and moved to this group while being added
+         * to service tree. Just ignore the event and move on.
+         */
+        if (blkio_blkg_empty(stats))
+                return;
+
+        stats->start_empty_time = sched_clock();
+        blkio_mark_blkg_empty(stats);
+}
+
+static void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+                                               struct blkio_policy_type *pol)
+{
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        if (blkio_blkg_idling(stats)) {
+                unsigned long long now = sched_clock();
+
+                if (time_after64(now, stats->start_idle_time))
+                        blkg_stat_add(&stats->idle_time,
+                                      now - stats->start_idle_time);
+                blkio_clear_blkg_idling(stats);
+        }
+}
+
+static void cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+                                                   struct blkio_policy_type *pol)
+{
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+        lockdep_assert_held(blkg->q->queue_lock);
+        BUG_ON(blkio_blkg_idling(stats));
+
+        stats->start_idle_time = sched_clock();
+        blkio_mark_blkg_idling(stats);
+}
+
+static void cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+                                                    struct blkio_policy_type *pol)
+{
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        blkg_stat_add(&stats->avg_queue_size_sum,
+                      blkg_rwstat_sum(&stats->queued));
+        blkg_stat_add(&stats->avg_queue_size_samples, 1);
+        blkio_update_group_wait_time(stats);
+}
+
+#else   /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
+                                            struct blkio_policy_type *pol,
+                                            struct blkio_group *curr_blkg) { }
+static void blkio_end_empty_time(struct blkio_group_stats *stats) { }
+static void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
+                                             struct blkio_policy_type *pol,
+                                             unsigned long dequeue) { }
+static void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
+                                             struct blkio_policy_type *pol) { }
+static void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
+                                               struct blkio_policy_type *pol) { }
+static void cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
+                                                   struct blkio_policy_type *pol) { }
+static void cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
+                                                    struct blkio_policy_type *pol) { }
+
+#endif  /* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
+
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
 
 static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
 {
@@ -403,75 +572,98 @@ static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
                                            struct blkio_group *curr_blkg,
                                            bool direction, bool sync)
 {
-        blkiocg_update_io_add_stats(blkg, pol, curr_blkg, direction, sync);
-}
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
 
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol, unsigned long dequeue)
-{
-        blkiocg_update_dequeue_stats(blkg, pol, dequeue);
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        blkg_rwstat_add(&stats->queued, rw, 1);
+        blkio_end_empty_time(stats);
+        blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
 }
 
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, unsigned long time,
                 unsigned long unaccounted_time)
 {
-        blkiocg_update_timeslice_used(blkg, pol, time, unaccounted_time);
-}
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
 
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
-                struct blkio_policy_type *pol)
-{
-        blkiocg_set_start_empty_time(blkg, pol);
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        blkg_stat_add(&stats->time, time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+        blkg_stat_add(&stats->unaccounted_time, unaccounted_time);
+#endif
 }
 
 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, bool direction,
                 bool sync)
 {
-        blkiocg_update_io_remove_stats(blkg, pol, direction, sync);
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
+
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        blkg_rwstat_add(&stats->queued, rw, -1);
 }
 
 static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, bool direction,
                 bool sync)
 {
-        blkiocg_update_io_merged_stats(blkg, pol, direction, sync);
-}
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
 
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol)
-{
-        blkiocg_update_idle_time_stats(blkg, pol);
-}
+        lockdep_assert_held(blkg->q->queue_lock);
 
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol)
-{
-        blkiocg_update_avg_queue_size_stats(blkg, pol);
-}
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol)
-{
-        blkiocg_update_set_idle_time_stats(blkg, pol);
+        blkg_rwstat_add(&stats->merged, rw, 1);
 }
 
 static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, uint64_t bytes,
                 bool direction, bool sync)
 {
-        blkiocg_update_dispatch_stats(blkg, pol, bytes, direction, sync);
+        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
+        struct blkg_policy_data *pd = blkg->pd[pol->plid];
+        struct blkio_group_stats_cpu *stats_cpu;
+        unsigned long flags;
+
+        /* If per cpu stats are not allocated yet, don't do any accounting. */
+        if (pd->stats_cpu == NULL)
+                return;
+
+        /*
+         * Disabling interrupts to provide mutual exclusion between two
+         * writes on same cpu. It probably is not needed for 64bit. Not
+         * optimizing that case yet.
+         */
+        local_irq_save(flags);
+
+        stats_cpu = this_cpu_ptr(pd->stats_cpu);
+
+        blkg_stat_add(&stats_cpu->sectors, bytes >> 9);
+        blkg_rwstat_add(&stats_cpu->serviced, rw, 1);
+        blkg_rwstat_add(&stats_cpu->service_bytes, rw, bytes);
+
+        local_irq_restore(flags);
 }
 
 static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, uint64_t start_time,
                 uint64_t io_start_time, bool direction, bool sync)
 {
-        blkiocg_update_completion_stats(blkg, pol, start_time, io_start_time,
-                                        direction, sync);
+        struct blkio_group_stats *stats = &blkg->pd[pol->plid]->stats;
+        unsigned long long now = sched_clock();
+        int rw = (direction ? REQ_WRITE : 0) | (sync ? REQ_SYNC : 0);
+
+        lockdep_assert_held(blkg->q->queue_lock);
+
+        if (time_after64(now, io_start_time))
+                blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
+        if (time_after64(io_start_time, start_time))
+                blkg_rwstat_add(&stats->wait_time, rw,
+                                io_start_time - start_time);
 }
 
 #else   /* CONFIG_CFQ_GROUP_IOSCHED */
@@ -489,29 +681,15 @@ static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg,
                                            struct blkio_policy_type *pol,
                                            struct blkio_group *curr_blkg, bool direction,
                                            bool sync) { }
-static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol, unsigned long dequeue) { }
 static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, unsigned long time,
                 unsigned long unaccounted_time) { }
-static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg,
-                struct blkio_policy_type *pol) { }
 static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, bool direction,
                 bool sync) { }
 static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, bool direction,
                 bool sync) { }
-static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol) { }
-static inline void
-cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol) { }
-
-static inline void
-cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
-                struct blkio_policy_type *pol) { }
-
 static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                 struct blkio_policy_type *pol, uint64_t bytes,
                 bool direction, bool sync) { }