-rw-r--r--   block/blk-cgroup.c         | 10
-rw-r--r--   block/cfq-iosched.c        | 67
-rw-r--r--   include/linux/blk-cgroup.h | 46
3 files changed, 57 insertions, 66 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 097c4a670fa4..ff79b52d1a0e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -584,7 +584,7 @@ EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
  * @off: offset to the blkg_stat in @pd
  *
  * Collect the blkg_stat specified by @off from @pd and all its online
- * descendants and return the sum. The caller must be holding the queue
+ * descendants and their aux counts. The caller must be holding the queue
  * lock for online tests.
  */
 u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
@@ -602,7 +602,8 @@ u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
 		struct blkg_stat *stat = (void *)pos_pd + off;
 
 		if (pos_blkg->online)
-			sum += blkg_stat_read(stat);
+			sum += blkg_stat_read(stat) +
+				atomic64_read(&stat->aux_cnt);
 	}
 	rcu_read_unlock();
 
@@ -616,7 +617,7 @@ EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
  * @off: offset to the blkg_stat in @pd
  *
  * Collect the blkg_rwstat specified by @off from @pd and all its online
- * descendants and return the sum. The caller must be holding the queue
+ * descendants and their aux counts. The caller must be holding the queue
  * lock for online tests.
  */
 struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
@@ -642,7 +643,8 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
 		tmp = blkg_rwstat_read(rwstat);
 
 		for (i = 0; i < BLKG_RWSTAT_NR; i++)
-			sum.cnt[i] += tmp.cnt[i];
+			sum.cnt[i] += tmp.cnt[i] +
+				atomic64_read(&rwstat->aux_cnt[i]);
 	}
 	rcu_read_unlock();
 
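The blk-cgroup.c hunks above make the recursive helpers fold each online
descendant's aux count into the returned total, while a plain
blkg_stat_read()/blkg_rwstat_read() keeps returning only the live count.
The stand-alone C sketch below models that split outside the kernel; the
demo_* names and the fixed-size child array are illustrative only, not part
of the patch or the kernel API.

	#include <stdio.h>
	#include <stdint.h>

	struct demo_stat {
		uint64_t cnt;      /* live count, cleared on reset */
		uint64_t aux_cnt;  /* carries counts inherited from dead children */
	};

	struct demo_node {
		struct demo_stat stat;
		int online;
		struct demo_node *children[4];
		int nr_children;
	};

	/* local read: the aux count is deliberately excluded */
	static uint64_t demo_stat_read(struct demo_stat *s)
	{
		return s->cnt;
	}

	/* recursive read: live count plus aux count of every online node */
	static uint64_t demo_recursive_sum(struct demo_node *n)
	{
		uint64_t sum = 0;
		int i;

		if (n->online)
			sum += demo_stat_read(&n->stat) + n->stat.aux_cnt;
		for (i = 0; i < n->nr_children; i++)
			sum += demo_recursive_sum(n->children[i]);
		return sum;
	}

	int main(void)
	{
		struct demo_node child = { .stat = { .cnt = 30 }, .online = 1 };
		struct demo_node root  = { .stat = { .cnt = 100, .aux_cnt = 50 },
					   .online = 1,
					   .children = { &child }, .nr_children = 1 };

		/* local root read stays at 100, recursive sum is 100 + 50 + 30 */
		printf("local root: %llu\n",
		       (unsigned long long)demo_stat_read(&root.stat));
		printf("recursive : %llu\n",
		       (unsigned long long)demo_recursive_sum(&root));
		return 0;
	}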
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 0994f3b523a8..b272cfff7364 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -304,7 +304,6 @@ struct cfq_group {
 	int dispatched;
 	struct cfq_ttime ttime;
 	struct cfqg_stats stats;	/* stats for this cfqg */
-	struct cfqg_stats dead_stats;	/* stats pushed from dead children */
 
 	/* async queue for each priority case */
 	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
@@ -736,28 +735,28 @@ static void cfqg_stats_reset(struct cfqg_stats *stats)
 }
 
 /* @to += @from */
-static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
+static void cfqg_stats_add_aux(struct cfqg_stats *to, struct cfqg_stats *from)
 {
 	/* queued stats shouldn't be cleared */
-	blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
-	blkg_rwstat_merge(&to->serviced, &from->serviced);
-	blkg_rwstat_merge(&to->merged, &from->merged);
-	blkg_rwstat_merge(&to->service_time, &from->service_time);
-	blkg_rwstat_merge(&to->wait_time, &from->wait_time);
-	blkg_stat_merge(&from->time, &from->time);
+	blkg_rwstat_add_aux(&to->service_bytes, &from->service_bytes);
+	blkg_rwstat_add_aux(&to->serviced, &from->serviced);
+	blkg_rwstat_add_aux(&to->merged, &from->merged);
+	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
+	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
+	blkg_stat_add_aux(&from->time, &from->time);
 #ifdef CONFIG_DEBUG_BLK_CGROUP
-	blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
-	blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
-	blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
-	blkg_stat_merge(&to->dequeue, &from->dequeue);
-	blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
-	blkg_stat_merge(&to->idle_time, &from->idle_time);
-	blkg_stat_merge(&to->empty_time, &from->empty_time);
+	blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
+	blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
+	blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
+	blkg_stat_add_aux(&to->dequeue, &from->dequeue);
+	blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
+	blkg_stat_add_aux(&to->idle_time, &from->idle_time);
+	blkg_stat_add_aux(&to->empty_time, &from->empty_time);
 #endif
 }
 
 /*
- * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
+ * Transfer @cfqg's stats to its parent's aux counts so that the ancestors'
  * recursive stats can still account for the amount used by this cfqg after
  * it's gone.
  */
@@ -770,10 +769,8 @@ static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
 	if (unlikely(!parent))
 		return;
 
-	cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
-	cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
+	cfqg_stats_add_aux(&parent->stats, &cfqg->stats);
 	cfqg_stats_reset(&cfqg->stats);
-	cfqg_stats_reset(&cfqg->dead_stats);
 }
 
 #else	/* CONFIG_CFQ_GROUP_IOSCHED */
@@ -1606,7 +1603,6 @@ static struct blkg_policy_data *cfq_pd_alloc(gfp_t gfp, int node)
 
 	cfq_init_cfqg_base(cfqg);
 	cfqg_stats_init(&cfqg->stats);
-	cfqg_stats_init(&cfqg->dead_stats);
 
 	return &cfqg->pd;
 }
@@ -1649,38 +1645,11 @@ static void cfq_pd_free(struct blkg_policy_data *pd)
 	return kfree(pd);
 }
 
-/* offset delta from cfqg->stats to cfqg->dead_stats */
-static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
-					offsetof(struct cfq_group, stats);
-
-/* to be used by recursive prfill, sums live and dead stats recursively */
-static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
-{
-	u64 sum = 0;
-
-	sum += blkg_stat_recursive_sum(pd, off);
-	sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
-	return sum;
-}
-
-/* to be used by recursive prfill, sums live and dead rwstats recursively */
-static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
-						       int off)
-{
-	struct blkg_rwstat a, b;
-
-	a = blkg_rwstat_recursive_sum(pd, off);
-	b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
-	blkg_rwstat_merge(&a, &b);
-	return a;
-}
-
 static void cfq_pd_reset_stats(struct blkg_policy_data *pd)
 {
 	struct cfq_group *cfqg = pd_to_cfqg(pd);
 
 	cfqg_stats_reset(&cfqg->stats);
-	cfqg_stats_reset(&cfqg->dead_stats);
 }
 
 static struct cfq_group *cfq_lookup_cfqg(struct cfq_data *cfqd,
@@ -1883,7 +1852,7 @@ static int cfqg_print_rwstat(struct seq_file *sf, void *v)
 static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
 				      struct blkg_policy_data *pd, int off)
 {
-	u64 sum = cfqg_stat_pd_recursive_sum(pd, off);
+	u64 sum = blkg_stat_recursive_sum(pd, off);
 
 	return __blkg_prfill_u64(sf, pd, sum);
 }
@@ -1891,7 +1860,7 @@ static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
 static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
 					struct blkg_policy_data *pd, int off)
 {
-	struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);
+	struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd, off);
 
 	return __blkg_prfill_rwstat(sf, pd, &sum);
 }
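With dead_stats removed, cfqg_stats_xfer_dead() above only has to fold the
dying group's counters (live plus aux) into the parent's aux counters and
then reset its own. The following user-space sketch shows that flow for a
single counter; the demo_* names are illustrative, and the kernel versions
operate on atomic64_t fields of the real stat structures.

	#include <assert.h>
	#include <stdint.h>

	struct demo_stat {
		uint64_t cnt;      /* live count */
		uint64_t aux_cnt;  /* inherited from dead descendants */
	};

	/* counterpart of blkg_stat_add_aux(): @to's aux picks up @from's full value */
	static void demo_stat_add_aux(struct demo_stat *to, struct demo_stat *from)
	{
		to->aux_cnt += from->cnt + from->aux_cnt;
	}

	/* counterpart of cfqg_stats_xfer_dead() for one counter */
	static void demo_xfer_dead(struct demo_stat *parent, struct demo_stat *child)
	{
		demo_stat_add_aux(parent, child);
		child->cnt = 0;
		child->aux_cnt = 0;
	}

	int main(void)
	{
		struct demo_stat parent = { .cnt = 10, .aux_cnt = 0 };
		struct demo_stat child  = { .cnt = 7,  .aux_cnt = 3 };

		demo_xfer_dead(&parent, &child);

		assert(parent.cnt == 10);      /* parent's local count unchanged */
		assert(parent.aux_cnt == 10);  /* child's 7 + 3 carried over */
		assert(child.cnt == 0 && child.aux_cnt == 0);
		return 0;
	}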
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 4d1659c7f84b..e8092276af58 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -53,14 +53,20 @@ struct blkcg {
 #endif
 };
 
+/*
+ * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
+ * recursive. Used to carry stats of dead children.
+ */
 struct blkg_stat {
 	struct u64_stats_sync		syncp;
 	uint64_t			cnt;
+	atomic64_t			aux_cnt;
 };
 
 struct blkg_rwstat {
 	struct u64_stats_sync		syncp;
 	uint64_t			cnt[BLKG_RWSTAT_NR];
+	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
 };
 
 /*
@@ -483,6 +489,7 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
 static inline void blkg_stat_init(struct blkg_stat *stat)
 {
 	u64_stats_init(&stat->syncp);
+	atomic64_set(&stat->aux_cnt, 0);
 }
 
 /**
@@ -504,8 +511,9 @@ static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
  * blkg_stat_read - read the current value of a blkg_stat
  * @stat: blkg_stat to read
  *
- * Read the current value of @stat. This function can be called without
- * synchroniztion and takes care of u64 atomicity.
+ * Read the current value of @stat. The returned value doesn't include the
+ * aux count. This function can be called without synchroniztion and takes
+ * care of u64 atomicity.
  */
 static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
 {
@@ -527,23 +535,31 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
 static inline void blkg_stat_reset(struct blkg_stat *stat)
 {
 	stat->cnt = 0;
+	atomic64_set(&stat->aux_cnt, 0);
 }
 
 /**
- * blkg_stat_merge - merge a blkg_stat into another
+ * blkg_stat_add_aux - add a blkg_stat into another's aux count
  * @to: the destination blkg_stat
  * @from: the source
  *
- * Add @from's count to @to.
+ * Add @from's count including the aux one to @to's aux count.
  */
-static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
+static inline void blkg_stat_add_aux(struct blkg_stat *to,
+				     struct blkg_stat *from)
 {
-	blkg_stat_add(to, blkg_stat_read(from));
+	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
+		     &to->aux_cnt);
 }
 
 static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
 {
+	int i;
+
 	u64_stats_init(&rwstat->syncp);
+
+	for (i = 0; i < BLKG_RWSTAT_NR; i++)
+		atomic64_set(&rwstat->aux_cnt[i], 0);
 }
 
 /**
@@ -614,26 +630,30 @@ static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
  */
 static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
 {
+	int i;
+
 	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
+
+	for (i = 0; i < BLKG_RWSTAT_NR; i++)
+		atomic64_set(&rwstat->aux_cnt[i], 0);
 }
 
 /**
- * blkg_rwstat_merge - merge a blkg_rwstat into another
+ * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
  * @to: the destination blkg_rwstat
  * @from: the source
  *
- * Add @from's counts to @to.
+ * Add @from's count including the aux one to @to's aux count.
  */
-static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
-				     struct blkg_rwstat *from)
+static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
+				       struct blkg_rwstat *from)
 {
 	struct blkg_rwstat v = blkg_rwstat_read(from);
 	int i;
 
-	u64_stats_update_begin(&to->syncp);
 	for (i = 0; i < BLKG_RWSTAT_NR; i++)
-		to->cnt[i] += v.cnt[i];
-	u64_stats_update_end(&to->syncp);
+		atomic64_add(v.cnt[i] + atomic64_read(&from->aux_cnt[i]),
+			     &to->aux_cnt[i]);
 }
 
 #ifdef CONFIG_BLK_DEV_THROTTLING
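The rwstat variant keeps one aux counter per request direction, and
blkg_rwstat_add_aux() above adds the source's live and aux counts per bucket
into the destination's aux counts only, leaving the destination's live
counts untouched. A small sketch of that per-bucket behaviour, again with
illustrative demo_* names rather than the kernel types:

	#include <stdio.h>
	#include <stdint.h>

	enum { DEMO_RWSTAT_READ, DEMO_RWSTAT_WRITE, DEMO_RWSTAT_SYNC,
	       DEMO_RWSTAT_ASYNC, DEMO_RWSTAT_NR };

	struct demo_rwstat {
		uint64_t cnt[DEMO_RWSTAT_NR];      /* live counts */
		uint64_t aux_cnt[DEMO_RWSTAT_NR];  /* inherited from dead children */
	};

	/* counterpart of blkg_rwstat_add_aux(): per-bucket fold into aux only */
	static void demo_rwstat_add_aux(struct demo_rwstat *to,
					struct demo_rwstat *from)
	{
		int i;

		for (i = 0; i < DEMO_RWSTAT_NR; i++)
			to->aux_cnt[i] += from->cnt[i] + from->aux_cnt[i];
	}

	int main(void)
	{
		struct demo_rwstat parent = { .cnt = { [DEMO_RWSTAT_READ] = 5 } };
		struct demo_rwstat child  = { .cnt = { [DEMO_RWSTAT_READ] = 2 },
					      .aux_cnt = { [DEMO_RWSTAT_READ] = 1 } };

		demo_rwstat_add_aux(&parent, &child);

		/* parent's live read count stays 5, its aux picks up 2 + 1 */
		printf("read cnt=%llu aux=%llu\n",
		       (unsigned long long)parent.cnt[DEMO_RWSTAT_READ],
		       (unsigned long long)parent.aux_cnt[DEMO_RWSTAT_READ]);
		return 0;
	}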