 include/linux/memcontrol.h |  54
 mm/memcontrol.c            | 205
 2 files changed, 150 insertions(+), 109 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e35e6a651187..bc74d6a4407c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -128,6 +128,7 @@ struct mem_cgroup_per_node {
 
 	struct lruvec_stat __percpu *lruvec_stat_cpu;
 	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];
+	atomic_long_t		lruvec_stat_local[NR_VM_NODE_STAT_ITEMS];
 
 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 
@@ -279,8 +280,12 @@ struct mem_cgroup {
 	MEMCG_PADDING(_pad2_);
 
 	atomic_long_t		vmstats[MEMCG_NR_STAT];
+	atomic_long_t		vmstats_local[MEMCG_NR_STAT];
+
 	atomic_long_t		vmevents[NR_VM_EVENT_ITEMS];
-	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
+	atomic_long_t		vmevents_local[NR_VM_EVENT_ITEMS];
+
+	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
 
 	unsigned long		socket_pressure;
 
@@ -554,10 +559,24 @@ void unlock_page_memcg(struct page *page);
  * idx can be of type enum memcg_stat_item or node_stat_item.
  * Keep in sync with memcg_exact_page_state().
  */
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
+{
+	long x = atomic_long_read(&memcg->vmstats[idx]);
+#ifdef CONFIG_SMP
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
+}
+
+/*
+ * idx can be of type enum memcg_stat_item or node_stat_item.
+ * Keep in sync with memcg_exact_page_state().
+ */
 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
 						   int idx)
 {
-	long x = atomic_long_read(&memcg->vmstats[idx]);
+	long x = atomic_long_read(&memcg->vmstats_local[idx]);
 #ifdef CONFIG_SMP
 	if (x < 0)
 		x = 0;
@@ -609,6 +628,24 @@ static inline void mod_memcg_page_state(struct page *page,
 	mod_memcg_state(page->mem_cgroup, idx, val);
 }
 
+static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
+					      enum node_stat_item idx)
+{
+	struct mem_cgroup_per_node *pn;
+	long x;
+
+	if (mem_cgroup_disabled())
+		return node_page_state(lruvec_pgdat(lruvec), idx);
+
+	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+	x = atomic_long_read(&pn->lruvec_stat[idx]);
+#ifdef CONFIG_SMP
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
+}
+
 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 						    enum node_stat_item idx)
 {
@@ -619,7 +656,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 		return node_page_state(lruvec_pgdat(lruvec), idx);
 
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	x = atomic_long_read(&pn->lruvec_stat[idx]);
+	x = atomic_long_read(&pn->lruvec_stat_local[idx]);
 #ifdef CONFIG_SMP
 	if (x < 0)
 		x = 0;
@@ -959,6 +996,11 @@ static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
 {
 }
 
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
+{
+	return 0;
+}
+
 static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
 						   int idx)
 {
@@ -989,6 +1031,12 @@ static inline void mod_memcg_page_state(struct page *page,
 {
 }
 
+static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
+					      enum node_stat_item idx)
+{
+	return node_page_state(lruvec_pgdat(lruvec), idx);
+}
+
 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 						    enum node_stat_item idx)
 {
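
The header changes above pair each counter with a _local twin: after this patch, memcg_page_state() returns the subtree-inclusive value that writers keep up to date, while memcg_page_state_local() returns only what was charged to the cgroup itself. A minimal sketch of how a caller might use the two getters follows; report_cache_split() and its message format are hypothetical, not part of the patch, and the per-CPU batching (MEMCG_CHARGE_BATCH) means the two reads are only approximately coherent with each other:

/* Hypothetical consumer of the two getters above; illustration only. */
static void report_cache_split(struct mem_cgroup *memcg)
{
	/* hierarchical: includes pages charged to descendants */
	unsigned long total = memcg_page_state(memcg, MEMCG_CACHE);
	/* local: pages charged to this cgroup only */
	unsigned long self = memcg_page_state_local(memcg, MEMCG_CACHE);

	pr_info("page cache: %lu total, %lu local, %lu from children\n",
		total, self, total > self ? total - self : 0);
}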
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f09ca5c498a6..7a764cbf8495 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -702,12 +702,27 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 
 	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &memcg->vmstats[idx]);
+		struct mem_cgroup *mi;
+
+		atomic_long_add(x, &memcg->vmstats_local[idx]);
+		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
+			atomic_long_add(x, &mi->vmstats[idx]);
 		x = 0;
 	}
 	__this_cpu_write(memcg->vmstats_percpu->stat[idx], x);
 }
 
+static struct mem_cgroup_per_node *
+parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
+{
+	struct mem_cgroup *parent;
+
+	parent = parent_mem_cgroup(pn->memcg);
+	if (!parent)
+		return NULL;
+	return mem_cgroup_nodeinfo(parent, nid);
+}
+
 /**
  * __mod_lruvec_state - update lruvec memory statistics
  * @lruvec: the lruvec
@@ -721,24 +736,31 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 			int val)
 {
+	pg_data_t *pgdat = lruvec_pgdat(lruvec);
 	struct mem_cgroup_per_node *pn;
+	struct mem_cgroup *memcg;
 	long x;
 
 	/* Update node */
-	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+	__mod_node_page_state(pgdat, idx, val);
 
 	if (mem_cgroup_disabled())
 		return;
 
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+	memcg = pn->memcg;
 
 	/* Update memcg */
-	__mod_memcg_state(pn->memcg, idx, val);
+	__mod_memcg_state(memcg, idx, val);
 
 	/* Update lruvec */
 	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &pn->lruvec_stat[idx]);
+		struct mem_cgroup_per_node *pi;
+
+		atomic_long_add(x, &pn->lruvec_stat_local[idx]);
+		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
+			atomic_long_add(x, &pi->lruvec_stat[idx]);
 		x = 0;
 	}
 	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
@@ -760,18 +782,26 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 
 	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &memcg->vmevents[idx]);
+		struct mem_cgroup *mi;
+
+		atomic_long_add(x, &memcg->vmevents_local[idx]);
+		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
+			atomic_long_add(x, &mi->vmevents[idx]);
 		x = 0;
 	}
 	__this_cpu_write(memcg->vmstats_percpu->events[idx], x);
 }
 
-static unsigned long memcg_events_local(struct mem_cgroup *memcg,
-					int event)
+static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
 {
 	return atomic_long_read(&memcg->vmevents[event]);
 }
 
+static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
+{
+	return atomic_long_read(&memcg->vmevents_local[event]);
+}
+
 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 					 struct page *page,
 					 bool compound, int nr_pages)
@@ -2157,7 +2187,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
 static int memcg_hotplug_cpu_dead(unsigned int cpu)
 {
 	struct memcg_stock_pcp *stock;
-	struct mem_cgroup *memcg;
+	struct mem_cgroup *memcg, *mi;
 
 	stock = &per_cpu(memcg_stock, cpu);
 	drain_stock(stock);
@@ -2170,8 +2200,11 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
 			long x;
 
 			x = this_cpu_xchg(memcg->vmstats_percpu->stat[i], 0);
-			if (x)
-				atomic_long_add(x, &memcg->vmstats[i]);
+			if (x) {
+				atomic_long_add(x, &memcg->vmstats_local[i]);
+				for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
+					atomic_long_add(x, &mi->vmstats[i]);
+			}
 
 			if (i >= NR_VM_NODE_STAT_ITEMS)
 				continue;
@@ -2181,8 +2214,12 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
 
 			pn = mem_cgroup_nodeinfo(memcg, nid);
 			x = this_cpu_xchg(pn->lruvec_stat_cpu->count[i], 0);
-			if (x)
-				atomic_long_add(x, &pn->lruvec_stat[i]);
+			if (x) {
+				atomic_long_add(x, &pn->lruvec_stat_local[i]);
+				do {
+					atomic_long_add(x, &pn->lruvec_stat[i]);
+				} while ((pn = parent_nodeinfo(pn, nid)));
+			}
 		}
 	}
 
@@ -2190,8 +2227,11 @@ static int memcg_hotplug_cpu_dead(unsigned int cpu)
 		long x;
 
 		x = this_cpu_xchg(memcg->vmstats_percpu->events[i], 0);
-		if (x)
-			atomic_long_add(x, &memcg->vmevents[i]);
+		if (x) {
+			atomic_long_add(x, &memcg->vmevents_local[i]);
+			for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
+				atomic_long_add(x, &mi->vmevents[i]);
+		}
 	}
 }
 
@@ -3021,54 +3061,15 @@ static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
 	return retval;
 }
 
-struct accumulated_vmstats {
-	unsigned long vmstats[MEMCG_NR_STAT];
-	unsigned long vmevents[NR_VM_EVENT_ITEMS];
-	unsigned long lru_pages[NR_LRU_LISTS];
-
-	/* overrides for v1 */
-	const unsigned int *vmstats_array;
-	const unsigned int *vmevents_array;
-
-	int vmstats_size;
-	int vmevents_size;
-};
-
-static void accumulate_vmstats(struct mem_cgroup *memcg,
-			       struct accumulated_vmstats *acc)
-{
-	struct mem_cgroup *mi;
-	int i;
-
-	for_each_mem_cgroup_tree(mi, memcg) {
-		for (i = 0; i < acc->vmstats_size; i++)
-			acc->vmstats[i] += memcg_page_state_local(mi,
-				acc->vmstats_array ? acc->vmstats_array[i] : i);
-
-		for (i = 0; i < acc->vmevents_size; i++)
-			acc->vmevents[i] += memcg_events_local(mi,
-				acc->vmevents_array
-				? acc->vmevents_array[i] : i);
-
-		for (i = 0; i < NR_LRU_LISTS; i++)
-			acc->lru_pages[i] += memcg_page_state_local(mi,
-				NR_LRU_BASE + i);
-	}
-}
-
 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
 {
-	unsigned long val = 0;
+	unsigned long val;
 
 	if (mem_cgroup_is_root(memcg)) {
-		struct mem_cgroup *iter;
-
-		for_each_mem_cgroup_tree(iter, memcg) {
-			val += memcg_page_state_local(iter, MEMCG_CACHE);
-			val += memcg_page_state_local(iter, MEMCG_RSS);
-			if (swap)
-				val += memcg_page_state_local(iter, MEMCG_SWAP);
-		}
+		val = memcg_page_state(memcg, MEMCG_CACHE) +
+			memcg_page_state(memcg, MEMCG_RSS);
+		if (swap)
+			val += memcg_page_state(memcg, MEMCG_SWAP);
 	} else {
 		if (!swap)
 			val = page_counter_read(&memcg->memory);
@@ -3499,7 +3500,6 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 	unsigned long memory, memsw;
 	struct mem_cgroup *mi;
 	unsigned int i;
-	struct accumulated_vmstats acc;
 
 	BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
 	BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
@@ -3533,27 +3533,21 @@ static int memcg_stat_show(struct seq_file *m, void *v)
 	seq_printf(m, "hierarchical_memsw_limit %llu\n",
 		   (u64)memsw * PAGE_SIZE);
 
-	memset(&acc, 0, sizeof(acc));
-	acc.vmstats_size = ARRAY_SIZE(memcg1_stats);
-	acc.vmstats_array = memcg1_stats;
-	acc.vmevents_size = ARRAY_SIZE(memcg1_events);
-	acc.vmevents_array = memcg1_events;
-	accumulate_vmstats(memcg, &acc);
-
 	for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
 		if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
 			continue;
 		seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
-			   (u64)acc.vmstats[i] * PAGE_SIZE);
+			   (u64)memcg_page_state(memcg, memcg1_stats[i]) * PAGE_SIZE);
 	}
 
 	for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
 		seq_printf(m, "total_%s %llu\n", memcg1_event_names[i],
-			   (u64)acc.vmevents[i]);
+			   (u64)memcg_events(memcg, memcg1_events[i]));
 
 	for (i = 0; i < NR_LRU_LISTS; i++)
 		seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i],
-			   (u64)acc.lru_pages[i] * PAGE_SIZE);
+			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
+			   PAGE_SIZE);
 
 #ifdef CONFIG_DEBUG_VM
 	{
@@ -5646,7 +5640,6 @@ static int memory_events_show(struct seq_file *m, void *v)
 static int memory_stat_show(struct seq_file *m, void *v)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
-	struct accumulated_vmstats acc;
 	int i;
 
 	/*
@@ -5660,31 +5653,27 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	 * Current memory state:
 	 */
 
-	memset(&acc, 0, sizeof(acc));
-	acc.vmstats_size = MEMCG_NR_STAT;
-	acc.vmevents_size = NR_VM_EVENT_ITEMS;
-	accumulate_vmstats(memcg, &acc);
-
 	seq_printf(m, "anon %llu\n",
-		   (u64)acc.vmstats[MEMCG_RSS] * PAGE_SIZE);
+		   (u64)memcg_page_state(memcg, MEMCG_RSS) * PAGE_SIZE);
 	seq_printf(m, "file %llu\n",
-		   (u64)acc.vmstats[MEMCG_CACHE] * PAGE_SIZE);
+		   (u64)memcg_page_state(memcg, MEMCG_CACHE) * PAGE_SIZE);
 	seq_printf(m, "kernel_stack %llu\n",
-		   (u64)acc.vmstats[MEMCG_KERNEL_STACK_KB] * 1024);
+		   (u64)memcg_page_state(memcg, MEMCG_KERNEL_STACK_KB) * 1024);
 	seq_printf(m, "slab %llu\n",
-		   (u64)(acc.vmstats[NR_SLAB_RECLAIMABLE] +
-		       acc.vmstats[NR_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
+		   (u64)(memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) +
+			 memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE)) *
+		   PAGE_SIZE);
 	seq_printf(m, "sock %llu\n",
-		   (u64)acc.vmstats[MEMCG_SOCK] * PAGE_SIZE);
+		   (u64)memcg_page_state(memcg, MEMCG_SOCK) * PAGE_SIZE);
 
 	seq_printf(m, "shmem %llu\n",
-		   (u64)acc.vmstats[NR_SHMEM] * PAGE_SIZE);
+		   (u64)memcg_page_state(memcg, NR_SHMEM) * PAGE_SIZE);
 	seq_printf(m, "file_mapped %llu\n",
-		   (u64)acc.vmstats[NR_FILE_MAPPED] * PAGE_SIZE);
+		   (u64)memcg_page_state(memcg, NR_FILE_MAPPED) * PAGE_SIZE);
 	seq_printf(m, "file_dirty %llu\n",
-		   (u64)acc.vmstats[NR_FILE_DIRTY] * PAGE_SIZE);
+		   (u64)memcg_page_state(memcg, NR_FILE_DIRTY) * PAGE_SIZE);
 	seq_printf(m, "file_writeback %llu\n",
-		   (u64)acc.vmstats[NR_WRITEBACK] * PAGE_SIZE);
+		   (u64)memcg_page_state(memcg, NR_WRITEBACK) * PAGE_SIZE);
 
 	/*
 	 * TODO: We should eventually replace our own MEMCG_RSS_HUGE counter
@@ -5693,43 +5682,47 @@ static int memory_stat_show(struct seq_file *m, void *v)
 	 * where the page->mem_cgroup is set up and stable.
 	 */
 	seq_printf(m, "anon_thp %llu\n",
-		   (u64)acc.vmstats[MEMCG_RSS_HUGE] * PAGE_SIZE);
+		   (u64)memcg_page_state(memcg, MEMCG_RSS_HUGE) * PAGE_SIZE);
 
 	for (i = 0; i < NR_LRU_LISTS; i++)
 		seq_printf(m, "%s %llu\n", mem_cgroup_lru_names[i],
-			   (u64)acc.lru_pages[i] * PAGE_SIZE);
+			   (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
+			   PAGE_SIZE);
 
 	seq_printf(m, "slab_reclaimable %llu\n",
-		   (u64)acc.vmstats[NR_SLAB_RECLAIMABLE] * PAGE_SIZE);
+		   (u64)memcg_page_state(memcg, NR_SLAB_RECLAIMABLE) *
+		   PAGE_SIZE);
 	seq_printf(m, "slab_unreclaimable %llu\n",
-		   (u64)acc.vmstats[NR_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
+		   (u64)memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE) *
+		   PAGE_SIZE);
 
 	/* Accumulated memory events */
 
-	seq_printf(m, "pgfault %lu\n", acc.vmevents[PGFAULT]);
-	seq_printf(m, "pgmajfault %lu\n", acc.vmevents[PGMAJFAULT]);
+	seq_printf(m, "pgfault %lu\n", memcg_events(memcg, PGFAULT));
+	seq_printf(m, "pgmajfault %lu\n", memcg_events(memcg, PGMAJFAULT));
 
 	seq_printf(m, "workingset_refault %lu\n",
-		   acc.vmstats[WORKINGSET_REFAULT]);
+		   memcg_page_state(memcg, WORKINGSET_REFAULT));
 	seq_printf(m, "workingset_activate %lu\n",
-		   acc.vmstats[WORKINGSET_ACTIVATE]);
+		   memcg_page_state(memcg, WORKINGSET_ACTIVATE));
 	seq_printf(m, "workingset_nodereclaim %lu\n",
-		   acc.vmstats[WORKINGSET_NODERECLAIM]);
+		   memcg_page_state(memcg, WORKINGSET_NODERECLAIM));
 
-	seq_printf(m, "pgrefill %lu\n", acc.vmevents[PGREFILL]);
-	seq_printf(m, "pgscan %lu\n", acc.vmevents[PGSCAN_KSWAPD] +
-		   acc.vmevents[PGSCAN_DIRECT]);
-	seq_printf(m, "pgsteal %lu\n", acc.vmevents[PGSTEAL_KSWAPD] +
-		   acc.vmevents[PGSTEAL_DIRECT]);
-	seq_printf(m, "pgactivate %lu\n", acc.vmevents[PGACTIVATE]);
-	seq_printf(m, "pgdeactivate %lu\n", acc.vmevents[PGDEACTIVATE]);
-	seq_printf(m, "pglazyfree %lu\n", acc.vmevents[PGLAZYFREE]);
-	seq_printf(m, "pglazyfreed %lu\n", acc.vmevents[PGLAZYFREED]);
+	seq_printf(m, "pgrefill %lu\n", memcg_events(memcg, PGREFILL));
+	seq_printf(m, "pgscan %lu\n", memcg_events(memcg, PGSCAN_KSWAPD) +
+		   memcg_events(memcg, PGSCAN_DIRECT));
+	seq_printf(m, "pgsteal %lu\n", memcg_events(memcg, PGSTEAL_KSWAPD) +
+		   memcg_events(memcg, PGSTEAL_DIRECT));
+	seq_printf(m, "pgactivate %lu\n", memcg_events(memcg, PGACTIVATE));
+	seq_printf(m, "pgdeactivate %lu\n", memcg_events(memcg, PGDEACTIVATE));
+	seq_printf(m, "pglazyfree %lu\n", memcg_events(memcg, PGLAZYFREE));
+	seq_printf(m, "pglazyfreed %lu\n", memcg_events(memcg, PGLAZYFREED));
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	seq_printf(m, "thp_fault_alloc %lu\n", acc.vmevents[THP_FAULT_ALLOC]);
+	seq_printf(m, "thp_fault_alloc %lu\n",
+		   memcg_events(memcg, THP_FAULT_ALLOC));
 	seq_printf(m, "thp_collapse_alloc %lu\n",
-		   acc.vmevents[THP_COLLAPSE_ALLOC]);
+		   memcg_events(memcg, THP_COLLAPSE_ALLOC));
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 	return 0;
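
Taken together, the mm/memcontrol.c changes move the hierarchy walk from the read side (the deleted accumulate_vmstats() and its for_each_mem_cgroup_tree() loops) to the write side, where it runs at most once per MEMCG_CHARGE_BATCH worth of per-CPU updates. Below is a minimal userspace model of that batching scheme; struct group, BATCH, pending, and mod_state() are simplified stand-ins for the kernel's per-CPU machinery, not kernel API:

#include <stdatomic.h>
#include <stdlib.h>

#define BATCH 32			/* stand-in for MEMCG_CHARGE_BATCH */

struct group {
	struct group *parent;		/* NULL at the root */
	atomic_long vmstat;		/* hierarchical: self + descendants */
	atomic_long vmstat_local;	/* self only */
	long pending;			/* models one CPU's batched delta */
};

/* Models __mod_memcg_state(): cheap in the common case; the
 * O(tree depth) propagation runs only when a batch overflows. */
static void mod_state(struct group *g, long val)
{
	long x = g->pending + val;

	if (labs(x) > BATCH) {
		struct group *gi;

		atomic_fetch_add(&g->vmstat_local, x);
		for (gi = g; gi; gi = gi->parent)
			atomic_fetch_add(&gi->vmstat, x);
		x = 0;
	}
	g->pending = x;
}

/* Models memcg_page_state(): one atomic read, no subtree walk. */
static long read_state(struct group *g)
{
	return atomic_load(&g->vmstat);
}

This is what lets memcg_stat_show() and memory_stat_show() above replace a full-tree accumulation on every file read with a single atomic read per counter; the cost moves to charge/uncharge time, amortized by the batch.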
