about summary refs log tree commit diff stats
path: root/mm/memcontrol.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  22
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 249671873aa9..cdbb7a84cb6e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -695,12 +695,15 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 	if (mem_cgroup_disabled())
 		return;
 
-	__this_cpu_add(memcg->vmstats_local->stat[idx], val);
-
 	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
 		struct mem_cgroup *mi;
 
+		/*
+		 * Batch local counters to keep them in sync with
+		 * the hierarchical ones.
+		 */
+		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
 			atomic_long_add(x, &mi->vmstats[idx]);
 		x = 0;
@@ -749,13 +752,15 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 	/* Update memcg */
 	__mod_memcg_state(memcg, idx, val);
 
-	/* Update lruvec */
-	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
-
 	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
 		struct mem_cgroup_per_node *pi;
 
+		/*
+		 * Batch local counters to keep them in sync with
+		 * the hierarchical ones.
+		 */
+		__this_cpu_add(pn->lruvec_stat_local->count[idx], x);
 		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
 			atomic_long_add(x, &pi->lruvec_stat[idx]);
 		x = 0;
@@ -777,12 +782,15 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 	if (mem_cgroup_disabled())
 		return;
 
-	__this_cpu_add(memcg->vmstats_local->events[idx], count);
-
 	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
 		struct mem_cgroup *mi;
 
+		/*
+		 * Batch local counters to keep them in sync with
+		 * the hierarchical ones.
+		 */
+		__this_cpu_add(memcg->vmstats_local->events[idx], x);
 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
 			atomic_long_add(x, &mi->vmevents[idx]);
 		x = 0;