path: root/mm/memcontrol.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-17 11:58:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-17 11:58:04 -0400
commit     57a8ec387e1441ea5e1232bc0749fb99a8cba7e7 (patch)
tree       b5fb03fc6bc5754de8b5b1f8b0e4f36d67c8315c /mm/memcontrol.c
parent     0a8ad0ffa4d80a544f6cbff703bf6394339afcdf (diff)
parent     43e11fa2d1d3b6e35629fa556eb7d571edba2010 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "VM:

   - z3fold fixes and enhancements by Henry Burns and Vitaly Wool

   - more accurate reclaimed slab caches calculations by Yafang Shao

   - fix MAP_UNINITIALIZED UAPI symbol to not depend on config, by
     Christoph Hellwig

   - !CONFIG_MMU fixes by Christoph Hellwig

   - new novmcoredd parameter to omit device dumps from vmcore, by
     Kairui Song

   - new test_meminit module for testing heap and pagealloc
     initialization, by Alexander Potapenko

   - ioremap improvements for huge mappings, by Anshuman Khandual

   - generalize kprobe page fault handling, by Anshuman Khandual

   - device-dax hotplug fixes and improvements, by Pavel Tatashin

   - enable synchronous DAX fault on powerpc, by Aneesh Kumar K.V

   - add pte_devmap() support for arm64, by Robin Murphy

   - unify locked_vm accounting with a helper, by Daniel Jordan

   - several misc fixes

 core/lib:

   - new typeof_member() macro including some users, by Alexey Dobriyan

   - make BIT() and GENMASK() available in asm, by Masahiro Yamada

   - changed LIST_POISON2 on x86_64 to 0xdead000000000122 for better
     code generation, by Alexey Dobriyan

   - rbtree code size optimizations, by Michel Lespinasse

   - convert struct pid count to refcount_t, by Joel Fernandes

 get_maintainer.pl:

   - add --no-moderated switch to skip moderated ML's, by Joe Perches

 misc:

   - ptrace PTRACE_GET_SYSCALL_INFO interface

   - coda updates

   - gdb scripts, various"

[ Using merge message suggestion from Vlastimil Babka, with some editing - Linus ]

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (100 commits)
  fs/select.c: use struct_size() in kmalloc()
  mm: add account_locked_vm utility function
  arm64: mm: implement pte_devmap support
  mm: introduce ARCH_HAS_PTE_DEVMAP
  mm: clean up is_device_*_page() definitions
  mm/mmap: move common defines to mman-common.h
  mm: move MAP_SYNC to asm-generic/mman-common.h
  device-dax: "Hotremove" persistent memory that is used like normal RAM
  mm/hotplug: make remove_memory() interface usable
  device-dax: fix memory and resource leak if hotplug fails
  include/linux/lz4.h: fix spelling and copy-paste errors in documentation
  ipc/mqueue.c: only perform resource calculation if user valid
  include/asm-generic/bug.h: fix "cut here" for WARN_ON for __WARN_TAINT architectures
  scripts/gdb: add helpers to find and list devices
  scripts/gdb: add lx-genpd-summary command
  drivers/pps/pps.c: clear offset flags in PPS_SETPARAMS ioctl
  kernel/pid.c: convert struct pid count to refcount_t
  drivers/rapidio/devices/rio_mport_cdev.c: NUL terminate some strings
  select: shift restore_saved_sigmask_unless() into poll_select_copy_remaining()
  select: change do_poll() to return -ERESTARTNOHAND rather than -EINTR
  ...
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  22
1 file changed, 15 insertions, 7 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 249671873aa9..cdbb7a84cb6e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -695,12 +695,15 @@ void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
 	if (mem_cgroup_disabled())
 		return;
 
-	__this_cpu_add(memcg->vmstats_local->stat[idx], val);
-
 	x = val + __this_cpu_read(memcg->vmstats_percpu->stat[idx]);
 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
 		struct mem_cgroup *mi;
 
+		/*
+		 * Batch local counters to keep them in sync with
+		 * the hierarchical ones.
+		 */
+		__this_cpu_add(memcg->vmstats_local->stat[idx], x);
 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
 			atomic_long_add(x, &mi->vmstats[idx]);
 		x = 0;
@@ -749,13 +752,15 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 	/* Update memcg */
 	__mod_memcg_state(memcg, idx, val);
 
-	/* Update lruvec */
-	__this_cpu_add(pn->lruvec_stat_local->count[idx], val);
-
 	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
 	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
 		struct mem_cgroup_per_node *pi;
 
+		/*
+		 * Batch local counters to keep them in sync with
+		 * the hierarchical ones.
+		 */
+		__this_cpu_add(pn->lruvec_stat_local->count[idx], x);
 		for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
 			atomic_long_add(x, &pi->lruvec_stat[idx]);
 		x = 0;
@@ -777,12 +782,15 @@ void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
 	if (mem_cgroup_disabled())
 		return;
 
-	__this_cpu_add(memcg->vmstats_local->events[idx], count);
-
 	x = count + __this_cpu_read(memcg->vmstats_percpu->events[idx]);
 	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
 		struct mem_cgroup *mi;
 
+		/*
+		 * Batch local counters to keep them in sync with
+		 * the hierarchical ones.
+		 */
+		__this_cpu_add(memcg->vmstats_local->events[idx], x);
 		for (mi = memcg; mi; mi = parent_mem_cgroup(mi))
 			atomic_long_add(x, &mi->vmevents[idx]);
 		x = 0;
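
All three hunks apply the same change: the group's local counter is no longer bumped on every update, but only together with the hierarchical counters, by the same batched delta, once the per-CPU batch exceeds MEMCG_CHARGE_BATCH. Below is a minimal userspace C sketch of that batching pattern, not the kernel code; struct group, mod_state(), percpu_delta and CHARGE_BATCH are invented names for illustration, standing in for the memcg structures and the __this_cpu_add()/atomic_long_add() calls in the diff.

/*
 * Illustrative sketch only (plain userspace C): one CPU's batch feeding a
 * local counter and a chain of hierarchical counters.
 */
#include <stdio.h>
#include <stdlib.h>

#define CHARGE_BATCH 32		/* stands in for MEMCG_CHARGE_BATCH */

struct group {
	struct group *parent;
	long hier_stat;		/* hierarchical counter (atomic_long_t in the kernel) */
	long local_stat;	/* this group's own counter (vmstats_local) */
	long percpu_delta;	/* per-CPU batch (vmstats_percpu), one CPU shown */
};

/*
 * Mirror of the patched __mod_memcg_state(): the local and the hierarchical
 * counters are only touched when the batch overflows, and both receive the
 * same delta x, so they cannot drift apart.
 */
static void mod_state(struct group *g, long val)
{
	long x = val + g->percpu_delta;

	if (labs(x) > CHARGE_BATCH) {
		/* Batch local counters to keep them in sync with the
		 * hierarchical ones. */
		g->local_stat += x;
		for (struct group *gi = g; gi; gi = gi->parent)
			gi->hier_stat += x;
		x = 0;
	}
	g->percpu_delta = x;
}

int main(void)
{
	struct group root = { 0 };
	struct group child = { .parent = &root };

	for (int i = 0; i < 100; i++)
		mod_state(&child, 1);

	/* local and hierarchical counters agree; the remainder sits in the
	 * per-CPU batch until the next overflow */
	printf("child local=%ld hier=%ld pending=%ld root hier=%ld\n",
	       child.local_stat, child.hier_stat, child.percpu_delta,
	       root.hier_stat);
	return 0;
}

Run as-is, the sketch keeps local_stat equal to the child's hier_stat at all times, with at most CHARGE_BATCH worth of updates pending in percpu_delta, which is the invariant the "keep them in sync" comment in the hunks above is about.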