diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-11 19:58:24 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-08-11 19:58:24 -0400 |
commit | 4b9eaf33d83d91430b7ca45d0ebf8241da489c92 (patch) | |
tree | 85ca3cf23a902ac53fecf8f9b6129c745af44d29 | |
parent | d3396e1e4ec4aa41ef563841e8ba511ae0589003 (diff) | |
parent | 5830169f47269f78f6624bd70165eb571270da82 (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
"7 fixes"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
mm/memory_hotplug.c: initialize per_cpu_nodestats for hotadded pgdats
mm, oom: fix uninitialized ret in task_will_free_mem()
kasan: remove the unnecessary WARN_ONCE from quarantine.c
mm: memcontrol: fix memcg id ref counter on swap charge move
mm: memcontrol: fix swap counter leak on swapout from offline cgroup
proc, meminfo: use correct helpers for calculating LRU sizes in meminfo
mm/hugetlb: fix incorrect hugepages count during mem hotplug
-rw-r--r-- | fs/proc/meminfo.c | 2 | ||||
-rw-r--r-- | mm/hugetlb.c | 1 | ||||
-rw-r--r-- | mm/kasan/quarantine.c | 7 | ||||
-rw-r--r-- | mm/memcontrol.c | 68 | ||||
-rw-r--r-- | mm/memory_hotplug.c | 2 | ||||
-rw-r--r-- | mm/oom_kill.c | 2 | ||||
-rw-r--r-- | mm/page_alloc.c | 2 |
7 files changed, 64 insertions(+), 20 deletions(-)
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 09e18fdf61e5..b9a8c813e5e6 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c | |||
@@ -46,7 +46,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) | |||
46 | cached = 0; | 46 | cached = 0; |
47 | 47 | ||
48 | for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) | 48 | for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) |
49 | pages[lru] = global_page_state(NR_LRU_BASE + lru); | 49 | pages[lru] = global_node_page_state(NR_LRU_BASE + lru); |
50 | 50 | ||
51 | available = si_mem_available(); | 51 | available = si_mem_available(); |
52 | 52 | ||
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index b9aa1b0b38b0..87e11d8ad536 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -1448,6 +1448,7 @@ static void dissolve_free_huge_page(struct page *page) | |||
1448 | list_del(&page->lru); | 1448 | list_del(&page->lru); |
1449 | h->free_huge_pages--; | 1449 | h->free_huge_pages--; |
1450 | h->free_huge_pages_node[nid]--; | 1450 | h->free_huge_pages_node[nid]--; |
1451 | h->max_huge_pages--; | ||
1451 | update_and_free_page(h, page); | 1452 | update_and_free_page(h, page); |
1452 | } | 1453 | } |
1453 | spin_unlock(&hugetlb_lock); | 1454 | spin_unlock(&hugetlb_lock); |
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c index b6728a33a4ac..baabaad4a4aa 100644 --- a/mm/kasan/quarantine.c +++ b/mm/kasan/quarantine.c | |||
@@ -217,11 +217,8 @@ void quarantine_reduce(void) | |||
217 | new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) / | 217 | new_quarantine_size = (READ_ONCE(totalram_pages) << PAGE_SHIFT) / |
218 | QUARANTINE_FRACTION; | 218 | QUARANTINE_FRACTION; |
219 | percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus(); | 219 | percpu_quarantines = QUARANTINE_PERCPU_SIZE * num_online_cpus(); |
220 | if (WARN_ONCE(new_quarantine_size < percpu_quarantines, | 220 | new_quarantine_size = (new_quarantine_size < percpu_quarantines) ? |
221 | "Too little memory, disabling global KASAN quarantine.\n")) | 221 | 0 : new_quarantine_size - percpu_quarantines; |
222 | new_quarantine_size = 0; | ||
223 | else | ||
224 | new_quarantine_size -= percpu_quarantines; | ||
225 | WRITE_ONCE(quarantine_size, new_quarantine_size); | 222 | WRITE_ONCE(quarantine_size, new_quarantine_size); |
226 | 223 | ||
227 | last = global_quarantine.head; | 224 | last = global_quarantine.head; |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e74d7080ec9e..2ff0289ad061 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -4077,14 +4077,32 @@ static struct cftype mem_cgroup_legacy_files[] = { | |||
4077 | 4077 | ||
4078 | static DEFINE_IDR(mem_cgroup_idr); | 4078 | static DEFINE_IDR(mem_cgroup_idr); |
4079 | 4079 | ||
4080 | static void mem_cgroup_id_get(struct mem_cgroup *memcg) | 4080 | static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n) |
4081 | { | 4081 | { |
4082 | atomic_inc(&memcg->id.ref); | 4082 | atomic_add(n, &memcg->id.ref); |
4083 | } | 4083 | } |
4084 | 4084 | ||
4085 | static void mem_cgroup_id_put(struct mem_cgroup *memcg) | 4085 | static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg) |
4086 | { | 4086 | { |
4087 | if (atomic_dec_and_test(&memcg->id.ref)) { | 4087 | while (!atomic_inc_not_zero(&memcg->id.ref)) { |
4088 | /* | ||
4089 | * The root cgroup cannot be destroyed, so it's refcount must | ||
4090 | * always be >= 1. | ||
4091 | */ | ||
4092 | if (WARN_ON_ONCE(memcg == root_mem_cgroup)) { | ||
4093 | VM_BUG_ON(1); | ||
4094 | break; | ||
4095 | } | ||
4096 | memcg = parent_mem_cgroup(memcg); | ||
4097 | if (!memcg) | ||
4098 | memcg = root_mem_cgroup; | ||
4099 | } | ||
4100 | return memcg; | ||
4101 | } | ||
4102 | |||
4103 | static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n) | ||
4104 | { | ||
4105 | if (atomic_sub_and_test(n, &memcg->id.ref)) { | ||
4088 | idr_remove(&mem_cgroup_idr, memcg->id.id); | 4106 | idr_remove(&mem_cgroup_idr, memcg->id.id); |
4089 | memcg->id.id = 0; | 4107 | memcg->id.id = 0; |
4090 | 4108 | ||
@@ -4093,6 +4111,16 @@ static void mem_cgroup_id_put(struct mem_cgroup *memcg) | |||
4093 | } | 4111 | } |
4094 | } | 4112 | } |
4095 | 4113 | ||
4114 | static inline void mem_cgroup_id_get(struct mem_cgroup *memcg) | ||
4115 | { | ||
4116 | mem_cgroup_id_get_many(memcg, 1); | ||
4117 | } | ||
4118 | |||
4119 | static inline void mem_cgroup_id_put(struct mem_cgroup *memcg) | ||
4120 | { | ||
4121 | mem_cgroup_id_put_many(memcg, 1); | ||
4122 | } | ||
4123 | |||
4096 | /** | 4124 | /** |
4097 | * mem_cgroup_from_id - look up a memcg from a memcg id | 4125 | * mem_cgroup_from_id - look up a memcg from a memcg id |
4098 | * @id: the memcg id to look up | 4126 | * @id: the memcg id to look up |
@@ -4727,6 +4755,8 @@ static void __mem_cgroup_clear_mc(void) | |||
4727 | if (!mem_cgroup_is_root(mc.from)) | 4755 | if (!mem_cgroup_is_root(mc.from)) |
4728 | page_counter_uncharge(&mc.from->memsw, mc.moved_swap); | 4756 | page_counter_uncharge(&mc.from->memsw, mc.moved_swap); |
4729 | 4757 | ||
4758 | mem_cgroup_id_put_many(mc.from, mc.moved_swap); | ||
4759 | |||
4730 | /* | 4760 | /* |
4731 | * we charged both to->memory and to->memsw, so we | 4761 | * we charged both to->memory and to->memsw, so we |
4732 | * should uncharge to->memory. | 4762 | * should uncharge to->memory. |
@@ -4734,9 +4764,9 @@ static void __mem_cgroup_clear_mc(void) | |||
4734 | if (!mem_cgroup_is_root(mc.to)) | 4764 | if (!mem_cgroup_is_root(mc.to)) |
4735 | page_counter_uncharge(&mc.to->memory, mc.moved_swap); | 4765 | page_counter_uncharge(&mc.to->memory, mc.moved_swap); |
4736 | 4766 | ||
4737 | css_put_many(&mc.from->css, mc.moved_swap); | 4767 | mem_cgroup_id_get_many(mc.to, mc.moved_swap); |
4768 | css_put_many(&mc.to->css, mc.moved_swap); | ||
4738 | 4769 | ||
4739 | /* we've already done css_get(mc.to) */ | ||
4740 | mc.moved_swap = 0; | 4770 | mc.moved_swap = 0; |
4741 | } | 4771 | } |
4742 | memcg_oom_recover(from); | 4772 | memcg_oom_recover(from); |
@@ -5800,7 +5830,7 @@ subsys_initcall(mem_cgroup_init); | |||
5800 | */ | 5830 | */ |
5801 | void mem_cgroup_swapout(struct page *page, swp_entry_t entry) | 5831 | void mem_cgroup_swapout(struct page *page, swp_entry_t entry) |
5802 | { | 5832 | { |
5803 | struct mem_cgroup *memcg; | 5833 | struct mem_cgroup *memcg, *swap_memcg; |
5804 | unsigned short oldid; | 5834 | unsigned short oldid; |
5805 | 5835 | ||
5806 | VM_BUG_ON_PAGE(PageLRU(page), page); | 5836 | VM_BUG_ON_PAGE(PageLRU(page), page); |
@@ -5815,16 +5845,27 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) | |||
5815 | if (!memcg) | 5845 | if (!memcg) |
5816 | return; | 5846 | return; |
5817 | 5847 | ||
5818 | mem_cgroup_id_get(memcg); | 5848 | /* |
5819 | oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); | 5849 | * In case the memcg owning these pages has been offlined and doesn't |
5850 | * have an ID allocated to it anymore, charge the closest online | ||
5851 | * ancestor for the swap instead and transfer the memory+swap charge. | ||
5852 | */ | ||
5853 | swap_memcg = mem_cgroup_id_get_online(memcg); | ||
5854 | oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg)); | ||
5820 | VM_BUG_ON_PAGE(oldid, page); | 5855 | VM_BUG_ON_PAGE(oldid, page); |
5821 | mem_cgroup_swap_statistics(memcg, true); | 5856 | mem_cgroup_swap_statistics(swap_memcg, true); |
5822 | 5857 | ||
5823 | page->mem_cgroup = NULL; | 5858 | page->mem_cgroup = NULL; |
5824 | 5859 | ||
5825 | if (!mem_cgroup_is_root(memcg)) | 5860 | if (!mem_cgroup_is_root(memcg)) |
5826 | page_counter_uncharge(&memcg->memory, 1); | 5861 | page_counter_uncharge(&memcg->memory, 1); |
5827 | 5862 | ||
5863 | if (memcg != swap_memcg) { | ||
5864 | if (!mem_cgroup_is_root(swap_memcg)) | ||
5865 | page_counter_charge(&swap_memcg->memsw, 1); | ||
5866 | page_counter_uncharge(&memcg->memsw, 1); | ||
5867 | } | ||
5868 | |||
5828 | /* | 5869 | /* |
5829 | * Interrupts should be disabled here because the caller holds the | 5870 | * Interrupts should be disabled here because the caller holds the |
5830 | * mapping->tree_lock lock which is taken with interrupts-off. It is | 5871 | * mapping->tree_lock lock which is taken with interrupts-off. It is |
@@ -5863,11 +5904,14 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) | |||
5863 | if (!memcg) | 5904 | if (!memcg) |
5864 | return 0; | 5905 | return 0; |
5865 | 5906 | ||
5907 | memcg = mem_cgroup_id_get_online(memcg); | ||
5908 | |||
5866 | if (!mem_cgroup_is_root(memcg) && | 5909 | if (!mem_cgroup_is_root(memcg) && |
5867 | !page_counter_try_charge(&memcg->swap, 1, &counter)) | 5910 | !page_counter_try_charge(&memcg->swap, 1, &counter)) { |
5911 | mem_cgroup_id_put(memcg); | ||
5868 | return -ENOMEM; | 5912 | return -ENOMEM; |
5913 | } | ||
5869 | 5914 | ||
5870 | mem_cgroup_id_get(memcg); | ||
5871 | oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); | 5915 | oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); |
5872 | VM_BUG_ON_PAGE(oldid, page); | 5916 | VM_BUG_ON_PAGE(oldid, page); |
5873 | mem_cgroup_swap_statistics(memcg, true); | 5917 | mem_cgroup_swap_statistics(memcg, true); |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 3894b65b1555..41266dc29f33 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1219,6 +1219,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) | |||
1219 | 1219 | ||
1220 | /* init node's zones as empty zones, we don't have any present pages.*/ | 1220 | /* init node's zones as empty zones, we don't have any present pages.*/ |
1221 | free_area_init_node(nid, zones_size, start_pfn, zholes_size); | 1221 | free_area_init_node(nid, zones_size, start_pfn, zholes_size); |
1222 | pgdat->per_cpu_nodestats = alloc_percpu(struct per_cpu_nodestat); | ||
1222 | 1223 | ||
1223 | /* | 1224 | /* |
1224 | * The node we allocated has no zone fallback lists. For avoiding | 1225 | * The node we allocated has no zone fallback lists. For avoiding |
@@ -1249,6 +1250,7 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) | |||
1249 | static void rollback_node_hotadd(int nid, pg_data_t *pgdat) | 1250 | static void rollback_node_hotadd(int nid, pg_data_t *pgdat) |
1250 | { | 1251 | { |
1251 | arch_refresh_nodedata(nid, NULL); | 1252 | arch_refresh_nodedata(nid, NULL); |
1253 | free_percpu(pgdat->per_cpu_nodestats); | ||
1252 | arch_free_nodedata(pgdat); | 1254 | arch_free_nodedata(pgdat); |
1253 | return; | 1255 | return; |
1254 | } | 1256 | } |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 7d0a275df822..d53a9aa00977 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -764,7 +764,7 @@ bool task_will_free_mem(struct task_struct *task) | |||
764 | { | 764 | { |
765 | struct mm_struct *mm = task->mm; | 765 | struct mm_struct *mm = task->mm; |
766 | struct task_struct *p; | 766 | struct task_struct *p; |
767 | bool ret; | 767 | bool ret = true; |
768 | 768 | ||
769 | /* | 769 | /* |
770 | * Skip tasks without mm because it might have passed its exit_mm and | 770 | * Skip tasks without mm because it might have passed its exit_mm and |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ab2c0ff8c2e6..3fbe73a6fe4b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -4060,7 +4060,7 @@ long si_mem_available(void) | |||
4060 | int lru; | 4060 | int lru; |
4061 | 4061 | ||
4062 | for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) | 4062 | for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) |
4063 | pages[lru] = global_page_state(NR_LRU_BASE + lru); | 4063 | pages[lru] = global_node_page_state(NR_LRU_BASE + lru); |
4064 | 4064 | ||
4065 | for_each_zone(zone) | 4065 | for_each_zone(zone) |
4066 | wmark_low += zone->watermark[WMARK_LOW]; | 4066 | wmark_low += zone->watermark[WMARK_LOW]; |