Diffstat (limited to 'mm')
 -rw-r--r--  mm/huge_memory.c    |  4
 -rw-r--r--  mm/hugetlb.c        |  1
 -rw-r--r--  mm/memcontrol.c     |  4
 -rw-r--r--  mm/migrate.c        |  4
 -rw-r--r--  mm/mlock.c          |  6
 -rw-r--r--  mm/mmap.c           |  3
 -rw-r--r--  mm/mremap.c         |  1
 -rw-r--r--  mm/nommu.c          |  1
 -rw-r--r--  mm/page-writeback.c |  1
 -rw-r--r--  mm/page_alloc.c     | 20
 10 files changed, 33 insertions(+), 12 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6001ee6347a9..b5783d81eda9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1257,6 +1257,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	if (flags & FOLL_WRITE && !pmd_write(*pmd))
 		goto out;
 
+	/* Avoid dumping huge zero page */
+	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
+		return ERR_PTR(-EFAULT);
+
 	page = pmd_page(*pmd);
 	VM_BUG_ON(!PageHead(page));
 	if (flags & FOLL_TOUCH) {
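The huge_memory.c hunk teaches follow_trans_huge_pmd() to refuse FOLL_DUMP access to the shared huge zero page, returning -EFAULT encoded as an error pointer so core dumps skip it. As a rough userspace model of that return convention (err_ptr() and is_err() below are hypothetical stand-ins for the kernel's ERR_PTR()/IS_ERR(), shown only to illustrate how an errno travels through a pointer return value):

	/* Minimal userspace model of the ERR_PTR()/IS_ERR() idiom.
	 * err_ptr() and is_err() are hypothetical, not kernel API. */
	#include <errno.h>
	#include <stdio.h>

	#define MAX_ERRNO 4095

	static void *err_ptr(long error)
	{
		/* A small negative errno lands in the top of the address
		 * space, a range no valid pointer occupies. */
		return (void *)error;
	}

	static int is_err(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static void *follow_page_model(int want_dump, int is_zero_page)
	{
		static char page;		/* stands in for a real page */

		if (want_dump && is_zero_page)
			return err_ptr(-EFAULT);	/* refuse, as the patch does */
		return &page;
	}

	int main(void)
	{
		void *p = follow_page_model(1, 1);
		printf("dump of zero page -> %s\n", is_err(p) ? "EFAULT" : "page");
		return 0;
	}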
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4f3ea0b1e57c..546db81820e4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3033,6 +3033,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		if (!huge_pte_none(huge_ptep_get(ptep))) {
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
 			pte = pte_mkhuge(pte_modify(pte, newprot));
+			pte = arch_make_huge_pte(pte, vma, NULL, 0);
 			set_huge_pte_at(mm, address, ptep, pte);
 			pages++;
 		}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 09255ec8159c..fbb60b103e64 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3030,7 +3030,9 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
 	if (memcg) {
 		s->memcg_params->memcg = memcg;
 		s->memcg_params->root_cache = root_cache;
-	}
+	} else
+		s->memcg_params->is_root_cache = true;
+
 	return 0;
 }
 
diff --git a/mm/migrate.c b/mm/migrate.c
index c38778610aa8..2fd8b4af4744 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -160,8 +160,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 	if (is_write_migration_entry(entry))
 		pte = pte_mkwrite(pte);
 #ifdef CONFIG_HUGETLB_PAGE
-	if (PageHuge(new))
+	if (PageHuge(new)) {
 		pte = pte_mkhuge(pte);
+		pte = arch_make_huge_pte(pte, vma, new, 0);
+	}
 #endif
 	flush_cache_page(vma, addr, pte_pfn(pte));
 	set_pte_at(mm, addr, ptep, pte);
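This migrate.c hunk and the hugetlb.c one above both funnel the rebuilt huge PTE through arch_make_huge_pte(), giving architectures with non-uniform huge page formats a hook to fix up the entry. A minimal sketch of that kind of overridable default using a weak symbol, with hypothetical names (make_huge_entry, entry_t) rather than the kernel's real definitions:

	/* Sketch of an arch-override hook via a weak default definition.
	 * make_huge_entry() and entry_t are hypothetical illustrations. */
	#include <stdio.h>

	typedef unsigned long entry_t;

	/* Generic default: most architectures need no fixup, so the
	 * entry passes through unchanged. */
	__attribute__((weak)) entry_t make_huge_entry(entry_t e)
	{
		return e;
	}

	/* An architecture (here, any other object file) can supply a
	 * strong definition that overrides this at link time, e.g. to
	 * encode the page size in low bits of the entry. */

	int main(void)
	{
		entry_t e = make_huge_entry(0x1000);
		printf("entry = %#lx\n", e);
		return 0;
	}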
diff --git a/mm/mlock.c b/mm/mlock.c
index f0b9ce572fc7..c9bd528b01d2 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -517,11 +517,11 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
 static int do_mlockall(int flags)
 {
 	struct vm_area_struct * vma, * prev = NULL;
-	unsigned int def_flags = 0;
 
 	if (flags & MCL_FUTURE)
-		def_flags = VM_LOCKED;
-	current->mm->def_flags = def_flags;
+		current->mm->def_flags |= VM_LOCKED;
+	else
+		current->mm->def_flags &= ~VM_LOCKED;
 	if (flags == MCL_FUTURE)
 		goto out;
 
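The do_mlockall() rewrite stops assigning mm->def_flags outright, which silently wiped every other default VM flag, and instead sets or clears only the VM_LOCKED bit. The bit set/clear idiom in isolation, using hypothetical FLAG_* values:

	/* Toggle one bit without disturbing the others; FLAG_LOCKED and
	 * FLAG_OTHER are illustrative values, not kernel symbols. */
	#include <assert.h>

	#define FLAG_LOCKED (1u << 0)
	#define FLAG_OTHER  (1u << 1)

	int main(void)
	{
		unsigned int def_flags = FLAG_OTHER;

		def_flags |= FLAG_LOCKED;	/* MCL_FUTURE: set the bit */
		assert(def_flags & FLAG_OTHER);	/* unrelated bit survives */

		def_flags &= ~FLAG_LOCKED;	/* otherwise: clear the bit */
		assert(def_flags == FLAG_OTHER);
		return 0;
	}

The old code's plain assignment (def_flags starts at 0) would have left FLAG_OTHER cleared in the non-MCL_FUTURE case; the masked forms touch only the one bit.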
diff --git a/mm/mmap.c b/mm/mmap.c
index 35730ee9d515..09da0b264982 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -32,6 +32,7 @@
 #include <linux/khugepaged.h>
 #include <linux/uprobes.h>
 #include <linux/rbtree_augmented.h>
+#include <linux/sched/sysctl.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -2943,7 +2944,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
diff --git a/mm/mremap.c b/mm/mremap.c
index e1031e1f6a61..f9766f460299 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -19,6 +19,7 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/mmu_notifier.h>
+#include <linux/sched/sysctl.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
diff --git a/mm/nommu.c b/mm/nommu.c
index 79c3cac87afa..b20db4e22263 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -29,6 +29,7 @@
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/audit.h>
+#include <linux/sched/sysctl.h>
 
 #include <asm/uaccess.h>
 #include <asm/tlb.h>
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0713bfbf0954..66a0024becd9 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -35,6 +35,7 @@
 #include <linux/buffer_head.h> /* __set_page_dirty_buffers */
 #include <linux/pagevec.h>
 #include <linux/timer.h>
+#include <linux/sched/rt.h>
 #include <trace/events/writeback.h>
 
 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index df2022ff0c8a..d1107adf174a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -58,6 +58,7 @@
 #include <linux/prefetch.h>
 #include <linux/migrate.h>
 #include <linux/page-debug-flags.h>
+#include <linux/sched/rt.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -773,6 +774,10 @@ void __init init_cma_reserved_pageblock(struct page *page)
 	set_pageblock_migratetype(page, MIGRATE_CMA);
 	__free_pages(page, pageblock_order);
 	totalram_pages += pageblock_nr_pages;
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page))
+		totalhigh_pages += pageblock_nr_pages;
+#endif
 }
 #endif
 
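The init_cma_reserved_pageblock() fix keeps totalhigh_pages in step with totalram_pages when the freed CMA pageblock sits in highmem. A toy model of that two-counter bookkeeping, with hypothetical counter names:

	/* Every freed block bumps the total; highmem blocks additionally
	 * bump the highmem total. Names are illustrative, not kernel
	 * symbols. */
	#include <assert.h>
	#include <stdbool.h>

	static unsigned long total_pages, high_pages;

	static void account_freed_block(unsigned long nr_pages, bool is_highmem)
	{
		total_pages += nr_pages;
		if (is_highmem)
			high_pages += nr_pages;	/* the half the patch adds */
	}

	int main(void)
	{
		account_freed_block(512, false);	/* lowmem pageblock */
		account_freed_block(512, true);		/* highmem pageblock */
		assert(total_pages == 1024 && high_pages == 512);
		return 0;
	}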
@@ -4416,10 +4421,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
  * round what is now in bits to nearest long in bits, then return it in
  * bytes.
  */
-static unsigned long __init usemap_size(unsigned long zonesize)
+static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
 {
 	unsigned long usemapsize;
 
+	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
 	usemapsize = roundup(zonesize, pageblock_nr_pages);
 	usemapsize = usemapsize >> pageblock_order;
 	usemapsize *= NR_PAGEBLOCK_BITS;
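The usemap_size() change pads the zone size by the start pfn's offset into its first pageblock: a zone that begins mid-pageblock straddles one more pageblock than its raw size suggests, so the old formula could undersize the bitmap. The arithmetic in isolation (plain C; the constants are illustrative, not from any particular kernel config):

	/* Demonstrates why an unaligned zone start needs extra usemap
	 * bits. A pageblock here is 512 pages, 4 bits tracked per block. */
	#include <stdio.h>

	#define PAGEBLOCK_NR_PAGES 512UL
	#define NR_PAGEBLOCK_BITS  4UL

	static unsigned long usemap_bits(unsigned long zone_start_pfn,
					 unsigned long zonesize)
	{
		/* Pad by the offset into the first pageblock, then round
		 * up to whole pageblocks, as the patched formula does. */
		zonesize += zone_start_pfn & (PAGEBLOCK_NR_PAGES - 1);
		unsigned long blocks =
			(zonesize + PAGEBLOCK_NR_PAGES - 1) / PAGEBLOCK_NR_PAGES;
		return blocks * NR_PAGEBLOCK_BITS;
	}

	int main(void)
	{
		/* Aligned start: 1024 pages span exactly 2 pageblocks. */
		printf("aligned:   %lu bits\n", usemap_bits(0, 1024));	/* 8 */
		/* Start 256 pages into a pageblock: the same 1024 pages
		 * straddle 3 pageblocks, so the old formula undersized
		 * the map by one block's worth of bits. */
		printf("unaligned: %lu bits\n", usemap_bits(256, 1024));	/* 12 */
		return 0;
	}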
@@ -4429,17 +4435,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
 }
 
 static void __init setup_usemap(struct pglist_data *pgdat,
-				struct zone *zone, unsigned long zonesize)
+				struct zone *zone,
+				unsigned long zone_start_pfn,
+				unsigned long zonesize)
 {
-	unsigned long usemapsize = usemap_size(zonesize);
+	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
 	zone->pageblock_flags = NULL;
 	if (usemapsize)
 		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
 								   usemapsize);
 }
 #else
-static inline void setup_usemap(struct pglist_data *pgdat,
-				struct zone *zone, unsigned long zonesize) {}
+static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
+				unsigned long zone_start_pfn, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -4590,7 +4598,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 			continue;
 
 		set_pageblock_order();
-		setup_usemap(pgdat, zone, size);
+		setup_usemap(pgdat, zone, zone_start_pfn, size);
 		ret = init_currently_empty_zone(zone, zone_start_pfn,
 						size, MEMMAP_EARLY);
 		BUG_ON(ret);