Diffstat (limited to 'mm')
 mm/huge_memory.c |  4 ++++
 mm/hugetlb.c     |  1 +
 mm/memcontrol.c  |  4 +++-
 mm/migrate.c     |  4 +++-
 mm/mlock.c       |  6 +++---
 mm/mmap.c        |  2 +-
 mm/page_alloc.c  | 19 +++++++++++++------
 7 files changed, 28 insertions(+), 12 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6001ee6347a9..b5783d81eda9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1257,6 +1257,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 	if (flags & FOLL_WRITE && !pmd_write(*pmd))
 		goto out;
 
+	/* Avoid dumping huge zero page */
+	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
+		return ERR_PTR(-EFAULT);
+
 	page = pmd_page(*pmd);
 	VM_BUG_ON(!PageHead(page));
 	if (flags & FOLL_TOUCH) {
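The new check returns ERR_PTR(-EFAULT) so that a core-dump page walk (FOLL_DUMP) sees the huge zero page as a hole instead of a page worth dumping. For readers unfamiliar with the idiom, below is a minimal userspace sketch of the ERR_PTR/IS_ERR pointer-encoding trick from the kernel's include/linux/err.h; MAX_ERRNO matches the kernel's value, while lookup_page() and its zero-page flag are invented for illustration.

#include <stdio.h>

#define MAX_ERRNO 4095	/* as in the kernel: errnos live in the top 4095 addresses */

/* Encode a negative errno as an (unmappable) pointer value. */
static inline void *ERR_PTR(long error) { return (void *)error; }

/* Recover the errno from an encoded pointer. */
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

/* True if the pointer is actually an encoded errno. */
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Invented stand-in for follow_trans_huge_pmd(): refuse the zero page. */
static void *lookup_page(int is_zero_page)
{
	static int page;
	return is_zero_page ? ERR_PTR(-14 /* EFAULT */) : (void *)&page;
}

int main(void)
{
	void *p = lookup_page(1);
	if (IS_ERR(p))
		printf("treated as a hole, errno %ld\n", -PTR_ERR(p));
	return 0;
}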
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 4f3ea0b1e57c..546db81820e4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3033,6 +3033,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		if (!huge_pte_none(huge_ptep_get(ptep))) {
 			pte = huge_ptep_get_and_clear(mm, address, ptep);
 			pte = pte_mkhuge(pte_modify(pte, newprot));
+			pte = arch_make_huge_pte(pte, vma, NULL, 0);
 			set_huge_pte_at(mm, address, ptep, pte);
 			pages++;
 		}
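The issue this addresses: pte_modify() rebuilds the PTE from the new protection bits, and on architectures that encode the huge-page size in the PTE itself, that re-encoding drops the size field, so arch_make_huge_pte() must be reapplied before set_huge_pte_at(). The same hook is added on the migration path in mm/migrate.c below. Here is a toy model of the failure mode with an invented 32-bit PTE layout; the masks and the size bit are assumptions for illustration, not any real architecture's encoding.

#include <assert.h>
#include <stdint.h>

typedef uint32_t pte_t;

#define PTE_PFN_MASK	0xfffff000u	/* page frame number */
#define PTE_PROT_MASK	0x000000ffu	/* protection bits */
#define PTE_SZ_4MB	0x00000100u	/* arch-specific huge-size bit */

/* Like pte_modify(): keep the pfn, install new protection bits.
 * Note that it does NOT preserve the arch size bit. */
static pte_t pte_modify(pte_t pte, pte_t newprot)
{
	return (pte & PTE_PFN_MASK) | (newprot & PTE_PROT_MASK);
}

/* Like arch_make_huge_pte(): re-encode the huge page size. */
static pte_t arch_make_huge_pte(pte_t pte)
{
	return pte | PTE_SZ_4MB;
}

int main(void)
{
	pte_t pte = 0x12345000u | 0x7u | PTE_SZ_4MB;

	pte = pte_modify(pte, 0x5u);	/* change protection... */
	assert(!(pte & PTE_SZ_4MB));	/* ...and the size bit is gone */

	pte = arch_make_huge_pte(pte);	/* the added call restores it */
	assert(pte & PTE_SZ_4MB);
	return 0;
}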
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 09255ec8159c..fbb60b103e64 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3030,7 +3030,9 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
 	if (memcg) {
 		s->memcg_params->memcg = memcg;
 		s->memcg_params->root_cache = root_cache;
-	}
+	} else
+		s->memcg_params->is_root_cache = true;
+
 	return 0;
 }
 
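Without the new else branch, a cache registered with memcg == NULL (that is, a root cache) never had is_root_cache set, so later checks would misclassify it as a per-memcg child cache. A stripped-down sketch of the classification logic; the struct and register_cache() below are simplified stand-ins, not the real slab/memcg types.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct mem_cgroup;	/* opaque in this sketch */

struct memcg_cache_params {
	struct mem_cgroup *memcg;
	bool is_root_cache;
};

/* Simplified registration: a NULL memcg means this is a root cache. */
static void register_cache(struct memcg_cache_params *p, struct mem_cgroup *memcg)
{
	if (memcg)
		p->memcg = memcg;
	else
		p->is_root_cache = true;
}

int main(void)
{
	struct memcg_cache_params root = { 0 };

	register_cache(&root, NULL);
	assert(root.is_root_cache);	/* classified correctly now */
	return 0;
}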
diff --git a/mm/migrate.c b/mm/migrate.c
index c38778610aa8..2fd8b4af4744 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -160,8 +160,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
 	if (is_write_migration_entry(entry))
 		pte = pte_mkwrite(pte);
 #ifdef CONFIG_HUGETLB_PAGE
-	if (PageHuge(new))
+	if (PageHuge(new)) {
 		pte = pte_mkhuge(pte);
+		pte = arch_make_huge_pte(pte, vma, new, 0);
+	}
 #endif
 	flush_cache_page(vma, addr, pte_pfn(pte));
 	set_pte_at(mm, addr, ptep, pte);
diff --git a/mm/mlock.c b/mm/mlock.c
index f0b9ce572fc7..c9bd528b01d2 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -517,11 +517,11 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
 static int do_mlockall(int flags)
 {
 	struct vm_area_struct * vma, * prev = NULL;
-	unsigned int def_flags = 0;
 
 	if (flags & MCL_FUTURE)
-		def_flags = VM_LOCKED;
-	current->mm->def_flags = def_flags;
+		current->mm->def_flags |= VM_LOCKED;
+	else
+		current->mm->def_flags &= ~VM_LOCKED;
 	if (flags == MCL_FUTURE)
 		goto out;
 
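The point of this rewrite: do_mlockall() used to rebuild mm->def_flags from zero, silently clearing any other default-VM-flag bits the process had accumulated, while the fixed version flips only VM_LOCKED. A compilable sketch of the two behaviours; VM_NOHUGEPAGE merely stands in for "some other def_flags bit" and the flag values are made up.

#include <assert.h>

#define VM_LOCKED	0x1u
#define VM_NOHUGEPAGE	0x2u	/* stand-in for any other def_flags bit */

/* Old behaviour: rebuild def_flags from scratch. */
static unsigned int mlockall_old(unsigned int def_flags, int mcl_future)
{
	unsigned int new_flags = 0;

	(void)def_flags;	/* the old code never read the previous value */
	if (mcl_future)
		new_flags = VM_LOCKED;
	return new_flags;	/* every other bit is lost */
}

/* Fixed behaviour: toggle only VM_LOCKED, preserve the rest. */
static unsigned int mlockall_new(unsigned int def_flags, int mcl_future)
{
	if (mcl_future)
		def_flags |= VM_LOCKED;
	else
		def_flags &= ~VM_LOCKED;
	return def_flags;
}

int main(void)
{
	unsigned int flags = VM_NOHUGEPAGE;	/* set earlier by some other path */

	assert(mlockall_old(flags, 1) == VM_LOCKED);	/* other bit dropped */
	assert(mlockall_new(flags, 1) == (VM_LOCKED | VM_NOHUGEPAGE));
	assert(mlockall_new(flags | VM_LOCKED, 0) == VM_NOHUGEPAGE);
	return 0;
}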
diff --git a/mm/mmap.c b/mm/mmap.c
index 35730ee9d515..d1e4124f3d0e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2943,7 +2943,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_mutex or anon_vma->mutex outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index df2022ff0c8a..6a83cd35cfde 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -773,6 +773,10 @@ void __init init_cma_reserved_pageblock(struct page *page)
 	set_pageblock_migratetype(page, MIGRATE_CMA);
 	__free_pages(page, pageblock_order);
 	totalram_pages += pageblock_nr_pages;
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page))
+		totalhigh_pages += pageblock_nr_pages;
+#endif
 }
 #endif
 
@@ -4416,10 +4420,11 @@ static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
  * round what is now in bits to nearest long in bits, then return it in
  * bytes.
  */
-static unsigned long __init usemap_size(unsigned long zonesize)
+static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
 {
 	unsigned long usemapsize;
 
+	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
 	usemapsize = roundup(zonesize, pageblock_nr_pages);
 	usemapsize = usemapsize >> pageblock_order;
 	usemapsize *= NR_PAGEBLOCK_BITS;
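Why the extra zone_start_pfn term matters: the usemap stores NR_PAGEBLOCK_BITS of flags per pageblock, and the old sizing assumed the zone began on a pageblock boundary. A zone whose first pfn sits mid-pageblock straddles one extra pageblock, so the old formula could come up short and accesses to the last pageblock's flags would land past the allocation. Below is a userspace rendition of the fixed arithmetic, using assumed constants (4 KiB pages, 2 MiB pageblocks, 4 flag bits per block); the real values are config- and arch-dependent.

#include <stdio.h>

#define PAGEBLOCK_ORDER		9UL	/* assumed: 2 MiB blocks of 4 KiB pages */
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)
#define NR_PAGEBLOCK_BITS	4UL
#define BITS_PER_LONG		(8 * sizeof(long))

static unsigned long roundup(unsigned long x, unsigned long multiple)
{
	return ((x + multiple - 1) / multiple) * multiple;
}

/* The fixed usemap_size(): pre-pad zonesize with the offset of the
 * zone's start within its first pageblock, then size as before. */
static unsigned long usemap_size(unsigned long zone_start_pfn,
				 unsigned long zonesize)
{
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (PAGEBLOCK_NR_PAGES - 1);
	usemapsize = roundup(zonesize, PAGEBLOCK_NR_PAGES);
	usemapsize >>= PAGEBLOCK_ORDER;
	usemapsize *= NR_PAGEBLOCK_BITS;

	return roundup(usemapsize, BITS_PER_LONG) / 8;	/* bits -> bytes */
}

int main(void)
{
	unsigned long pages = 16 * PAGEBLOCK_NR_PAGES;	/* a 32 MiB zone */

	/* Start on a boundary: 16 pageblocks -> 64 bits -> 8 bytes. */
	printf("aligned start:   %lu bytes\n", usemap_size(0, pages));

	/* Start 256 pages into a block: the zone now touches 17
	 * pageblocks, which the old formula would have missed. */
	printf("mid-block start: %lu bytes\n", usemap_size(256, pages));
	return 0;
}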
@@ -4429,17 +4434,19 @@ static unsigned long __init usemap_size(unsigned long zonesize)
 }
 
 static void __init setup_usemap(struct pglist_data *pgdat,
-				struct zone *zone, unsigned long zonesize)
+				struct zone *zone,
+				unsigned long zone_start_pfn,
+				unsigned long zonesize)
 {
-	unsigned long usemapsize = usemap_size(zonesize);
+	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
 	zone->pageblock_flags = NULL;
 	if (usemapsize)
 		zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
 								   usemapsize);
 }
 #else
-static inline void setup_usemap(struct pglist_data *pgdat,
-				struct zone *zone, unsigned long zonesize) {}
+static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
+				unsigned long zone_start_pfn, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -4590,7 +4597,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 			continue;
 
 		set_pageblock_order();
-		setup_usemap(pgdat, zone, size);
+		setup_usemap(pgdat, zone, zone_start_pfn, size);
 		ret = init_currently_empty_zone(zone, zone_start_pfn,
 					size, MEMMAP_EARLY);
 		BUG_ON(ret);