author     Linus Torvalds <torvalds@linux-foundation.org>   2016-01-11 20:16:01 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-01-11 20:16:01 -0500
commit     0ffedcda63f56eaca99a77392b9f057dfb738817 (patch)
tree       933eb3bc193d1b4adb83ad61a52ce0b4948c1fe0
parent     6896d9f7e7ee98d772224a539b7581a1e6dd6b2c (diff)
parent     1f1a89ac05f6e88aa341e86e57435fdbb1177c0c (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar:
 "The main changes in this cycle were:

   - make the debugfs 'kernel_page_tables' file read-only, as it only
     has read ops. (Borislav Petkov)

   - micro-optimize clflush_cache_range() (Chris Wilson)

   - swiotlb enhancements, which fixes certain KVM emulated devices
     (Igor Mammedov)

   - fix an LDT related debug message (Jan Beulich)

   - modularize CONFIG_X86_PTDUMP (Kees Cook)

   - tone down an overly alarming warning (Laura Abbott)

   - Mark variable __initdata (Rasmus Villemoes)

   - PAT additions (Toshi Kani)"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Micro-optimise clflush_cache_range()
  x86/mm/pat: Change free_memtype() to support shrinking case
  x86/mm/pat: Add untrack_pfn_moved for mremap
  x86/mm: Drop WARN from multi-BAR check
  x86/LDT: Print the real LDT base address
  x86/mm/64: Enable SWIOTLB if system has SRAT memory regions above MAX_DMA32_PFN
  x86/mm: Introduce max_possible_pfn
  x86/mm/ptdump: Make (debugfs)/kernel_page_tables read-only
  x86/mm/mtrr: Mark the 'range_new' static variable in mtrr_calc_range_state() as __initdata
  x86/mm: Turn CONFIG_X86_PTDUMP into a module
-rw-r--r--  arch/x86/Kconfig.debug              |  2
-rw-r--r--  arch/x86/kernel/cpu/mtrr/cleanup.c  | 11
-rw-r--r--  arch/x86/kernel/pci-swiotlb.c       |  2
-rw-r--r--  arch/x86/kernel/process_64.c        |  2
-rw-r--r--  arch/x86/kernel/setup.c             |  2
-rw-r--r--  arch/x86/mm/Makefile                |  1
-rw-r--r--  arch/x86/mm/debug_pagetables.c      | 46
-rw-r--r--  arch/x86/mm/dump_pagetables.c       | 34
-rw-r--r--  arch/x86/mm/ioremap.c               |  4
-rw-r--r--  arch/x86/mm/pageattr.c              | 10
-rw-r--r--  arch/x86/mm/pat.c                   | 12
-rw-r--r--  arch/x86/mm/pat_rbtree.c            | 52
-rw-r--r--  arch/x86/mm/srat.c                  |  2
-rw-r--r--  include/asm-generic/pgtable.h       | 10
-rw-r--r--  include/linux/bootmem.h             |  4
-rw-r--r--  mm/bootmem.c                        |  1
-rw-r--r--  mm/mremap.c                         |  4
-rw-r--r--  mm/nobootmem.c                      |  1
18 files changed, 146 insertions, 54 deletions
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 137dfa96aa14..110253ce83af 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -69,7 +69,7 @@ config X86_PTDUMP_CORE
         def_bool n

 config X86_PTDUMP
-        bool "Export kernel pagetable layout to userspace via debugfs"
+        tristate "Export kernel pagetable layout to userspace via debugfs"
         depends on DEBUG_KERNEL
         select DEBUG_FS
         select X86_PTDUMP_CORE
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 70d7c93f4550..0d98503c2245 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -593,9 +593,16 @@ mtrr_calc_range_state(u64 chunk_size, u64 gran_size,
                              unsigned long x_remove_base,
                              unsigned long x_remove_size, int i)
 {
-        static struct range range_new[RANGE_NUM];
+        /*
+         * range_new should really be an automatic variable, but
+         * putting 4096 bytes on the stack is frowned upon, to put it
+         * mildly. It is safe to make it a static __initdata variable,
+         * since mtrr_calc_range_state is only called during init and
+         * there's no way it will call itself recursively.
+         */
+        static struct range range_new[RANGE_NUM] __initdata;
         unsigned long range_sums_new;
-        static int nr_range_new;
+        int nr_range_new;
         int num_reg;

         /* Convert ranges to var ranges state: */
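
The comment above argues that a static __initdata scratch buffer is safe here because mtrr_calc_range_state() runs exactly once, during init, and can never re-enter. A minimal user-space sketch of the same "keep the big work area off the stack" pattern follows; names, the 256-entry size, and the struct layout are illustrative only, not taken from the patch:

/* Minimal user-space analogue of the scratch-buffer pattern above: a
 * large work area is made static instead of automatic so it does not
 * land on the (small, fixed-size) stack.  This is only safe when the
 * function cannot run concurrently or recursively, which is what the
 * kernel comment argues for mtrr_calc_range_state().
 */
#include <stdio.h>

#define RANGE_NUM 256

struct range {
        unsigned long long start;
        unsigned long long end;
};

static int calc_range_state(unsigned long long base, unsigned long long size)
{
        /* ~4 KB of scratch space: static, not on the stack */
        static struct range range_new[RANGE_NUM];
        int nr = 0;

        range_new[nr].start = base;
        range_new[nr].end   = base + size;
        nr++;

        return nr;
}

int main(void)
{
        printf("ranges: %d\n", calc_range_state(0x100000, 0x200000));
        return 0;
}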
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index adf0392d549a..7c577a178859 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -88,7 +88,7 @@ int __init pci_swiotlb_detect_4gb(void)
 {
         /* don't initialize swiotlb if iommu=off (no_iommu=1) */
 #ifdef CONFIG_X86_64
-        if (!no_iommu && max_pfn > MAX_DMA32_PFN)
+        if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
                 swiotlb = 1;
 #endif
         return swiotlb;
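
The point of this hunk is that the SWIOTLB decision is now keyed on the highest page frame the machine could ever have (including hotpluggable memory declared in the SRAT), not just on what is present at boot. A small user-space sketch of that decision, not from the patch, with 4 KiB pages assumed for the constants:

/* Illustration only: same shape as pci_swiotlb_detect_4gb() after the
 * patch.  The decision uses the highest *possible* PFN so that memory
 * hotplugged above 4 GiB later is still covered by a bounce buffer.
 */
#include <stdio.h>

#define PAGE_SHIFT     12
#define MAX_DMA32_PFN  (1ULL << (32 - PAGE_SHIFT))   /* PFN of the 4 GiB boundary */

static int need_swiotlb(unsigned long long max_possible_pfn, int no_iommu)
{
        return !no_iommu && max_possible_pfn > MAX_DMA32_PFN;
}

int main(void)
{
        /* 2 GiB present at boot, but SRAT allows hotplug up to 16 GiB */
        unsigned long long boot_pfn     = (2ULL << 30) >> PAGE_SHIFT;
        unsigned long long possible_pfn = (16ULL << 30) >> PAGE_SHIFT;

        printf("boot-only decision:    %d\n", need_swiotlb(boot_pfn, 0));
        printf("max_possible decision: %d\n", need_swiotlb(possible_pfn, 0));
        return 0;
}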
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e835d263a33b..b9d99e0f82c4 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -125,7 +125,7 @@ void release_thread(struct task_struct *dead_task)
         if (dead_task->mm->context.ldt) {
                 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                         dead_task->comm,
-                        dead_task->mm->context.ldt,
+                        dead_task->mm->context.ldt->entries,
                         dead_task->mm->context.ldt->size);
                 BUG();
         }
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d2bbe343fda7..d3d80e6d42a2 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1048,6 +1048,8 @@ void __init setup_arch(char **cmdline_p)
         if (mtrr_trim_uncached_memory(max_pfn))
                 max_pfn = e820_end_of_ram_pfn();

+        max_possible_pfn = max_pfn;
+
 #ifdef CONFIG_X86_32
         /* max_low_pfn get updated here */
         find_low_pfn_range();
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 65c47fda26fc..f9d38a48e3c8 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_X86_32) += pgtable_32.o iomap_32.o

 obj-$(CONFIG_HUGETLB_PAGE)      += hugetlbpage.o
 obj-$(CONFIG_X86_PTDUMP_CORE)   += dump_pagetables.o
+obj-$(CONFIG_X86_PTDUMP)        += debug_pagetables.o

 obj-$(CONFIG_HIGHMEM)           += highmem_32.o

diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c
new file mode 100644
index 000000000000..bfcffdf6c577
--- /dev/null
+++ b/arch/x86/mm/debug_pagetables.c
@@ -0,0 +1,46 @@
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <asm/pgtable.h>
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+        ptdump_walk_pgd_level(m, NULL);
+        return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *filp)
+{
+        return single_open(filp, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+        .owner          = THIS_MODULE,
+        .open           = ptdump_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = single_release,
+};
+
+static struct dentry *pe;
+
+static int __init pt_dump_debug_init(void)
+{
+        pe = debugfs_create_file("kernel_page_tables", S_IRUSR, NULL, NULL,
+                                 &ptdump_fops);
+        if (!pe)
+                return -ENOMEM;
+
+        return 0;
+}
+
+static void __exit pt_dump_debug_exit(void)
+{
+        debugfs_remove_recursive(pe);
+}
+
+module_init(pt_dump_debug_init);
+module_exit(pt_dump_debug_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
+MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables");
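
Once this module is loaded, the page-table dump can be read from debugfs. A tiny user-space reader, not part of the patch, assuming the conventional /sys/kernel/debug mount point and a root caller (the file is created S_IRUSR):

/* Reads the debugfs file created by the module above and prints it. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *path = "/sys/kernel/debug/kernel_page_tables";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return EXIT_FAILURE;
        }
        while (fgets(line, sizeof(line), f))    /* dump the page-table layout */
                fputs(line, stdout);
        fclose(f);
        return EXIT_SUCCESS;
}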
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 0f1c6fc3ddd8..4a6f1d9b5106 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -426,38 +426,15 @@ void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
 {
         ptdump_walk_pgd_level_core(m, pgd, false);
 }
+EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level);

 void ptdump_walk_pgd_level_checkwx(void)
 {
         ptdump_walk_pgd_level_core(NULL, NULL, true);
 }

-#ifdef CONFIG_X86_PTDUMP
-static int ptdump_show(struct seq_file *m, void *v)
+static int __init pt_dump_init(void)
 {
-        ptdump_walk_pgd_level(m, NULL);
-        return 0;
-}
-
-static int ptdump_open(struct inode *inode, struct file *filp)
-{
-        return single_open(filp, ptdump_show, NULL);
-}
-
-static const struct file_operations ptdump_fops = {
-        .open           = ptdump_open,
-        .read           = seq_read,
-        .llseek         = seq_lseek,
-        .release        = single_release,
-};
-#endif
-
-static int pt_dump_init(void)
-{
-#ifdef CONFIG_X86_PTDUMP
-        struct dentry *pe;
-#endif
-
 #ifdef CONFIG_X86_32
         /* Not a compile-time constant on x86-32 */
         address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
@@ -468,13 +445,6 @@ static int pt_dump_init(void)
         address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
 #endif

-#ifdef CONFIG_X86_PTDUMP
-        pe = debugfs_create_file("kernel_page_tables", 0600, NULL, NULL,
-                                 &ptdump_fops);
-        if (!pe)
-                return -ENOMEM;
-#endif
-
         return 0;
 }

diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index b9c78f3bcd67..0d8d53d1f5cc 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -194,8 +194,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
          * Check if the request spans more than any BAR in the iomem resource
          * tree.
          */
-        WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
-                  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");
+        if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
+                pr_warn("caller %pS mapping multiple BARs\n", caller);

         return ret_addr;
 err_free_area:
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a3137a4feed1..6000ad7f560c 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -129,14 +129,16 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  */
 void clflush_cache_range(void *vaddr, unsigned int size)
 {
-        unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
+        const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
+        void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
         void *vend = vaddr + size;
-        void *p;
+
+        if (p >= vend)
+                return;

         mb();

-        for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
-             p < vend; p += boot_cpu_data.x86_clflush_size)
+        for (; p < vend; p += clflush_size)
                 clflushopt(p);

         mb();
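
The rewrite computes the cache-line-aligned start pointer once and bails out early on an empty range, so a zero-size request no longer issues the memory barriers. A user-space sketch of the same pointer arithmetic, not from the patch, with a 64-byte line size assumed for illustration:

/* Counts how many cache lines the rewritten clflush_cache_range() would
 * touch: round the start down to a line boundary once, then step one
 * line at a time; an empty range returns before doing anything.
 */
#include <stdio.h>
#include <stdint.h>

#define CLFLUSH_SIZE 64UL   /* stand-in for boot_cpu_data.x86_clflush_size */

static unsigned int lines_to_flush(uintptr_t vaddr, unsigned int size)
{
        uintptr_t p = vaddr & ~(CLFLUSH_SIZE - 1);  /* round down to line start */
        uintptr_t vend = vaddr + size;
        unsigned int n = 0;

        if (p >= vend)          /* empty range: nothing to flush */
                return 0;

        for (; p < vend; p += CLFLUSH_SIZE)
                n++;            /* one clflushopt() per cache line */

        return n;
}

int main(void)
{
        printf("%u\n", lines_to_flush(0x1008, 16));  /* within one line: 1 */
        printf("%u\n", lines_to_flush(0x1038, 16));  /* straddles a boundary: 2 */
        printf("%u\n", lines_to_flush(0x1000, 0));   /* empty range: 0 */
        return 0;
}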
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 188e3e07eeeb..031782e74231 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -586,7 +586,7 @@ int free_memtype(u64 start, u64 end)
         entry = rbt_memtype_erase(start, end);
         spin_unlock(&memtype_lock);

-        if (!entry) {
+        if (IS_ERR(entry)) {
                 pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
                         current->comm, current->pid, start, end - 1);
                 return -EINVAL;
@@ -992,6 +992,16 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
         vma->vm_flags &= ~VM_PAT;
 }

+/*
+ * untrack_pfn_moved is called, while mremapping a pfnmap for a new region,
+ * with the old vma after its pfnmap page table has been removed.  The new
+ * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
+ */
+void untrack_pfn_moved(struct vm_area_struct *vma)
+{
+        vma->vm_flags &= ~VM_PAT;
+}
+
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
         return __pgprot(pgprot_val(prot) |
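
After this series, rbt_memtype_erase() can hand back three different things: a node for the caller to free, NULL for the "shrunk in place, nothing to free" mremap case, or an encoded error; that is why free_memtype() now tests IS_ERR() instead of checking for NULL. A stand-alone sketch of that three-way convention, not from the patch; the macros only mimic include/linux/err.h for the demo:

/* Demonstrates the ERR_PTR/IS_ERR encoding that the check above relies on. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)    ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)     ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

struct memtype { unsigned long long start, end; };

static struct memtype *fake_erase(int outcome, struct memtype *node)
{
        if (outcome == 0)
                return node;             /* exact match: caller frees it */
        if (outcome == 1)
                return NULL;             /* end match: node shrunk, kept */
        return ERR_PTR(-EINVAL);         /* no match at all */
}

int main(void)
{
        struct memtype node = { 0x1000, 0x2000 };

        for (int outcome = 0; outcome <= 2; outcome++) {
                struct memtype *e = fake_erase(outcome, &node);

                if (IS_ERR(e))
                        printf("outcome %d: error %ld\n", outcome, PTR_ERR(e));
                else if (!e)
                        printf("outcome %d: shrunk, nothing to free\n", outcome);
                else
                        printf("outcome %d: free [%llx-%llx)\n", outcome,
                               e->start, e->end);
        }
        return 0;
}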
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index 63931080366a..2f7702253ccf 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -98,8 +98,13 @@ static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
         return last_lower; /* Returns NULL if there is no overlap */
 }

-static struct memtype *memtype_rb_exact_match(struct rb_root *root,
-                                u64 start, u64 end)
+enum {
+        MEMTYPE_EXACT_MATCH     = 0,
+        MEMTYPE_END_MATCH       = 1
+};
+
+static struct memtype *memtype_rb_match(struct rb_root *root,
+                                u64 start, u64 end, int match_type)
 {
         struct memtype *match;

@@ -107,7 +112,12 @@ static struct memtype *memtype_rb_exact_match(struct rb_root *root,
         while (match != NULL && match->start < end) {
                 struct rb_node *node;

-                if (match->start == start && match->end == end)
+                if ((match_type == MEMTYPE_EXACT_MATCH) &&
+                    (match->start == start) && (match->end == end))
+                        return match;
+
+                if ((match_type == MEMTYPE_END_MATCH) &&
+                    (match->start < start) && (match->end == end))
                         return match;

                 node = rb_next(&match->rb);
@@ -117,7 +127,7 @@ static struct memtype *memtype_rb_exact_match(struct rb_root *root,
                 match = NULL;
         }

-        return NULL; /* Returns NULL if there is no exact match */
+        return NULL; /* Returns NULL if there is no match */
 }

 static int memtype_rb_check_conflict(struct rb_root *root,
@@ -210,12 +220,36 @@ struct memtype *rbt_memtype_erase(u64 start, u64 end)
 {
         struct memtype *data;

-        data = memtype_rb_exact_match(&memtype_rbroot, start, end);
-        if (!data)
-                goto out;
+        /*
+         * Since the memtype_rbroot tree allows overlapping ranges,
+         * rbt_memtype_erase() checks with EXACT_MATCH first, i.e. free
+         * a whole node for the munmap case.  If no such entry is found,
+         * it then checks with END_MATCH, i.e. shrink the size of a node
+         * from the end for the mremap case.
+         */
+        data = memtype_rb_match(&memtype_rbroot, start, end,
+                                MEMTYPE_EXACT_MATCH);
+        if (!data) {
+                data = memtype_rb_match(&memtype_rbroot, start, end,
+                                        MEMTYPE_END_MATCH);
+                if (!data)
+                        return ERR_PTR(-EINVAL);
+        }
+
+        if (data->start == start) {
+                /* munmap: erase this node */
+                rb_erase_augmented(&data->rb, &memtype_rbroot,
+                                        &memtype_rb_augment_cb);
+        } else {
+                /* mremap: update the end value of this node */
+                rb_erase_augmented(&data->rb, &memtype_rbroot,
+                                        &memtype_rb_augment_cb);
+                data->end = start;
+                data->subtree_max_end = data->end;
+                memtype_rb_insert(&memtype_rbroot, data);
+                return NULL;
+        }

-        rb_erase_augmented(&data->rb, &memtype_rbroot, &memtype_rb_augment_cb);
-out:
         return data;
 }

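
A worked example of the two cases described in the comment above, not from the patch, reduced to a single entry instead of the real augmented rbtree; the addresses are illustrative only:

/* An exact match frees the whole entry (munmap); an end match only trims
 * the entry back, which is the mremap case of shrinking a tracked pfnmap
 * from the end.  Anything else would be -EINVAL in the real code.
 */
#include <stdio.h>

struct memtype { unsigned long long start, end; int live; };

static void erase_range(struct memtype *m, unsigned long long start,
                        unsigned long long end)
{
        if (m->start == start && m->end == end)
                m->live = 0;            /* EXACT_MATCH: drop the whole node */
        else if (m->start < start && m->end == end)
                m->end = start;         /* END_MATCH: shrink node from the end */
}

int main(void)
{
        struct memtype a = { 0x1000, 0x3000, 1 };
        erase_range(&a, 0x2000, 0x3000);
        printf("after end match:   [%llx-%llx) live=%d\n", a.start, a.end, a.live);

        struct memtype b = { 0x1000, 0x3000, 1 };
        erase_range(&b, 0x1000, 0x3000);
        printf("after exact match: live=%d\n", b.live);
        return 0;
}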
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index c2aea63bee20..b5f821881465 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -203,6 +203,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
                 pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
                         (unsigned long long)start, (unsigned long long)end - 1);

+        max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));
+
         return 0;
 out_err_bad_srat:
         bad_srat();
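
The added line rounds the end of each SRAT-declared hotplug range up to a page frame number and keeps the running maximum, so max_possible_pfn covers memory that is not present yet. A small sketch of the arithmetic, not from the patch, assuming 4 KiB pages:

/* PFN_UP() rounds a byte address up to a page-frame number; taking the
 * max over every hotplug range declared in the SRAT raises the possible
 * PFN ceiling beyond what is present at boot.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
        unsigned long long max_possible_pfn = (4ULL << 30) >> PAGE_SHIFT; /* 4 GiB at boot */

        /* SRAT says [16 GiB, 32 GiB) may be hotplugged later */
        unsigned long long end = 32ULL << 30;
        unsigned long long pfn = PFN_UP(end - 1);

        if (pfn > max_possible_pfn)
                max_possible_pfn = pfn;

        printf("max_possible_pfn = %llu (0x%llx)\n",
               max_possible_pfn, max_possible_pfn);
        return 0;
}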
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 14b0ff32fb9f..3a6803cb0ec9 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -569,7 +569,7 @@ static inline int track_pfn_copy(struct vm_area_struct *vma)
 }

 /*
- * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack_pfn is called while unmapping a pfnmap for a region.
  * untrack can be called for a specific region indicated by pfn and size or
  * can be for the entire vma (in which case pfn, size are zero).
  */
@@ -577,6 +577,13 @@ static inline void untrack_pfn(struct vm_area_struct *vma,
                                   unsigned long pfn, unsigned long size)
 {
 }
+
+/*
+ * untrack_pfn_moved is called while mremapping a pfnmap for a new region.
+ */
+static inline void untrack_pfn_moved(struct vm_area_struct *vma)
+{
+}
 #else
 extern int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                            unsigned long pfn, unsigned long addr,
@@ -586,6 +593,7 @@ extern int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 extern int track_pfn_copy(struct vm_area_struct *vma);
 extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
                         unsigned long size);
+extern void untrack_pfn_moved(struct vm_area_struct *vma);
 #endif

 #ifdef __HAVE_COLOR_ZERO_PAGE
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index f589222bfa87..35b22f94d2d2 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -19,6 +19,10 @@ extern unsigned long min_low_pfn;
  * highest page
  */
 extern unsigned long max_pfn;
+/*
+ * highest possible page
+ */
+extern unsigned long long max_possible_pfn;

 #ifndef CONFIG_NO_BOOTMEM
 /*
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 3b6380784c28..91e32bc8517f 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -33,6 +33,7 @@ EXPORT_SYMBOL(contig_page_data);
 unsigned long max_low_pfn;
 unsigned long min_low_pfn;
 unsigned long max_pfn;
+unsigned long long max_possible_pfn;

 bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

diff --git a/mm/mremap.c b/mm/mremap.c
index c25bc6268e46..de824e72c3e8 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -319,6 +319,10 @@ static unsigned long move_vma(struct vm_area_struct *vma,
         hiwater_vm = mm->hiwater_vm;
         vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);

+        /* Tell pfnmap has moved from this vma */
+        if (unlikely(vma->vm_flags & VM_PFNMAP))
+                untrack_pfn_moved(vma);
+
         if (do_munmap(mm, old_addr, old_len) < 0) {
                 /* OOM: unable to split vma, just get accounts right */
                 vm_unacct_memory(excess >> PAGE_SHIFT);
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index e57cf24babd6..99feb2b07fc5 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -31,6 +31,7 @@ EXPORT_SYMBOL(contig_page_data);
 unsigned long max_low_pfn;
 unsigned long min_low_pfn;
 unsigned long max_pfn;
+unsigned long long max_possible_pfn;

 static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
                                         u64 goal, u64 limit)