Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/fault.c           | 21
-rw-r--r--  arch/x86/mm/gup.c             | 12
-rw-r--r--  arch/x86/mm/highmem_32.c      |  2
-rw-r--r--  arch/x86/mm/init.c            |  3
-rw-r--r--  arch/x86/mm/kmemcheck/error.c |  2
-rw-r--r--  arch/x86/mm/mmap.c            |  4
-rw-r--r--  arch/x86/mm/mmio-mod.c        |  2
-rw-r--r--  arch/x86/mm/numa.c            | 15
-rw-r--r--  arch/x86/mm/numa_32.c         |  6
-rw-r--r--  arch/x86/mm/pageattr-test.c   |  3
10 files changed, 45 insertions(+), 25 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2dbf6bf4c7e..0d17c8c50ac 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -17,6 +17,7 @@
 #include <asm/traps.h>		/* dotraplinkage, ...		*/
 #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
 #include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
+#include <asm/vsyscall.h>
 
 /*
  * Page fault error code bits:
@@ -105,7 +106,7 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
 		 * but for now it's good enough to assume that long
 		 * mode only uses well known segments or kernel.
 		 */
-		return (!user_mode(regs)) || (regs->cs == __USER_CS);
+		return (!user_mode(regs) || user_64bit_mode(regs));
 #endif
 	case 0x60:
 		/* 0x64 thru 0x67 are valid prefixes in all modes. */
@@ -720,6 +721,18 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 	if (is_errata100(regs, address))
 		return;
 
+#ifdef CONFIG_X86_64
+	/*
+	 * Instruction fetch faults in the vsyscall page might need
+	 * emulation.
+	 */
+	if (unlikely((error_code & PF_INSTR) &&
+		     ((address & ~0xfff) == VSYSCALL_START))) {
+		if (emulate_vsyscall(regs, address))
+			return;
+	}
+#endif
+
 	if (unlikely(show_unhandled_signals))
 		show_signal_msg(regs, error_code, address, tsk);
 
@@ -1059,7 +1072,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(error_code & PF_RSVD))
 		pgtable_bad(regs, error_code, address);
 
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 	/*
 	 * If we're in an interrupt, have no user context or are running
@@ -1161,11 +1174,11 @@ good_area:
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
 				      regs, address);
 		} else {
 			tsk->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
 				      regs, address);
 		}
 		if (fault & VM_FAULT_RETRY) {
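Note on the vsyscall hunk above: masking off the low 12 bits of the faulting address yields the base of the containing 4 KiB page, so the PF_INSTR test fires for any instruction fetch inside the vsyscall page before emulate_vsyscall() is tried. A minimal userspace sketch of that address test (the VSYSCALL_START value is the fixed x86-64 vsyscall base; everything else here is illustrative, not the kernel's code):

#include <stdint.h>
#include <stdio.h>

#define VSYSCALL_START 0xffffffffff600000ULL	/* fixed vsyscall base on x86-64 */

/* Same test as the hunk: clear the low 12 bits, compare page bases. */
static int in_vsyscall_page(uint64_t address)
{
	return (address & ~0xfffULL) == VSYSCALL_START;
}

int main(void)
{
	printf("%d\n", in_vsyscall_page(0xffffffffff600400ULL));	/* 1: inside the page */
	printf("%d\n", in_vsyscall_page(0xffffffffff601000ULL));	/* 0: first byte past it */
	return 0;
}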
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index dbe34b93137..dd74e46828c 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -108,16 +108,6 @@ static inline void get_head_page_multiple(struct page *page, int nr)
 	SetPageReferenced(page);
 }
 
-static inline void get_huge_page_tail(struct page *page)
-{
-	/*
-	 * __split_huge_page_refcount() cannot run
-	 * from under us.
-	 */
-	VM_BUG_ON(atomic_read(&page->_count) < 0);
-	atomic_inc(&page->_count);
-}
-
 static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
 		unsigned long end, int write, struct page **pages, int *nr)
 {
@@ -211,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
 	do {
 		VM_BUG_ON(compound_head(page) != head);
 		pages[*nr] = page;
+		if (PageTail(page))
+			get_huge_page_tail(page);
 		(*nr)++;
 		page++;
 		refs++;
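Note on the gup.c hunks: the helper is removed here but not dropped; the new call site in gup_huge_pud() implies it now lives in a shared header (presumably <linux/mm.h>) so other get_user_pages() paths can use it too. Its body, as visible in the removed hunk:

/* Sketch reproducing the removed helper at its assumed new home. */
static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run
	 * from under us.
	 */
	VM_BUG_ON(atomic_read(&page->_count) < 0);
	atomic_inc(&page->_count);
}

Taking a reference on each tail page keeps refcounts consistent if the huge page is split while the caller still holds the pages.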
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index b4996266210..f4f29b19fac 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 	set_pte(kmap_pte-idx, mk_pte(page, prot));
+	arch_flush_lazy_mmu_mode();
 
 	return (void *)vaddr;
 }
@@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
 		 */
 		kpte_clear_flush(kmap_pte-idx, vaddr);
 		kmap_atomic_idx_pop();
+		arch_flush_lazy_mmu_mode();
 	}
 #ifdef CONFIG_DEBUG_HIGHMEM
 	else {
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 30326443ab8..87488b93a65 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -63,9 +63,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 #ifdef CONFIG_X86_32
 	/* for fixmap */
 	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
-
-	good_end = max_pfn_mapped << PAGE_SHIFT;
 #endif
+	good_end = max_pfn_mapped << PAGE_SHIFT;
 
 	base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
 	if (base == MEMBLOCK_ERROR)
diff --git a/arch/x86/mm/kmemcheck/error.c b/arch/x86/mm/kmemcheck/error.c
index 704a37ceddd..dab41876cdd 100644
--- a/arch/x86/mm/kmemcheck/error.c
+++ b/arch/x86/mm/kmemcheck/error.c
@@ -185,7 +185,7 @@ void kmemcheck_error_save(enum kmemcheck_shadow state,
 	e->trace.entries = e->trace_entries;
 	e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
 	e->trace.skip = 0;
-	save_stack_trace_regs(&e->trace, regs);
+	save_stack_trace_regs(regs, &e->trace);
 
 	/* Round address down to nearest 16 bytes */
 	shadow_copy = kmemcheck_shadow_lookup(address
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 1dab5194fd9..f927429d07c 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -87,9 +87,9 @@ static unsigned long mmap_rnd(void)
 	 */
 	if (current->flags & PF_RANDOMIZE) {
 		if (mmap_is_ia32())
-			rnd = (long)get_random_int() % (1<<8);
+			rnd = get_random_int() % (1<<8);
 		else
-			rnd = (long)(get_random_int() % (1<<28));
+			rnd = get_random_int() % (1<<28);
 	}
 	return rnd << PAGE_SHIFT;
 }
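Note on the mmap.c hunk: dropping the (long) casts keeps the arithmetic unsigned throughout; get_random_int() returns an unsigned int, and on a 32-bit kernel the old cast could in principle go negative before the modulo. rnd counts pages here, so the final rnd << PAGE_SHIFT in mmap_rnd() gives the two configurations very different spans. A standalone sketch of the arithmetic, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages assumed */

int main(void)
{
	/* ia32: 8 bits of page-granular randomness */
	unsigned long long span32 = (1ULL << 8) << PAGE_SHIFT;
	/* 64-bit: 28 bits of page-granular randomness */
	unsigned long long span64 = (1ULL << 28) << PAGE_SHIFT;

	printf("ia32 span:   %llu MiB\n", span32 >> 20);	/* 1 MiB */
	printf("64-bit span: %llu GiB\n", span64 >> 30);	/* 1024 GiB = 1 TiB */
	return 0;
}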
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 3adff7dcc14..67421f38a21 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -34,7 +34,7 @@
 #include <asm/pgtable.h>
 #include <linux/mmiotrace.h>
 #include <asm/e820.h> /* for ISA_START_ADDRESS */
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <linux/percpu.h>
 #include <linux/cpu.h>
 
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index f5510d889a2..fbeaaf41661 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -496,6 +496,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
 
 static int __init numa_register_memblks(struct numa_meminfo *mi)
 {
+	unsigned long uninitialized_var(pfn_align);
 	int i, nid;
 
 	/* Account for nodes with cpus and no memory */
@@ -511,6 +512,20 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 
 	/* for out of order entries */
 	sort_node_map();
+
+	/*
+	 * If sections array is gonna be used for pfn -> nid mapping, check
+	 * whether its granularity is fine enough.
+	 */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+	pfn_align = node_map_pfn_alignment();
+	if (pfn_align && pfn_align < PAGES_PER_SECTION) {
+		printk(KERN_WARNING "Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
+		       PFN_PHYS(pfn_align) >> 20,
+		       PFN_PHYS(PAGES_PER_SECTION) >> 20);
+		return -EINVAL;
+	}
+#endif
 	if (!numa_meminfo_cover_memory(mi))
 		return -EINVAL;
 
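Note on the numa.c hunks: when NODE_NOT_IN_PAGE_FLAGS is defined, pfn -> nid lookups go through the sparsemem section array, which can only answer at section granularity, so a node boundary finer than one section would be misattributed; rejecting the configuration is the safe option. A standalone sketch of the granularity test (SECTION_SIZE_BITS = 27 is the x86-64 value; the 64 MiB node alignment is an invented example):

#include <stdio.h>

#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	27	/* x86-64: one section covers 128 MiB */
#define PAGES_PER_SECTION	(1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))

int main(void)
{
	/* Pretend node boundaries are only 64 MiB aligned. */
	unsigned long pfn_align = (64UL << 20) >> PAGE_SHIFT;

	if (pfn_align && pfn_align < PAGES_PER_SECTION)
		printf("Node alignment %luMB < min %luMB, rejecting NUMA config\n",
		       (pfn_align << PAGE_SHIFT) >> 20,		/* PFN_PHYS(pfn_align) >> 20 */
		       (PAGES_PER_SECTION << PAGE_SHIFT) >> 20);
	return 0;
}

This prints "Node alignment 64MB < min 128MB, rejecting NUMA config", matching the new KERN_WARNING.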
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 849a975d3fa..3adebe7e536 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -41,7 +41,7 @@
  * physnode_map[16-31] = 1;
  * physnode_map[32- ] = -1;
  */
-s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
+s8 physnode_map[MAX_SECTIONS] __read_mostly = { [0 ... (MAX_SECTIONS - 1)] = -1};
 EXPORT_SYMBOL(physnode_map);
 
 void memory_present(int nid, unsigned long start, unsigned long end)
@@ -52,8 +52,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
 			nid, start, end);
 	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
 	printk(KERN_DEBUG "  ");
-	for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
-		physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
+	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
+		physnode_map[pfn / PAGES_PER_SECTION] = nid;
 		printk(KERN_CONT "%lx ", pfn);
 	}
 	printk(KERN_CONT "\n");
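Note on the numa_32.c hunks: the ELEMENT -> SECTION rename makes physnode_map[] use the same sparsemem section granularity as the rest of the pfn -> nid machinery, one s8 entry per section. A small sketch of the indexing, assuming 4 KiB pages and SECTION_SIZE_BITS = 26 (assumed here as a non-PAE i386 configuration):

#include <stdio.h>

#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	26	/* assumed: one section covers 64 MiB */
#define PAGES_PER_SECTION	(1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))

static signed char physnode_map[64] = { [0 ... 63] = -1 };

int main(void)
{
	unsigned long pfn = (200UL << 20) >> PAGE_SHIFT;	/* pfn at 200 MiB */

	physnode_map[pfn / PAGES_PER_SECTION] = 1;		/* claim that section for node 1 */
	printf("pfn %lx -> node %d\n", pfn, physnode_map[pfn / PAGES_PER_SECTION]);
	return 0;
}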
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index e1d10690921..b0086567271 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -123,12 +123,11 @@ static int pageattr_test(void)
 	if (print)
 		printk(KERN_INFO "CPA self-test:\n");
 
-	bm = vmalloc((max_pfn_mapped + 7) / 8);
+	bm = vzalloc((max_pfn_mapped + 7) / 8);
 	if (!bm) {
 		printk(KERN_ERR "CPA Cannot vmalloc bitmap\n");
 		return -ENOMEM;
 	}
-	memset(bm, 0, (max_pfn_mapped + 7) / 8);
 
 	failed += print_split(&sa);
 	srandom32(100);
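Note on the pageattr-test.c hunk: vzalloc() folds allocation and zeroing into one call, removing the separate memset() and the duplicated size expression. Semantically it behaves like this kernel-context sketch (the real vzalloc() requests zeroed pages from the allocator rather than memset()ing afterwards):

/* Illustrative equivalent of vzalloc(size), not the kernel's implementation. */
void *my_vzalloc(unsigned long size)
{
	void *p = vmalloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}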