path: root/arch/x86/mm
author	Ingo Molnar <mingo@elte.hu>	2011-01-04 03:43:42 -0500
committer	Ingo Molnar <mingo@elte.hu>	2011-01-04 03:43:42 -0500
commit	bc030d6cb9532877c1c5a3f5e7123344fa24a285 (patch)
tree	d223d410b868b80d4c0deec192d354a5d06b201a /arch/x86/mm
parent	d3bd058826aa8b79590cca6c8e6d1557bf576ada (diff)
parent	387c31c7e5c9805b0aef8833d1731a5fe7bdea14 (diff)
Merge commit 'v2.6.37-rc8' into x86/apic
Conflicts:
	arch/x86/include/asm/io_apic.h

Merge reason: move to a fresh -rc, resolve the conflict.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/fault.c       |  63
-rw-r--r--  arch/x86/mm/highmem_32.c  |  76
-rw-r--r--  arch/x86/mm/init_32.c     |  45
-rw-r--r--  arch/x86/mm/init_64.c     |   1
-rw-r--r--  arch/x86/mm/iomap_32.c    |  43
-rw-r--r--  arch/x86/mm/numa_64.c     |   7
-rw-r--r--  arch/x86/mm/tlb.c         |   7
7 files changed, 116 insertions, 126 deletions
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 79b0b372d2d0..7d90ceb882a4 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -11,6 +11,7 @@
 #include <linux/kprobes.h>	/* __kprobes, ... */
 #include <linux/mmiotrace.h>	/* kmmio_handler, ... */
 #include <linux/perf_event.h>	/* perf_sw_event */
+#include <linux/hugetlb.h>	/* hstate_index_to_shift */
 
 #include <asm/traps.h>		/* dotraplinkage, ... */
 #include <asm/pgalloc.h>	/* pgd_*(), ... */
@@ -160,15 +161,20 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 
 static void
 force_sig_info_fault(int si_signo, int si_code, unsigned long address,
-			struct task_struct *tsk)
+			struct task_struct *tsk, int fault)
 {
+	unsigned lsb = 0;
 	siginfo_t info;
 
 	info.si_signo = si_signo;
 	info.si_errno = 0;
 	info.si_code = si_code;
 	info.si_addr = (void __user *)address;
-	info.si_addr_lsb = si_code == BUS_MCEERR_AR ? PAGE_SHIFT : 0;
+	if (fault & VM_FAULT_HWPOISON_LARGE)
+		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
+	if (fault & VM_FAULT_HWPOISON)
+		lsb = PAGE_SHIFT;
+	info.si_addr_lsb = lsb;
 
 	force_sig_info(si_signo, &info, tsk);
 }
@@ -722,7 +728,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
 		tsk->thread.trap_no = 14;
 
-		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
+		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);
 
 		return;
 	}
@@ -807,14 +813,14 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 	tsk->thread.trap_no = 14;
 
 #ifdef CONFIG_MEMORY_FAILURE
-	if (fault & VM_FAULT_HWPOISON) {
+	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
 		printk(KERN_ERR
 	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
 			tsk->comm, tsk->pid, address);
 		code = BUS_MCEERR_AR;
 	}
 #endif
-	force_sig_info_fault(SIGBUS, code, address, tsk);
+	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
 }
 
 static noinline void
@@ -824,7 +830,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 	if (fault & VM_FAULT_OOM) {
 		out_of_memory(regs, error_code, address);
 	} else {
-		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON))
+		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
+			     VM_FAULT_HWPOISON_LARGE))
 			do_sigbus(regs, error_code, address, fault);
 		else
 			BUG();
@@ -912,9 +919,9 @@ spurious_fault(unsigned long error_code, unsigned long address)
 int show_unhandled_signals = 1;
 
 static inline int
-access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
+access_error(unsigned long error_code, struct vm_area_struct *vma)
 {
-	if (write) {
+	if (error_code & PF_WRITE) {
 		/* write, present and write, not present: */
 		if (unlikely(!(vma->vm_flags & VM_WRITE)))
 			return 1;
@@ -949,8 +956,10 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	struct task_struct *tsk;
 	unsigned long address;
 	struct mm_struct *mm;
-	int write;
 	int fault;
+	int write = error_code & PF_WRITE;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
+				(write ? FAULT_FLAG_WRITE : 0);
 
 	tsk = current;
 	mm = tsk->mm;
@@ -1061,6 +1070,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 			bad_area_nosemaphore(regs, error_code, address);
 			return;
 		}
+retry:
 		down_read(&mm->mmap_sem);
 	} else {
 		/*
@@ -1104,9 +1114,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 * we can handle it..
 	 */
 good_area:
-	write = error_code & PF_WRITE;
-
-	if (unlikely(access_error(error_code, write, vma))) {
+	if (unlikely(access_error(error_code, vma))) {
 		bad_area_access_error(regs, error_code, address);
 		return;
 	}
@@ -1116,21 +1124,34 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault:
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, error_code, address, fault);
 		return;
 	}
 
-	if (fault & VM_FAULT_MAJOR) {
-		tsk->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-				regs, address);
-	} else {
-		tsk->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-				regs, address);
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			tsk->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+				      regs, address);
+		} else {
+			tsk->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+				      regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			goto retry;
+		}
 	}
 
 	check_v8086_mode(regs, address, tsk);
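
Note: the do_page_fault() hunks above mix two independent changes: the new 'fault' argument feeding hwpoison/huge-page details into force_sig_info_fault(), and the mmap_sem retry protocol (FAULT_FLAG_ALLOW_RETRY / VM_FAULT_RETRY). The fragment below is a hedged sketch of just the retry control flow, restated with identifiers that appear in the hunks; it assumes the 2.6.37-era handle_mm_fault() signature and is illustrative, not a copy of the full function.

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
				(write ? FAULT_FLAG_WRITE : 0);
	int fault;

retry:
	down_read(&mm->mmap_sem);
	/* ... look up and validate the vma covering 'address' ... */
	fault = handle_mm_fault(mm, vma, address, flags);

	if ((flags & FAULT_FLAG_ALLOW_RETRY) && (fault & VM_FAULT_RETRY)) {
		/*
		 * handle_mm_fault() already dropped mmap_sem before
		 * returning VM_FAULT_RETRY; retry exactly once, with the
		 * flag cleared so the retry itself cannot be retried.
		 */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		goto retry;
	}
	/* maj_flt/min_flt accounting happens only on the initial attempt */
	up_read(&mm->mmap_sem);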
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 5e8fa12ef861..b49962662101 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -9,6 +9,7 @@ void *kmap(struct page *page)
 		return page_address(page);
 	return kmap_high(page);
 }
+EXPORT_SYMBOL(kmap);
 
 void kunmap(struct page *page)
 {
@@ -18,6 +19,7 @@ void kunmap(struct page *page)
 		return;
 	kunmap_high(page);
 }
+EXPORT_SYMBOL(kunmap);
 
 /*
  * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
@@ -27,10 +29,10 @@ void kunmap(struct page *page)
  * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
 	pagefault_disable();
@@ -38,8 +40,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 	if (!PageHighMem(page))
 		return page_address(page);
 
-	debug_kmap_atomic(type);
-
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
@@ -47,44 +48,57 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
 
 	return (void *)vaddr;
 }
+EXPORT_SYMBOL(kmap_atomic_prot);
+
+void *__kmap_atomic(struct page *page)
+{
+	return kmap_atomic_prot(page, kmap_prot);
+}
+EXPORT_SYMBOL(__kmap_atomic);
 
-void *kmap_atomic(struct page *page, enum km_type type)
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
 {
-	return kmap_atomic_prot(page, type, kmap_prot);
+	return kmap_atomic_prot_pfn(pfn, kmap_prot);
 }
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
 
-void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type)
+void __kunmap_atomic(void *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
-
-	/*
-	 * Force other mappings to Oops if they'll try to access this pte
-	 * without first remap it. Keeping stale mappings around is a bad idea
-	 * also, in case the page changes cacheability attributes or becomes
-	 * a protected page in a hypervisor.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+		int idx, type;
+
+		type = kmap_atomic_idx();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+		/*
+		 * Force other mappings to Oops if they'll try to access this
+		 * pte without first remap it. Keeping stale mappings around
+		 * is a bad idea also, in case the page changes cacheability
+		 * attributes or becomes a protected page in a hypervisor.
+		 */
 		kpte_clear_flush(kmap_pte-idx, vaddr);
-	else {
+		kmap_atomic_idx_pop();
+	}
 #ifdef CONFIG_DEBUG_HIGHMEM
+	else {
 		BUG_ON(vaddr < PAGE_OFFSET);
 		BUG_ON(vaddr >= (unsigned long)high_memory);
-#endif
 	}
+#endif
 
 	pagefault_enable();
 }
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
-{
-	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
+EXPORT_SYMBOL(__kunmap_atomic);
 
 struct page *kmap_atomic_to_page(void *ptr)
 {
@@ -98,12 +112,6 @@ struct page *kmap_atomic_to_page(void *ptr)
 	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
 	return pte_page(*pte);
 }
-
-EXPORT_SYMBOL(kmap);
-EXPORT_SYMBOL(kunmap);
-EXPORT_SYMBOL(kmap_atomic);
-EXPORT_SYMBOL(kunmap_atomic_notypecheck);
-EXPORT_SYMBOL(kmap_atomic_prot);
 EXPORT_SYMBOL(kmap_atomic_to_page);
 
 void __init set_highmem_pages_init(void)
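
Note: the highmem_32.c conversion above replaces the caller-chosen km_type slot with a small per-CPU stack of atomic kmap slots: kmap_atomic_idx_push() claims the next free slot on map, kmap_atomic_idx() reads it back, and kmap_atomic_idx_pop() releases it on unmap, which means unmaps must nest in LIFO order. A hedged caller-side sketch using only the entry points exported by this diff ('page' and 'buf' are hypothetical placeholders):

	/* Copy one highmem page while pinned in an atomic mapping. */
	void *vaddr = __kmap_atomic(page);	/* claims a per-CPU kmap slot */

	memcpy(buf, vaddr, PAGE_SIZE);		/* atomic context: must not sleep */
	__kunmap_atomic(vaddr);			/* releases that slot (LIFO order) */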
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 5d0a6711c282..0e969f9f401b 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -528,48 +528,6 @@ static void __init pagetable_init(void)
 	permanent_kmaps_init(pgd_base);
 }
 
-#ifdef CONFIG_ACPI_SLEEP
-/*
- * ACPI suspend needs this for resume, because things like the intel-agp
- * driver might have split up a kernel 4MB mapping.
- */
-char swsusp_pg_dir[PAGE_SIZE]
-	__attribute__ ((aligned(PAGE_SIZE)));
-
-static inline void save_pg_dir(void)
-{
-	copy_page(swsusp_pg_dir, swapper_pg_dir);
-}
-#else /* !CONFIG_ACPI_SLEEP */
-static inline void save_pg_dir(void)
-{
-}
-#endif /* !CONFIG_ACPI_SLEEP */
-
-void zap_low_mappings(bool early)
-{
-	int i;
-
-	/*
-	 * Zap initial low-memory mappings.
-	 *
-	 * Note that "pgd_clear()" doesn't do it for
-	 * us, because pgd_clear() is a no-op on i386.
-	 */
-	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
-#ifdef CONFIG_X86_PAE
-		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
-#else
-		set_pgd(swapper_pg_dir+i, __pgd(0));
-#endif
-	}
-
-	if (early)
-		__flush_tlb();
-	else
-		flush_tlb_all();
-}
-
 pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
@@ -882,9 +840,6 @@ void __init mem_init(void)
 
 	if (boot_cpu_data.wp_works_ok < 0)
 		test_wp_bit();
-
-	save_pg_dir();
-	zap_low_mappings(true);
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 84346200e783..71a59296af80 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -51,7 +51,6 @@
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
 #include <asm/init.h>
-#include <linux/bootmem.h>
 
 static int __init parse_direct_gbpages_off(char *arg)
 {
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 72fc70cf6184..7b179b499fa3 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -48,21 +48,20 @@ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
 }
 EXPORT_SYMBOL_GPL(iomap_create_wc);
 
-void
-iomap_free(resource_size_t base, unsigned long size)
+void iomap_free(resource_size_t base, unsigned long size)
 {
 	io_free_memtype(base, base + size);
 }
 EXPORT_SYMBOL_GPL(iomap_free);
 
-void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
-	enum fixed_addresses idx;
 	unsigned long vaddr;
+	int idx, type;
 
 	pagefault_disable();
 
-	debug_kmap_atomic(type);
+	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
@@ -72,10 +71,10 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 }
 
 /*
- * Map 'pfn' using fixed map 'type' and protections 'prot'
+ * Map 'pfn' using protections 'prot'
  */
 void __iomem *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
 	/*
 	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
@@ -86,24 +85,34 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
 		prot = PAGE_KERNEL_UC_MINUS;
 
-	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
+	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
 void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type)
+iounmap_atomic(void __iomem *kvaddr)
 {
 	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	/*
-	 * Force other mappings to Oops if they'll try to access this pte
-	 * without first remap it. Keeping stale mappings around is a bad idea
-	 * also, in case the page changes cacheability attributes or becomes
-	 * a protected page in a hypervisor.
-	 */
-	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+		int idx, type;
+
+		type = kmap_atomic_idx();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+		/*
+		 * Force other mappings to Oops if they'll try to access this
+		 * pte without first remap it. Keeping stale mappings around
+		 * is a bad idea also, in case the page changes cacheability
+		 * attributes or becomes a protected page in a hypervisor.
+		 */
 		kpte_clear_flush(kmap_pte-idx, vaddr);
+		kmap_atomic_idx_pop();
+	}
 
 	pagefault_enable();
 }
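
Note: iomap_32.c drops the km_type parameter in the same way, so callers of iomap_atomic_prot_pfn()/iounmap_atomic() no longer pick a fixmap slot themselves. A hedged usage sketch with the new two-argument mapping call ('pfn', 'offset' and 'val' are hypothetical placeholders):

	/* Temporarily map one I/O page write-combined and poke a register. */
	void __iomem *base;

	base = iomap_atomic_prot_pfn(pfn, PAGE_KERNEL_WC);
	writel(val, base + offset);		/* stay atomic while mapped */
	iounmap_atomic(base);			/* no km_type argument any more */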
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 60f498511dd6..7ffc9b727efd 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -178,11 +178,8 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
 
 	/* extend the search scope */
 	end = max_pfn_mapped << PAGE_SHIFT;
-	if (end > (MAX_DMA32_PFN<<PAGE_SHIFT))
-		start = MAX_DMA32_PFN<<PAGE_SHIFT;
-	else
-		start = MAX_DMA_PFN<<PAGE_SHIFT;
-	mem = memblock_x86_find_in_range_node(nodeid, start, end, size, align);
+	start = MAX_DMA_PFN << PAGE_SHIFT;
+	mem = memblock_find_in_range(start, end, size, align);
 	if (mem != MEMBLOCK_ERROR)
 		return __va(mem);
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 49358481c733..6acc724d5d8f 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -223,7 +223,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 
 static void __cpuinit calculate_tlb_offset(void)
 {
-	int cpu, node, nr_node_vecs;
+	int cpu, node, nr_node_vecs, idx = 0;
 	/*
 	 * we are changing tlb_vector_offset for each CPU in runtime, but this
 	 * will not cause inconsistency, as the write is atomic under X86. we
@@ -239,7 +239,7 @@ static void __cpuinit calculate_tlb_offset(void)
 	nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;
 
 	for_each_online_node(node) {
-		int node_offset = (node % NUM_INVALIDATE_TLB_VECTORS) *
+		int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
 			nr_node_vecs;
 		int cpu_offset = 0;
 		for_each_cpu(cpu, cpumask_of_node(node)) {
@@ -248,10 +248,11 @@ static void __cpuinit calculate_tlb_offset(void)
 			cpu_offset++;
 			cpu_offset = cpu_offset % nr_node_vecs;
 		}
+		idx++;
 	}
 }
 
-static int tlb_cpuhp_notify(struct notifier_block *n,
+static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
 			unsigned long action, void *hcpu)
 {
 	switch (action & 0xf) {
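
Note: the calculate_tlb_offset() change replaces the raw node ID with a running count of online nodes because node IDs are not guaranteed to be contiguous. A worked example of the difference, assuming eight invalidate vectors (the count of eight is an assumption for illustration, not taken from this diff):

	/*
	 * Two online nodes numbered 0 and 2, NUM_INVALIDATE_TLB_VECTORS == 8
	 * (assumed), nr_online_nodes == 2, so nr_node_vecs = 8 / 2 = 4.
	 *
	 * old: node 2 -> (2 % 8) * 4 = 8, one past the last valid vector
	 * new: idx 1  -> (1 % 8) * 4 = 4, a valid per-node starting offset
	 */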