author	Ingo Molnar <mingo@elte.hu>	2009-02-24 15:52:27 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-24 15:52:27 -0500
commit	87b203079ed949de52f0d92aeae20e5e0116c12f (patch)
tree	1878756f936963822ed2d51a15db1da5814973e7 /arch/x86/mm/fault.c
parent	58105ef1857112a186696c9b8957020090226a28 (diff)
parent	a852cbfaaf8122827602027b1614971cfd832304 (diff)
Merge branch 'x86/core' into core/percpu
Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--	arch/x86/mm/fault.c	1085
1 file changed, 608 insertions(+), 477 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 8c3f3113a6ec..a03b7279efa0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1,74 +1,79 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 1995 Linus Torvalds | 2 | * Copyright (C) 1995 Linus Torvalds |
3 | * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs. | 3 | * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs. |
4 | * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar | ||
4 | */ | 5 | */ |
5 | |||
6 | #include <linux/signal.h> | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/errno.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/ptrace.h> | ||
13 | #include <linux/mmiotrace.h> | ||
14 | #include <linux/mman.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/interrupt.h> | 6 | #include <linux/interrupt.h> |
18 | #include <linux/init.h> | 7 | #include <linux/mmiotrace.h> |
19 | #include <linux/tty.h> | 8 | #include <linux/bootmem.h> |
20 | #include <linux/vt_kern.h> /* For unblank_screen() */ | ||
21 | #include <linux/compiler.h> | 9 | #include <linux/compiler.h> |
22 | #include <linux/highmem.h> | 10 | #include <linux/highmem.h> |
23 | #include <linux/bootmem.h> /* for max_low_pfn */ | ||
24 | #include <linux/vmalloc.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/kprobes.h> | 11 | #include <linux/kprobes.h> |
27 | #include <linux/uaccess.h> | 12 | #include <linux/uaccess.h> |
13 | #include <linux/vmalloc.h> | ||
14 | #include <linux/vt_kern.h> | ||
15 | #include <linux/signal.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/ptrace.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/module.h> | ||
28 | #include <linux/kdebug.h> | 20 | #include <linux/kdebug.h> |
21 | #include <linux/errno.h> | ||
29 | #include <linux/magic.h> | 22 | #include <linux/magic.h> |
23 | #include <linux/sched.h> | ||
24 | #include <linux/types.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/mman.h> | ||
27 | #include <linux/tty.h> | ||
28 | #include <linux/smp.h> | ||
29 | #include <linux/mm.h> | ||
30 | |||
31 | #include <asm-generic/sections.h> | ||
30 | 32 | ||
31 | #include <asm/system.h> | ||
32 | #include <asm/desc.h> | ||
33 | #include <asm/segment.h> | ||
34 | #include <asm/pgalloc.h> | ||
35 | #include <asm/smp.h> | ||
36 | #include <asm/tlbflush.h> | 33 | #include <asm/tlbflush.h> |
34 | #include <asm/pgalloc.h> | ||
35 | #include <asm/segment.h> | ||
36 | #include <asm/system.h> | ||
37 | #include <asm/proto.h> | 37 | #include <asm/proto.h> |
38 | #include <asm-generic/sections.h> | ||
39 | #include <asm/traps.h> | 38 | #include <asm/traps.h> |
39 | #include <asm/desc.h> | ||
40 | 40 | ||
41 | /* | 41 | /* |
42 | * Page fault error code bits | 42 | * Page fault error code bits: |
43 | * bit 0 == 0 means no page found, 1 means protection fault | 43 | * |
44 | * bit 1 == 0 means read, 1 means write | 44 | * bit 0 == 0: no page found 1: protection fault |
45 | * bit 2 == 0 means kernel, 1 means user-mode | 45 | * bit 1 == 0: read access 1: write access |
46 | * bit 3 == 1 means use of reserved bit detected | 46 | * bit 2 == 0: kernel-mode access 1: user-mode access |
47 | * bit 4 == 1 means fault was an instruction fetch | 47 | * bit 3 == 1: use of reserved bit detected |
48 | * bit 4 == 1: fault was an instruction fetch | ||
48 | */ | 49 | */ |
49 | #define PF_PROT (1<<0) | 50 | enum x86_pf_error_code { |
50 | #define PF_WRITE (1<<1) | 51 | |
51 | #define PF_USER (1<<2) | 52 | PF_PROT = 1 << 0, |
52 | #define PF_RSVD (1<<3) | 53 | PF_WRITE = 1 << 1, |
53 | #define PF_INSTR (1<<4) | 54 | PF_USER = 1 << 2, |
55 | PF_RSVD = 1 << 3, | ||
56 | PF_INSTR = 1 << 4, | ||
57 | }; | ||
54 | 58 | ||
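These bits make raw error codes from oops output easy to decode by hand. Below is a minimal user-space sketch of such a decoder; only the PF_* values come from the hunk above, and the decode_pf_error() helper is invented for illustration:

    #include <stdio.h>

    enum x86_pf_error_code {
            PF_PROT  = 1 << 0,      /* 0: no page found, 1: protection fault */
            PF_WRITE = 1 << 1,      /* 0: read access,   1: write access */
            PF_USER  = 1 << 2,      /* 0: kernel-mode,   1: user-mode access */
            PF_RSVD  = 1 << 3,      /* 1: use of reserved bit detected */
            PF_INSTR = 1 << 4,      /* 1: fault was an instruction fetch */
    };

    /* Hypothetical helper: print a human-readable view of an error code. */
    static void decode_pf_error(unsigned long code)
    {
            printf("%s %s fault, %s%s%s\n",
                   code & PF_USER  ? "user" : "kernel",
                   code & PF_WRITE ? "write" : "read",
                   code & PF_PROT  ? "protection violation" : "page not present",
                   code & PF_RSVD  ? ", reserved bit set" : "",
                   code & PF_INSTR ? ", instruction fetch" : "");
    }

    int main(void)
    {
            decode_pf_error(0x6);   /* user write to a not-present page */
            return 0;
    }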
59 | /* | ||
60 | * Returns 0 if mmiotrace is disabled, or if the fault is not | ||
61 | * handled by mmiotrace: | ||
62 | */ | ||
55 | static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) | 63 | static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) |
56 | { | 64 | { |
57 | #ifdef CONFIG_MMIOTRACE | ||
58 | if (unlikely(is_kmmio_active())) | 65 | if (unlikely(is_kmmio_active())) |
59 | if (kmmio_handler(regs, addr) == 1) | 66 | if (kmmio_handler(regs, addr) == 1) |
60 | return -1; | 67 | return -1; |
61 | #endif | ||
62 | return 0; | 68 | return 0; |
63 | } | 69 | } |
64 | 70 | ||
65 | static inline int notify_page_fault(struct pt_regs *regs) | 71 | static inline int notify_page_fault(struct pt_regs *regs) |
66 | { | 72 | { |
67 | #ifdef CONFIG_KPROBES | ||
68 | int ret = 0; | 73 | int ret = 0; |
69 | 74 | ||
70 | /* kprobe_running() needs smp_processor_id() */ | 75 | /* kprobe_running() needs smp_processor_id() */ |
71 | if (!user_mode_vm(regs)) { | 76 | if (kprobes_built_in() && !user_mode_vm(regs)) { |
72 | preempt_disable(); | 77 | preempt_disable(); |
73 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) | 78 | if (kprobe_running() && kprobe_fault_handler(regs, 14)) |
74 | ret = 1; | 79 | ret = 1; |
@@ -76,29 +81,76 @@ static inline int notify_page_fault(struct pt_regs *regs)
76 | } | 81 | } |
77 | 82 | ||
78 | return ret; | 83 | return ret; |
79 | #else | ||
80 | return 0; | ||
81 | #endif | ||
82 | } | 84 | } |
83 | 85 | ||
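The new kprobes_built_in() test replaces the old #ifdef CONFIG_KPROBES block: a constant-false predicate lets the compiler discard the whole branch while the code still gets type-checked. The same pattern in miniature, with an invented CONFIG_DEMO_FEATURE macro standing in for the real config option:

    #include <stdio.h>

    /* Toggle at build time, e.g. with -DCONFIG_DEMO_FEATURE */
    static inline int demo_feature_built_in(void)
    {
    #ifdef CONFIG_DEMO_FEATURE
            return 1;
    #else
            return 0;
    #endif
    }

    static int handle_event(void)
    {
            /*
             * With the feature off, the compiler sees `0 && ...` and
             * drops the branch - same effect as an #ifdef around the
             * code, but both variants stay visible to the compiler.
             */
            if (demo_feature_built_in() && /* expensive check */ 1)
                    return 1;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", handle_event());
            return 0;
    }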
84 | /* | 86 | /* |
85 | * X86_32 | 87 | * Prefetch quirks: |
86 | * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. | 88 | * |
87 | * Check that here and ignore it. | 89 | * 32-bit mode: |
88 | * | 90 | * |
89 | * X86_64 | 91 | * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch. |
90 | * Sometimes the CPU reports invalid exceptions on prefetch. | 92 | * Check that here and ignore it. |
91 | * Check that here and ignore it. | ||
92 | * | 93 | * |
93 | * Opcode checker based on code by Richard Brunner | 94 | * 64-bit mode: |
95 | * | ||
96 | * Sometimes the CPU reports invalid exceptions on prefetch. | ||
97 | * Check that here and ignore it. | ||
98 | * | ||
99 | * Opcode checker based on code by Richard Brunner. | ||
94 | */ | 100 | */ |
95 | static int is_prefetch(struct pt_regs *regs, unsigned long error_code, | 101 | static inline int |
96 | unsigned long addr) | 102 | check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr, |
103 | unsigned char opcode, int *prefetch) | ||
104 | { | ||
105 | unsigned char instr_hi = opcode & 0xf0; | ||
106 | unsigned char instr_lo = opcode & 0x0f; | ||
107 | |||
108 | switch (instr_hi) { | ||
109 | case 0x20: | ||
110 | case 0x30: | ||
111 | /* | ||
112 | * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. | ||
113 | * In X86_64 long mode, the CPU will signal invalid | ||
114 | * opcode if some of these prefixes are present so | ||
115 | * X86_64 will never get here anyway | ||
116 | */ | ||
117 | return ((instr_lo & 7) == 0x6); | ||
118 | #ifdef CONFIG_X86_64 | ||
119 | case 0x40: | ||
120 | /* | ||
121 | * In AMD64 long mode 0x40..0x4F are valid REX prefixes | ||
122 | * Need to figure out under what instruction mode the | ||
123 | * instruction was issued. Could check the LDT for lm, | ||
124 | * but for now it's good enough to assume that long | ||
125 | * mode only uses well known segments or kernel. | ||
126 | */ | ||
127 | return (!user_mode(regs)) || (regs->cs == __USER_CS); | ||
128 | #endif | ||
129 | case 0x60: | ||
130 | /* 0x64 thru 0x67 are valid prefixes in all modes. */ | ||
131 | return (instr_lo & 0xC) == 0x4; | ||
132 | case 0xF0: | ||
133 | /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */ | ||
134 | return !instr_lo || (instr_lo>>1) == 1; | ||
135 | case 0x00: | ||
136 | /* Prefetch instruction is 0x0F0D or 0x0F18 */ | ||
137 | if (probe_kernel_address(instr, opcode)) | ||
138 | return 0; | ||
139 | |||
140 | *prefetch = (instr_lo == 0xF) && | ||
141 | (opcode == 0x0D || opcode == 0x18); | ||
142 | return 0; | ||
143 | default: | ||
144 | return 0; | ||
145 | } | ||
146 | } | ||
147 | |||
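For reference, the scan that check_prefetch_opcode() now drives works like this: walk at most 15 bytes, skipping known prefix bytes, then test whether the opcode is one of AMD's prefetch encodings (0x0F 0x0D or 0x0F 0x18). A standalone user-space model of the same loop, with probe_kernel_address() replaced by a plain bounds-checked read and the REX case treated unconditionally as a prefix:

    #include <stdbool.h>
    #include <stdio.h>

    static bool looks_like_prefetch(const unsigned char *instr, int len)
    {
            const unsigned char *max_instr = instr + (len < 15 ? len : 15);

            while (instr < max_instr) {
                    unsigned char opcode = *instr++;
                    unsigned char hi = opcode & 0xf0, lo = opcode & 0x0f;

                    switch (hi) {
                    case 0x20: case 0x30:   /* 0x26, 0x2E, 0x36, 0x3E */
                            if ((lo & 7) != 0x6)
                                    return false;
                            break;
                    case 0x40:              /* REX prefixes (64-bit only) */
                            break;
                    case 0x60:              /* 0x64 .. 0x67 */
                            if ((lo & 0xC) != 0x4)
                                    return false;
                            break;
                    case 0xF0:              /* 0xF0, 0xF2, 0xF3 */
                            if (lo && (lo >> 1) != 1)
                                    return false;
                            break;
                    case 0x00:              /* two-byte opcode 0x0F xx */
                            if (instr >= max_instr)
                                    return false;
                            return lo == 0xF &&
                                   (*instr == 0x0D || *instr == 0x18);
                    default:
                            return false;
                    }
            }
            return false;
    }

    int main(void)
    {
            unsigned char prefetchnta[] = { 0x0F, 0x18, 0x06 };
            printf("%d\n", looks_like_prefetch(prefetchnta, 3));    /* 1 */
            return 0;
    }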
148 | static int | ||
149 | is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr) | ||
97 | { | 150 | { |
151 | unsigned char *max_instr; | ||
98 | unsigned char *instr; | 152 | unsigned char *instr; |
99 | int scan_more = 1; | ||
100 | int prefetch = 0; | 153 | int prefetch = 0; |
101 | unsigned char *max_instr; | ||
102 | 154 | ||
103 | /* | 155 | /* |
104 | * If it was a exec (instruction fetch) fault on NX page, then | 156 | * If it was a exec (instruction fetch) fault on NX page, then |
@@ -107,106 +159,170 @@ static int is_prefetch(struct pt_regs *regs, unsigned long error_code,
107 | if (error_code & PF_INSTR) | 159 | if (error_code & PF_INSTR) |
108 | return 0; | 160 | return 0; |
109 | 161 | ||
110 | instr = (unsigned char *)convert_ip_to_linear(current, regs); | 162 | instr = (void *)convert_ip_to_linear(current, regs); |
111 | max_instr = instr + 15; | 163 | max_instr = instr + 15; |
112 | 164 | ||
113 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) | 165 | if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE) |
114 | return 0; | 166 | return 0; |
115 | 167 | ||
116 | while (scan_more && instr < max_instr) { | 168 | while (instr < max_instr) { |
117 | unsigned char opcode; | 169 | unsigned char opcode; |
118 | unsigned char instr_hi; | ||
119 | unsigned char instr_lo; | ||
120 | 170 | ||
121 | if (probe_kernel_address(instr, opcode)) | 171 | if (probe_kernel_address(instr, opcode)) |
122 | break; | 172 | break; |
123 | 173 | ||
124 | instr_hi = opcode & 0xf0; | ||
125 | instr_lo = opcode & 0x0f; | ||
126 | instr++; | 174 | instr++; |
127 | 175 | ||
128 | switch (instr_hi) { | 176 | if (!check_prefetch_opcode(regs, instr, opcode, &prefetch)) |
129 | case 0x20: | ||
130 | case 0x30: | ||
131 | /* | ||
132 | * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. | ||
133 | * In X86_64 long mode, the CPU will signal invalid | ||
134 | * opcode if some of these prefixes are present so | ||
135 | * X86_64 will never get here anyway | ||
136 | */ | ||
137 | scan_more = ((instr_lo & 7) == 0x6); | ||
138 | break; | ||
139 | #ifdef CONFIG_X86_64 | ||
140 | case 0x40: | ||
141 | /* | ||
142 | * In AMD64 long mode 0x40..0x4F are valid REX prefixes | ||
143 | * Need to figure out under what instruction mode the | ||
144 | * instruction was issued. Could check the LDT for lm, | ||
145 | * but for now it's good enough to assume that long | ||
146 | * mode only uses well known segments or kernel. | ||
147 | */ | ||
148 | scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS); | ||
149 | break; | 177 | break; |
150 | #endif | ||
151 | case 0x60: | ||
152 | /* 0x64 thru 0x67 are valid prefixes in all modes. */ | ||
153 | scan_more = (instr_lo & 0xC) == 0x4; | ||
154 | break; | ||
155 | case 0xF0: | ||
156 | /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */ | ||
157 | scan_more = !instr_lo || (instr_lo>>1) == 1; | ||
158 | break; | ||
159 | case 0x00: | ||
160 | /* Prefetch instruction is 0x0F0D or 0x0F18 */ | ||
161 | scan_more = 0; | ||
162 | |||
163 | if (probe_kernel_address(instr, opcode)) | ||
164 | break; | ||
165 | prefetch = (instr_lo == 0xF) && | ||
166 | (opcode == 0x0D || opcode == 0x18); | ||
167 | break; | ||
168 | default: | ||
169 | scan_more = 0; | ||
170 | break; | ||
171 | } | ||
172 | } | 178 | } |
173 | return prefetch; | 179 | return prefetch; |
174 | } | 180 | } |
175 | 181 | ||
176 | static void force_sig_info_fault(int si_signo, int si_code, | 182 | static void |
177 | unsigned long address, struct task_struct *tsk) | 183 | force_sig_info_fault(int si_signo, int si_code, unsigned long address, |
184 | struct task_struct *tsk) | ||
178 | { | 185 | { |
179 | siginfo_t info; | 186 | siginfo_t info; |
180 | 187 | ||
181 | info.si_signo = si_signo; | 188 | info.si_signo = si_signo; |
182 | info.si_errno = 0; | 189 | info.si_errno = 0; |
183 | info.si_code = si_code; | 190 | info.si_code = si_code; |
184 | info.si_addr = (void __user *)address; | 191 | info.si_addr = (void __user *)address; |
192 | |||
185 | force_sig_info(si_signo, &info, tsk); | 193 | force_sig_info(si_signo, &info, tsk); |
186 | } | 194 | } |
187 | 195 | ||
188 | #ifdef CONFIG_X86_64 | 196 | DEFINE_SPINLOCK(pgd_lock); |
189 | static int bad_address(void *p) | 197 | LIST_HEAD(pgd_list); |
198 | |||
199 | #ifdef CONFIG_X86_32 | ||
200 | static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | ||
190 | { | 201 | { |
191 | unsigned long dummy; | 202 | unsigned index = pgd_index(address); |
192 | return probe_kernel_address((unsigned long *)p, dummy); | 203 | pgd_t *pgd_k; |
204 | pud_t *pud, *pud_k; | ||
205 | pmd_t *pmd, *pmd_k; | ||
206 | |||
207 | pgd += index; | ||
208 | pgd_k = init_mm.pgd + index; | ||
209 | |||
210 | if (!pgd_present(*pgd_k)) | ||
211 | return NULL; | ||
212 | |||
213 | /* | ||
214 | * set_pgd(pgd, *pgd_k); here would be useless on PAE | ||
215 | * and redundant with the set_pmd() on non-PAE. As would | ||
216 | * set_pud. | ||
217 | */ | ||
218 | pud = pud_offset(pgd, address); | ||
219 | pud_k = pud_offset(pgd_k, address); | ||
220 | if (!pud_present(*pud_k)) | ||
221 | return NULL; | ||
222 | |||
223 | pmd = pmd_offset(pud, address); | ||
224 | pmd_k = pmd_offset(pud_k, address); | ||
225 | if (!pmd_present(*pmd_k)) | ||
226 | return NULL; | ||
227 | |||
228 | if (!pmd_present(*pmd)) { | ||
229 | set_pmd(pmd, *pmd_k); | ||
230 | arch_flush_lazy_mmu_mode(); | ||
231 | } else { | ||
232 | BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); | ||
233 | } | ||
234 | |||
235 | return pmd_k; | ||
236 | } | ||
237 | |||
238 | void vmalloc_sync_all(void) | ||
239 | { | ||
240 | unsigned long address; | ||
241 | |||
242 | if (SHARED_KERNEL_PMD) | ||
243 | return; | ||
244 | |||
245 | for (address = VMALLOC_START & PMD_MASK; | ||
246 | address >= TASK_SIZE && address < FIXADDR_TOP; | ||
247 | address += PMD_SIZE) { | ||
248 | |||
249 | unsigned long flags; | ||
250 | struct page *page; | ||
251 | |||
252 | spin_lock_irqsave(&pgd_lock, flags); | ||
253 | list_for_each_entry(page, &pgd_list, lru) { | ||
254 | if (!vmalloc_sync_one(page_address(page), address)) | ||
255 | break; | ||
256 | } | ||
257 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
258 | } | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * 32-bit: | ||
263 | * | ||
264 | * Handle a fault on the vmalloc or module mapping area | ||
265 | */ | ||
266 | static noinline int vmalloc_fault(unsigned long address) | ||
267 | { | ||
268 | unsigned long pgd_paddr; | ||
269 | pmd_t *pmd_k; | ||
270 | pte_t *pte_k; | ||
271 | |||
272 | /* Make sure we are in vmalloc area: */ | ||
273 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | ||
274 | return -1; | ||
275 | |||
276 | /* | ||
277 | * Synchronize this task's top level page-table | ||
278 | * with the 'reference' page table. | ||
279 | * | ||
280 | * Do _not_ use "current" here. We might be inside | ||
281 | * an interrupt in the middle of a task switch.. | ||
282 | */ | ||
283 | pgd_paddr = read_cr3(); | ||
284 | pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); | ||
285 | if (!pmd_k) | ||
286 | return -1; | ||
287 | |||
288 | pte_k = pte_offset_kernel(pmd_k, address); | ||
289 | if (!pte_present(*pte_k)) | ||
290 | return -1; | ||
291 | |||
292 | return 0; | ||
293 | } | ||
294 | |||
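The 32-bit path above copies a single top-level entry from the init_mm 'reference' tables into the faulting task's tables, instead of keeping every pgd in sync eagerly. A toy user-space model of that lazy-sync idea; the table names and sizes here are invented for illustration:

    #include <stdio.h>

    #define ENTRIES 4

    /*
     * Toy model: a "reference" top-level table owned by the kernel,
     * and a per-task copy that is filled in lazily on first fault.
     */
    static long reference_table[ENTRIES] = { 0x1000, 0x2000, 0, 0x4000 };
    static long task_table[ENTRIES];        /* starts empty */

    /*
     * Returns 0 if the entry could be synced from the reference
     * table, -1 if the reference has no mapping either (a real fault).
     */
    static int toy_vmalloc_fault(int index)
    {
            if (!reference_table[index])
                    return -1;              /* genuinely unmapped */
            if (!task_table[index])
                    task_table[index] = reference_table[index];
            return 0;
    }

    int main(void)
    {
            printf("%d\n", toy_vmalloc_fault(1));   /* 0: synced */
            printf("%d\n", toy_vmalloc_fault(2));   /* -1: real fault */
            return 0;
    }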
295 | /* | ||
296 | * Did it hit the DOS screen memory VA from vm86 mode? | ||
297 | */ | ||
298 | static inline void | ||
299 | check_v8086_mode(struct pt_regs *regs, unsigned long address, | ||
300 | struct task_struct *tsk) | ||
301 | { | ||
302 | unsigned long bit; | ||
303 | |||
304 | if (!v8086_mode(regs)) | ||
305 | return; | ||
306 | |||
307 | bit = (address - 0xA0000) >> PAGE_SHIFT; | ||
308 | if (bit < 32) | ||
309 | tsk->thread.screen_bitmap |= 1 << bit; | ||
193 | } | 310 | } |
194 | #endif | ||
195 | 311 | ||
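The screen-bitmap arithmetic maps the VGA window at 0xA0000 onto one bit per page. A quick standalone check of the math, assuming PAGE_SHIFT is 12 and an example address three pages into the window:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long address = 0xA3000;    /* 3 pages past 0xA0000 */
            unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;

            if (bit < 32)
                    printf("screen_bitmap |= 1 << %lu\n", bit);  /* bit 3 */
            return 0;
    }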
196 | static void dump_pagetable(unsigned long address) | 312 | static void dump_pagetable(unsigned long address) |
197 | { | 313 | { |
198 | #ifdef CONFIG_X86_32 | ||
199 | __typeof__(pte_val(__pte(0))) page; | 314 | __typeof__(pte_val(__pte(0))) page; |
200 | 315 | ||
201 | page = read_cr3(); | 316 | page = read_cr3(); |
202 | page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; | 317 | page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT]; |
318 | |||
203 | #ifdef CONFIG_X86_PAE | 319 | #ifdef CONFIG_X86_PAE |
204 | printk("*pdpt = %016Lx ", page); | 320 | printk("*pdpt = %016Lx ", page); |
205 | if ((page >> PAGE_SHIFT) < max_low_pfn | 321 | if ((page >> PAGE_SHIFT) < max_low_pfn |
206 | && page & _PAGE_PRESENT) { | 322 | && page & _PAGE_PRESENT) { |
207 | page &= PAGE_MASK; | 323 | page &= PAGE_MASK; |
208 | page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) | 324 | page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT) |
209 | & (PTRS_PER_PMD - 1)]; | 325 | & (PTRS_PER_PMD - 1)]; |
210 | printk(KERN_CONT "*pde = %016Lx ", page); | 326 | printk(KERN_CONT "*pde = %016Lx ", page); |
211 | page &= ~_PAGE_NX; | 327 | page &= ~_PAGE_NX; |
212 | } | 328 | } |
@@ -218,19 +334,145 @@ static void dump_pagetable(unsigned long address)
218 | * We must not directly access the pte in the highpte | 334 | * We must not directly access the pte in the highpte |
219 | * case if the page table is located in highmem. | 335 | * case if the page table is located in highmem. |
220 | * And let's rather not kmap-atomic the pte, just in case | 336 | * And let's rather not kmap-atomic the pte, just in case |
221 | * it's allocated already. | 337 | * it's allocated already: |
222 | */ | 338 | */ |
223 | if ((page >> PAGE_SHIFT) < max_low_pfn | 339 | if ((page >> PAGE_SHIFT) < max_low_pfn |
224 | && (page & _PAGE_PRESENT) | 340 | && (page & _PAGE_PRESENT) |
225 | && !(page & _PAGE_PSE)) { | 341 | && !(page & _PAGE_PSE)) { |
342 | |||
226 | page &= PAGE_MASK; | 343 | page &= PAGE_MASK; |
227 | page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) | 344 | page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) |
228 | & (PTRS_PER_PTE - 1)]; | 345 | & (PTRS_PER_PTE - 1)]; |
229 | printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page); | 346 | printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page); |
230 | } | 347 | } |
231 | 348 | ||
232 | printk("\n"); | 349 | printk("\n"); |
233 | #else /* CONFIG_X86_64 */ | 350 | } |
351 | |||
352 | #else /* CONFIG_X86_64: */ | ||
353 | |||
354 | void vmalloc_sync_all(void) | ||
355 | { | ||
356 | unsigned long address; | ||
357 | |||
358 | for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END; | ||
359 | address += PGDIR_SIZE) { | ||
360 | |||
361 | const pgd_t *pgd_ref = pgd_offset_k(address); | ||
362 | unsigned long flags; | ||
363 | struct page *page; | ||
364 | |||
365 | if (pgd_none(*pgd_ref)) | ||
366 | continue; | ||
367 | |||
368 | spin_lock_irqsave(&pgd_lock, flags); | ||
369 | list_for_each_entry(page, &pgd_list, lru) { | ||
370 | pgd_t *pgd; | ||
371 | pgd = (pgd_t *)page_address(page) + pgd_index(address); | ||
372 | if (pgd_none(*pgd)) | ||
373 | set_pgd(pgd, *pgd_ref); | ||
374 | else | ||
375 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | ||
376 | } | ||
377 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
378 | } | ||
379 | } | ||
380 | |||
381 | /* | ||
382 | * 64-bit: | ||
383 | * | ||
384 | * Handle a fault on the vmalloc area | ||
385 | * | ||
386 | * This assumes no large pages in there. | ||
387 | */ | ||
388 | static noinline int vmalloc_fault(unsigned long address) | ||
389 | { | ||
390 | pgd_t *pgd, *pgd_ref; | ||
391 | pud_t *pud, *pud_ref; | ||
392 | pmd_t *pmd, *pmd_ref; | ||
393 | pte_t *pte, *pte_ref; | ||
394 | |||
395 | /* Make sure we are in vmalloc area: */ | ||
396 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | ||
397 | return -1; | ||
398 | |||
399 | /* | ||
400 | * Copy kernel mappings over when needed. This can also | ||
401 | * happen within a race in page table update. In the later | ||
402 | * case just flush: | ||
403 | */ | ||
404 | pgd = pgd_offset(current->active_mm, address); | ||
405 | pgd_ref = pgd_offset_k(address); | ||
406 | if (pgd_none(*pgd_ref)) | ||
407 | return -1; | ||
408 | |||
409 | if (pgd_none(*pgd)) | ||
410 | set_pgd(pgd, *pgd_ref); | ||
411 | else | ||
412 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | ||
413 | |||
414 | /* | ||
415 | * Below here mismatches are bugs because these lower tables | ||
416 | * are shared: | ||
417 | */ | ||
418 | |||
419 | pud = pud_offset(pgd, address); | ||
420 | pud_ref = pud_offset(pgd_ref, address); | ||
421 | if (pud_none(*pud_ref)) | ||
422 | return -1; | ||
423 | |||
424 | if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) | ||
425 | BUG(); | ||
426 | |||
427 | pmd = pmd_offset(pud, address); | ||
428 | pmd_ref = pmd_offset(pud_ref, address); | ||
429 | if (pmd_none(*pmd_ref)) | ||
430 | return -1; | ||
431 | |||
432 | if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) | ||
433 | BUG(); | ||
434 | |||
435 | pte_ref = pte_offset_kernel(pmd_ref, address); | ||
436 | if (!pte_present(*pte_ref)) | ||
437 | return -1; | ||
438 | |||
439 | pte = pte_offset_kernel(pmd, address); | ||
440 | |||
441 | /* | ||
442 | * Don't use pte_page here, because the mappings can point | ||
443 | * outside mem_map, and the NUMA hash lookup cannot handle | ||
444 | * that: | ||
445 | */ | ||
446 | if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) | ||
447 | BUG(); | ||
448 | |||
449 | return 0; | ||
450 | } | ||
451 | |||
452 | static const char errata93_warning[] = | ||
453 | KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n" | ||
454 | KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n" | ||
455 | KERN_ERR "******* Please consider a BIOS update.\n" | ||
456 | KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n"; | ||
457 | |||
458 | /* | ||
459 | * No vm86 mode in 64-bit mode: | ||
460 | */ | ||
461 | static inline void | ||
462 | check_v8086_mode(struct pt_regs *regs, unsigned long address, | ||
463 | struct task_struct *tsk) | ||
464 | { | ||
465 | } | ||
466 | |||
467 | static int bad_address(void *p) | ||
468 | { | ||
469 | unsigned long dummy; | ||
470 | |||
471 | return probe_kernel_address((unsigned long *)p, dummy); | ||
472 | } | ||
473 | |||
474 | static void dump_pagetable(unsigned long address) | ||
475 | { | ||
234 | pgd_t *pgd; | 476 | pgd_t *pgd; |
235 | pud_t *pud; | 477 | pud_t *pud; |
236 | pmd_t *pmd; | 478 | pmd_t *pmd; |
@@ -239,102 +481,77 @@ static void dump_pagetable(unsigned long address)
239 | pgd = (pgd_t *)read_cr3(); | 481 | pgd = (pgd_t *)read_cr3(); |
240 | 482 | ||
241 | pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); | 483 | pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK); |
484 | |||
242 | pgd += pgd_index(address); | 485 | pgd += pgd_index(address); |
243 | if (bad_address(pgd)) goto bad; | 486 | if (bad_address(pgd)) |
487 | goto bad; | ||
488 | |||
244 | printk("PGD %lx ", pgd_val(*pgd)); | 489 | printk("PGD %lx ", pgd_val(*pgd)); |
245 | if (!pgd_present(*pgd)) goto ret; | 490 | |
491 | if (!pgd_present(*pgd)) | ||
492 | goto out; | ||
246 | 493 | ||
247 | pud = pud_offset(pgd, address); | 494 | pud = pud_offset(pgd, address); |
248 | if (bad_address(pud)) goto bad; | 495 | if (bad_address(pud)) |
496 | goto bad; | ||
497 | |||
249 | printk("PUD %lx ", pud_val(*pud)); | 498 | printk("PUD %lx ", pud_val(*pud)); |
250 | if (!pud_present(*pud) || pud_large(*pud)) | 499 | if (!pud_present(*pud) || pud_large(*pud)) |
251 | goto ret; | 500 | goto out; |
252 | 501 | ||
253 | pmd = pmd_offset(pud, address); | 502 | pmd = pmd_offset(pud, address); |
254 | if (bad_address(pmd)) goto bad; | 503 | if (bad_address(pmd)) |
504 | goto bad; | ||
505 | |||
255 | printk("PMD %lx ", pmd_val(*pmd)); | 506 | printk("PMD %lx ", pmd_val(*pmd)); |
256 | if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret; | 507 | if (!pmd_present(*pmd) || pmd_large(*pmd)) |
508 | goto out; | ||
257 | 509 | ||
258 | pte = pte_offset_kernel(pmd, address); | 510 | pte = pte_offset_kernel(pmd, address); |
259 | if (bad_address(pte)) goto bad; | 511 | if (bad_address(pte)) |
512 | goto bad; | ||
513 | |||
260 | printk("PTE %lx", pte_val(*pte)); | 514 | printk("PTE %lx", pte_val(*pte)); |
261 | ret: | 515 | out: |
262 | printk("\n"); | 516 | printk("\n"); |
263 | return; | 517 | return; |
264 | bad: | 518 | bad: |
265 | printk("BAD\n"); | 519 | printk("BAD\n"); |
266 | #endif | ||
267 | } | ||
268 | |||
269 | #ifdef CONFIG_X86_32 | ||
270 | static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) | ||
271 | { | ||
272 | unsigned index = pgd_index(address); | ||
273 | pgd_t *pgd_k; | ||
274 | pud_t *pud, *pud_k; | ||
275 | pmd_t *pmd, *pmd_k; | ||
276 | |||
277 | pgd += index; | ||
278 | pgd_k = init_mm.pgd + index; | ||
279 | |||
280 | if (!pgd_present(*pgd_k)) | ||
281 | return NULL; | ||
282 | |||
283 | /* | ||
284 | * set_pgd(pgd, *pgd_k); here would be useless on PAE | ||
285 | * and redundant with the set_pmd() on non-PAE. As would | ||
286 | * set_pud. | ||
287 | */ | ||
288 | |||
289 | pud = pud_offset(pgd, address); | ||
290 | pud_k = pud_offset(pgd_k, address); | ||
291 | if (!pud_present(*pud_k)) | ||
292 | return NULL; | ||
293 | |||
294 | pmd = pmd_offset(pud, address); | ||
295 | pmd_k = pmd_offset(pud_k, address); | ||
296 | if (!pmd_present(*pmd_k)) | ||
297 | return NULL; | ||
298 | if (!pmd_present(*pmd)) { | ||
299 | set_pmd(pmd, *pmd_k); | ||
300 | arch_flush_lazy_mmu_mode(); | ||
301 | } else | ||
302 | BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); | ||
303 | return pmd_k; | ||
304 | } | 520 | } |
305 | #endif | ||
306 | 521 | ||
307 | #ifdef CONFIG_X86_64 | 522 | #endif /* CONFIG_X86_64 */ |
308 | static const char errata93_warning[] = | ||
309 | KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n" | ||
310 | KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n" | ||
311 | KERN_ERR "******* Please consider a BIOS update.\n" | ||
312 | KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n"; | ||
313 | #endif | ||
314 | 523 | ||
315 | /* Workaround for K8 erratum #93 & buggy BIOS. | 524 | /* |
316 | BIOS SMM functions are required to use a specific workaround | 525 | * Workaround for K8 erratum #93 & buggy BIOS. |
317 | to avoid corruption of the 64bit RIP register on C stepping K8. | 526 | * |
318 | A lot of BIOS that didn't get tested properly miss this. | 527 | * BIOS SMM functions are required to use a specific workaround |
319 | The OS sees this as a page fault with the upper 32bits of RIP cleared. | 528 | * to avoid corruption of the 64bit RIP register on C stepping K8. |
320 | Try to work around it here. | 529 | * |
321 | Note we only handle faults in kernel here. | 530 | * A lot of BIOS that didn't get tested properly miss this. |
322 | Does nothing for X86_32 | 531 | * |
532 | * The OS sees this as a page fault with the upper 32bits of RIP cleared. | ||
533 | * Try to work around it here. | ||
534 | * | ||
535 | * Note we only handle faults in kernel here. | ||
536 | * Does nothing on 32-bit. | ||
323 | */ | 537 | */ |
324 | static int is_errata93(struct pt_regs *regs, unsigned long address) | 538 | static int is_errata93(struct pt_regs *regs, unsigned long address) |
325 | { | 539 | { |
326 | #ifdef CONFIG_X86_64 | 540 | #ifdef CONFIG_X86_64 |
327 | static int warned; | 541 | static int once; |
542 | |||
328 | if (address != regs->ip) | 543 | if (address != regs->ip) |
329 | return 0; | 544 | return 0; |
545 | |||
330 | if ((address >> 32) != 0) | 546 | if ((address >> 32) != 0) |
331 | return 0; | 547 | return 0; |
548 | |||
332 | address |= 0xffffffffUL << 32; | 549 | address |= 0xffffffffUL << 32; |
333 | if ((address >= (u64)_stext && address <= (u64)_etext) || | 550 | if ((address >= (u64)_stext && address <= (u64)_etext) || |
334 | (address >= MODULES_VADDR && address <= MODULES_END)) { | 551 | (address >= MODULES_VADDR && address <= MODULES_END)) { |
335 | if (!warned) { | 552 | if (!once) { |
336 | printk(errata93_warning); | 553 | printk(errata93_warning); |
337 | warned = 1; | 554 | once = 1; |
338 | } | 555 | } |
339 | regs->ip = address; | 556 | regs->ip = address; |
340 | return 1; | 557 | return 1; |
@@ -344,16 +561,17 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
344 | } | 561 | } |
345 | 562 | ||
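The erratum #93 workaround reconstructs a kernel RIP whose upper 32 bits were cleared by buggy SMM code; the fixup itself is plain bit arithmetic, shown standalone below (example address invented, 64-bit unsigned long assumed):

    #include <stdio.h>

    int main(void)
    {
            /* RIP as reported by the buggy SMM path: upper half cleared. */
            unsigned long address = 0xffffffff810123abUL & 0xffffffffUL;

            if ((address >> 32) == 0) {
                    address |= 0xffffffffUL << 32;  /* restore upper bits */
                    printf("fixed ip: %#lx\n", address);
                    /* prints: fixed ip: 0xffffffff810123ab */
            }
            return 0;
    }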
346 | /* | 563 | /* |
347 | * Work around K8 erratum #100 K8 in compat mode occasionally jumps to illegal | 564 | * Work around K8 erratum #100 K8 in compat mode occasionally jumps |
348 | * addresses >4GB. We catch this in the page fault handler because these | 565 | * to illegal addresses >4GB. |
349 | * addresses are not reachable. Just detect this case and return. Any code | 566 | * |
567 | * We catch this in the page fault handler because these addresses | ||
568 | * are not reachable. Just detect this case and return. Any code | ||
350 | * segment in LDT is compatibility mode. | 569 | * segment in LDT is compatibility mode. |
351 | */ | 570 | */ |
352 | static int is_errata100(struct pt_regs *regs, unsigned long address) | 571 | static int is_errata100(struct pt_regs *regs, unsigned long address) |
353 | { | 572 | { |
354 | #ifdef CONFIG_X86_64 | 573 | #ifdef CONFIG_X86_64 |
355 | if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && | 574 | if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) |
356 | (address >> 32)) | ||
357 | return 1; | 575 | return 1; |
358 | #endif | 576 | #endif |
359 | return 0; | 577 | return 0; |
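The cs test in is_errata100() leans on the x86 selector format: bit 2 of a segment selector is the table indicator, set for LDT-based segments, so `regs->cs & (1<<2)` catches any LDT code segment. A tiny demonstration with made-up selector values:

    #include <stdio.h>

    int main(void)
    {
            unsigned short gdt_sel = 0x33;  /* index 6, TI=0 (GDT), RPL=3 */
            unsigned short ldt_sel = 0x37;  /* index 6, TI=1 (LDT), RPL=3 */

            printf("gdt: %d, ldt: %d\n",
                   !!(gdt_sel & (1 << 2)),
                   !!(ldt_sel & (1 << 2)));        /* prints: gdt: 0, ldt: 1 */
            return 0;
    }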
@@ -363,8 +581,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
363 | { | 581 | { |
364 | #ifdef CONFIG_X86_F00F_BUG | 582 | #ifdef CONFIG_X86_F00F_BUG |
365 | unsigned long nr; | 583 | unsigned long nr; |
584 | |||
366 | /* | 585 | /* |
367 | * Pentium F0 0F C7 C8 bug workaround. | 586 | * Pentium F0 0F C7 C8 bug workaround: |
368 | */ | 587 | */ |
369 | if (boot_cpu_data.f00f_bug) { | 588 | if (boot_cpu_data.f00f_bug) { |
370 | nr = (address - idt_descr.address) >> 3; | 589 | nr = (address - idt_descr.address) >> 3; |
@@ -378,81 +597,87 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
378 | return 0; | 597 | return 0; |
379 | } | 598 | } |
380 | 599 | ||
381 | static void show_fault_oops(struct pt_regs *regs, unsigned long error_code, | 600 | static const char nx_warning[] = KERN_CRIT |
382 | unsigned long address) | 601 | "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; |
602 | |||
603 | static void | ||
604 | show_fault_oops(struct pt_regs *regs, unsigned long error_code, | ||
605 | unsigned long address) | ||
383 | { | 606 | { |
384 | #ifdef CONFIG_X86_32 | ||
385 | if (!oops_may_print()) | 607 | if (!oops_may_print()) |
386 | return; | 608 | return; |
387 | #endif | ||
388 | 609 | ||
389 | #ifdef CONFIG_X86_PAE | ||
390 | if (error_code & PF_INSTR) { | 610 | if (error_code & PF_INSTR) { |
391 | unsigned int level; | 611 | unsigned int level; |
612 | |||
392 | pte_t *pte = lookup_address(address, &level); | 613 | pte_t *pte = lookup_address(address, &level); |
393 | 614 | ||
394 | if (pte && pte_present(*pte) && !pte_exec(*pte)) | 615 | if (pte && pte_present(*pte) && !pte_exec(*pte)) |
395 | printk(KERN_CRIT "kernel tried to execute " | 616 | printk(nx_warning, current_uid()); |
396 | "NX-protected page - exploit attempt? " | ||
397 | "(uid: %d)\n", current_uid()); | ||
398 | } | 617 | } |
399 | #endif | ||
400 | 618 | ||
401 | printk(KERN_ALERT "BUG: unable to handle kernel "); | 619 | printk(KERN_ALERT "BUG: unable to handle kernel "); |
402 | if (address < PAGE_SIZE) | 620 | if (address < PAGE_SIZE) |
403 | printk(KERN_CONT "NULL pointer dereference"); | 621 | printk(KERN_CONT "NULL pointer dereference"); |
404 | else | 622 | else |
405 | printk(KERN_CONT "paging request"); | 623 | printk(KERN_CONT "paging request"); |
624 | |||
406 | printk(KERN_CONT " at %p\n", (void *) address); | 625 | printk(KERN_CONT " at %p\n", (void *) address); |
407 | printk(KERN_ALERT "IP:"); | 626 | printk(KERN_ALERT "IP:"); |
408 | printk_address(regs->ip, 1); | 627 | printk_address(regs->ip, 1); |
628 | |||
409 | dump_pagetable(address); | 629 | dump_pagetable(address); |
410 | } | 630 | } |
411 | 631 | ||
412 | #ifdef CONFIG_X86_64 | 632 | static noinline void |
413 | static noinline void pgtable_bad(struct pt_regs *regs, | 633 | pgtable_bad(struct pt_regs *regs, unsigned long error_code, |
414 | unsigned long error_code, unsigned long address) | 634 | unsigned long address) |
415 | { | 635 | { |
416 | unsigned long flags = oops_begin(); | 636 | struct task_struct *tsk; |
417 | int sig = SIGKILL; | 637 | unsigned long flags; |
418 | struct task_struct *tsk = current; | 638 | int sig; |
639 | |||
640 | flags = oops_begin(); | ||
641 | tsk = current; | ||
642 | sig = SIGKILL; | ||
419 | 643 | ||
420 | printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", | 644 | printk(KERN_ALERT "%s: Corrupted page table at address %lx\n", |
421 | tsk->comm, address); | 645 | tsk->comm, address); |
422 | dump_pagetable(address); | 646 | dump_pagetable(address); |
423 | tsk = current; | 647 | |
424 | tsk->thread.cr2 = address; | 648 | tsk->thread.cr2 = address; |
425 | tsk->thread.trap_no = 14; | 649 | tsk->thread.trap_no = 14; |
426 | tsk->thread.error_code = error_code; | 650 | tsk->thread.error_code = error_code; |
651 | |||
427 | if (__die("Bad pagetable", regs, error_code)) | 652 | if (__die("Bad pagetable", regs, error_code)) |
428 | sig = 0; | 653 | sig = 0; |
654 | |||
429 | oops_end(flags, regs, sig); | 655 | oops_end(flags, regs, sig); |
430 | } | 656 | } |
431 | #endif | ||
432 | 657 | ||
433 | static noinline void no_context(struct pt_regs *regs, | 658 | static noinline void |
434 | unsigned long error_code, unsigned long address) | 659 | no_context(struct pt_regs *regs, unsigned long error_code, |
660 | unsigned long address) | ||
435 | { | 661 | { |
436 | struct task_struct *tsk = current; | 662 | struct task_struct *tsk = current; |
437 | unsigned long *stackend; | 663 | unsigned long *stackend; |
438 | |||
439 | #ifdef CONFIG_X86_64 | ||
440 | unsigned long flags; | 664 | unsigned long flags; |
441 | int sig; | 665 | int sig; |
442 | #endif | ||
443 | 666 | ||
444 | /* Are we prepared to handle this kernel fault? */ | 667 | /* Are we prepared to handle this kernel fault? */ |
445 | if (fixup_exception(regs)) | 668 | if (fixup_exception(regs)) |
446 | return; | 669 | return; |
447 | 670 | ||
448 | /* | 671 | /* |
449 | * X86_32 | 672 | * 32-bit: |
450 | * Valid to do another page fault here, because if this fault | 673 | * |
451 | * had been triggered by is_prefetch fixup_exception would have | 674 | * Valid to do another page fault here, because if this fault |
452 | * handled it. | 675 | * had been triggered by is_prefetch fixup_exception would have |
676 | * handled it. | ||
677 | * | ||
678 | * 64-bit: | ||
453 | * | 679 | * |
454 | * X86_64 | 680 | * Hall of shame of CPU/BIOS bugs. |
455 | * Hall of shame of CPU/BIOS bugs. | ||
456 | */ | 681 | */ |
457 | if (is_prefetch(regs, error_code, address)) | 682 | if (is_prefetch(regs, error_code, address)) |
458 | return; | 683 | return; |
@@ -462,54 +687,70 @@ static noinline void no_context(struct pt_regs *regs,
462 | 687 | ||
463 | /* | 688 | /* |
464 | * Oops. The kernel tried to access some bad page. We'll have to | 689 | * Oops. The kernel tried to access some bad page. We'll have to |
465 | * terminate things with extreme prejudice. | 690 | * terminate things with extreme prejudice: |
466 | */ | 691 | */ |
467 | #ifdef CONFIG_X86_32 | ||
468 | bust_spinlocks(1); | ||
469 | #else | ||
470 | flags = oops_begin(); | 692 | flags = oops_begin(); |
471 | #endif | ||
472 | 693 | ||
473 | show_fault_oops(regs, error_code, address); | 694 | show_fault_oops(regs, error_code, address); |
474 | 695 | ||
475 | stackend = end_of_stack(tsk); | 696 | stackend = end_of_stack(tsk); |
476 | if (*stackend != STACK_END_MAGIC) | 697 | if (*stackend != STACK_END_MAGIC) |
477 | printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); | 698 | printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); |
478 | 699 | ||
479 | tsk->thread.cr2 = address; | 700 | tsk->thread.cr2 = address; |
480 | tsk->thread.trap_no = 14; | 701 | tsk->thread.trap_no = 14; |
481 | tsk->thread.error_code = error_code; | 702 | tsk->thread.error_code = error_code; |
482 | 703 | ||
483 | #ifdef CONFIG_X86_32 | ||
484 | die("Oops", regs, error_code); | ||
485 | bust_spinlocks(0); | ||
486 | do_exit(SIGKILL); | ||
487 | #else | ||
488 | sig = SIGKILL; | 704 | sig = SIGKILL; |
489 | if (__die("Oops", regs, error_code)) | 705 | if (__die("Oops", regs, error_code)) |
490 | sig = 0; | 706 | sig = 0; |
707 | |||
491 | /* Executive summary in case the body of the oops scrolled away */ | 708 | /* Executive summary in case the body of the oops scrolled away */ |
492 | printk(KERN_EMERG "CR2: %016lx\n", address); | 709 | printk(KERN_EMERG "CR2: %016lx\n", address); |
710 | |||
493 | oops_end(flags, regs, sig); | 711 | oops_end(flags, regs, sig); |
494 | #endif | ||
495 | } | 712 | } |
496 | 713 | ||
497 | static void __bad_area_nosemaphore(struct pt_regs *regs, | 714 | /* |
498 | unsigned long error_code, unsigned long address, | 715 | * Print out info about fatal segfaults, if the show_unhandled_signals |
499 | int si_code) | 716 | * sysctl is set: |
717 | */ | ||
718 | static inline void | ||
719 | show_signal_msg(struct pt_regs *regs, unsigned long error_code, | ||
720 | unsigned long address, struct task_struct *tsk) | ||
721 | { | ||
722 | if (!unhandled_signal(tsk, SIGSEGV)) | ||
723 | return; | ||
724 | |||
725 | if (!printk_ratelimit()) | ||
726 | return; | ||
727 | |||
728 | printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx", | ||
729 | task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, | ||
730 | tsk->comm, task_pid_nr(tsk), address, | ||
731 | (void *)regs->ip, (void *)regs->sp, error_code); | ||
732 | |||
733 | print_vma_addr(KERN_CONT " in ", regs->ip); | ||
734 | |||
735 | printk(KERN_CONT "\n"); | ||
736 | } | ||
737 | |||
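show_signal_msg() gates its output on two cheap checks before formatting anything. A user-space sketch of the same rate-limit-then-log shape; the one-message-per-second window here is invented and much cruder than the kernel's printk_ratelimit():

    #include <stdio.h>
    #include <time.h>

    /* Crude stand-in for printk_ratelimit(): at most one message
     * per second. Not the kernel algorithm, just the shape. */
    static int ratelimit(void)
    {
            static time_t last;
            time_t now = time(NULL);

            if (now == last)
                    return 0;
            last = now;
            return 1;
    }

    static void show_signal_msg(const char *comm, int pid, unsigned long addr)
    {
            if (!ratelimit())
                    return;
            fprintf(stderr, "%s[%d]: segfault at %lx\n", comm, pid, addr);
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    show_signal_msg("demo", 1234, 0xdeadbeef);
            /* typically prints once: all three calls land in one second */
            return 0;
    }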
738 | static void | ||
739 | __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, | ||
740 | unsigned long address, int si_code) | ||
500 | { | 741 | { |
501 | struct task_struct *tsk = current; | 742 | struct task_struct *tsk = current; |
502 | 743 | ||
503 | /* User mode accesses just cause a SIGSEGV */ | 744 | /* User mode accesses just cause a SIGSEGV */ |
504 | if (error_code & PF_USER) { | 745 | if (error_code & PF_USER) { |
505 | /* | 746 | /* |
506 | * It's possible to have interrupts off here. | 747 | * It's possible to have interrupts off here: |
507 | */ | 748 | */ |
508 | local_irq_enable(); | 749 | local_irq_enable(); |
509 | 750 | ||
510 | /* | 751 | /* |
511 | * Valid to do another page fault here because this one came | 752 | * Valid to do another page fault here because this one came |
512 | * from user space. | 753 | * from user space: |
513 | */ | 754 | */ |
514 | if (is_prefetch(regs, error_code, address)) | 755 | if (is_prefetch(regs, error_code, address)) |
515 | return; | 756 | return; |
@@ -517,22 +758,16 @@ static void __bad_area_nosemaphore(struct pt_regs *regs,
517 | if (is_errata100(regs, address)) | 758 | if (is_errata100(regs, address)) |
518 | return; | 759 | return; |
519 | 760 | ||
520 | if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && | 761 | if (unlikely(show_unhandled_signals)) |
521 | printk_ratelimit()) { | 762 | show_signal_msg(regs, error_code, address, tsk); |
522 | printk( | 763 | |
523 | "%s%s[%d]: segfault at %lx ip %p sp %p error %lx", | 764 | /* Kernel addresses are always protection faults: */ |
524 | task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG, | 765 | tsk->thread.cr2 = address; |
525 | tsk->comm, task_pid_nr(tsk), address, | 766 | tsk->thread.error_code = error_code | (address >= TASK_SIZE); |
526 | (void *) regs->ip, (void *) regs->sp, error_code); | 767 | tsk->thread.trap_no = 14; |
527 | print_vma_addr(" in ", regs->ip); | ||
528 | printk("\n"); | ||
529 | } | ||
530 | 768 | ||
531 | tsk->thread.cr2 = address; | ||
532 | /* Kernel addresses are always protection faults */ | ||
533 | tsk->thread.error_code = error_code | (address >= TASK_SIZE); | ||
534 | tsk->thread.trap_no = 14; | ||
535 | force_sig_info_fault(SIGSEGV, si_code, address, tsk); | 769 | force_sig_info_fault(SIGSEGV, si_code, address, tsk); |
770 | |||
536 | return; | 771 | return; |
537 | } | 772 | } |
538 | 773 | ||
@@ -542,15 +777,16 @@ static void __bad_area_nosemaphore(struct pt_regs *regs,
542 | no_context(regs, error_code, address); | 777 | no_context(regs, error_code, address); |
543 | } | 778 | } |
544 | 779 | ||
545 | static noinline void bad_area_nosemaphore(struct pt_regs *regs, | 780 | static noinline void |
546 | unsigned long error_code, unsigned long address) | 781 | bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, |
782 | unsigned long address) | ||
547 | { | 783 | { |
548 | __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); | 784 | __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); |
549 | } | 785 | } |
550 | 786 | ||
551 | static void __bad_area(struct pt_regs *regs, | 787 | static void |
552 | unsigned long error_code, unsigned long address, | 788 | __bad_area(struct pt_regs *regs, unsigned long error_code, |
553 | int si_code) | 789 | unsigned long address, int si_code) |
554 | { | 790 | { |
555 | struct mm_struct *mm = current->mm; | 791 | struct mm_struct *mm = current->mm; |
556 | 792 | ||
@@ -563,67 +799,75 @@ static void __bad_area(struct pt_regs *regs,
563 | __bad_area_nosemaphore(regs, error_code, address, si_code); | 799 | __bad_area_nosemaphore(regs, error_code, address, si_code); |
564 | } | 800 | } |
565 | 801 | ||
566 | static noinline void bad_area(struct pt_regs *regs, | 802 | static noinline void |
567 | unsigned long error_code, unsigned long address) | 803 | bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) |
568 | { | 804 | { |
569 | __bad_area(regs, error_code, address, SEGV_MAPERR); | 805 | __bad_area(regs, error_code, address, SEGV_MAPERR); |
570 | } | 806 | } |
571 | 807 | ||
572 | static noinline void bad_area_access_error(struct pt_regs *regs, | 808 | static noinline void |
573 | unsigned long error_code, unsigned long address) | 809 | bad_area_access_error(struct pt_regs *regs, unsigned long error_code, |
810 | unsigned long address) | ||
574 | { | 811 | { |
575 | __bad_area(regs, error_code, address, SEGV_ACCERR); | 812 | __bad_area(regs, error_code, address, SEGV_ACCERR); |
576 | } | 813 | } |
577 | 814 | ||
578 | /* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */ | 815 | /* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */ |
579 | static void out_of_memory(struct pt_regs *regs, | 816 | static void |
580 | unsigned long error_code, unsigned long address) | 817 | out_of_memory(struct pt_regs *regs, unsigned long error_code, |
818 | unsigned long address) | ||
581 | { | 819 | { |
582 | /* | 820 | /* |
583 | * We ran out of memory, call the OOM killer, and return the userspace | 821 | * We ran out of memory, call the OOM killer, and return the userspace |
584 | * (which will retry the fault, or kill us if we got oom-killed). | 822 | * (which will retry the fault, or kill us if we got oom-killed): |
585 | */ | 823 | */ |
586 | up_read(¤t->mm->mmap_sem); | 824 | up_read(¤t->mm->mmap_sem); |
825 | |||
587 | pagefault_out_of_memory(); | 826 | pagefault_out_of_memory(); |
588 | } | 827 | } |
589 | 828 | ||
590 | static void do_sigbus(struct pt_regs *regs, | 829 | static void |
591 | unsigned long error_code, unsigned long address) | 830 | do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address) |
592 | { | 831 | { |
593 | struct task_struct *tsk = current; | 832 | struct task_struct *tsk = current; |
594 | struct mm_struct *mm = tsk->mm; | 833 | struct mm_struct *mm = tsk->mm; |
595 | 834 | ||
596 | up_read(&mm->mmap_sem); | 835 | up_read(&mm->mmap_sem); |
597 | 836 | ||
598 | /* Kernel mode? Handle exceptions or die */ | 837 | /* Kernel mode? Handle exceptions or die: */ |
599 | if (!(error_code & PF_USER)) | 838 | if (!(error_code & PF_USER)) |
600 | no_context(regs, error_code, address); | 839 | no_context(regs, error_code, address); |
601 | #ifdef CONFIG_X86_32 | 840 | |
602 | /* User space => ok to do another page fault */ | 841 | /* User-space => ok to do another page fault: */ |
603 | if (is_prefetch(regs, error_code, address)) | 842 | if (is_prefetch(regs, error_code, address)) |
604 | return; | 843 | return; |
605 | #endif | 844 | |
606 | tsk->thread.cr2 = address; | 845 | tsk->thread.cr2 = address; |
607 | tsk->thread.error_code = error_code; | 846 | tsk->thread.error_code = error_code; |
608 | tsk->thread.trap_no = 14; | 847 | tsk->thread.trap_no = 14; |
848 | |||
609 | force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); | 849 | force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); |
610 | } | 850 | } |
611 | 851 | ||
612 | static noinline void mm_fault_error(struct pt_regs *regs, | 852 | static noinline void |
613 | unsigned long error_code, unsigned long address, unsigned int fault) | 853 | mm_fault_error(struct pt_regs *regs, unsigned long error_code, |
854 | unsigned long address, unsigned int fault) | ||
614 | { | 855 | { |
615 | if (fault & VM_FAULT_OOM) | 856 | if (fault & VM_FAULT_OOM) { |
616 | out_of_memory(regs, error_code, address); | 857 | out_of_memory(regs, error_code, address); |
617 | else if (fault & VM_FAULT_SIGBUS) | 858 | } else { |
618 | do_sigbus(regs, error_code, address); | 859 | if (fault & VM_FAULT_SIGBUS) |
619 | else | 860 | do_sigbus(regs, error_code, address); |
620 | BUG(); | 861 | else |
862 | BUG(); | ||
863 | } | ||
621 | } | 864 | } |
622 | 865 | ||
623 | static int spurious_fault_check(unsigned long error_code, pte_t *pte) | 866 | static int spurious_fault_check(unsigned long error_code, pte_t *pte) |
624 | { | 867 | { |
625 | if ((error_code & PF_WRITE) && !pte_write(*pte)) | 868 | if ((error_code & PF_WRITE) && !pte_write(*pte)) |
626 | return 0; | 869 | return 0; |
870 | |||
627 | if ((error_code & PF_INSTR) && !pte_exec(*pte)) | 871 | if ((error_code & PF_INSTR) && !pte_exec(*pte)) |
628 | return 0; | 872 | return 0; |
629 | 873 | ||
@@ -631,21 +875,25 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
631 | } | 875 | } |
632 | 876 | ||
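spurious_fault_check() declares a fault spurious only if the current PTE already grants everything the faulting access wanted. A standalone model of that decision; the PF_* values match the hunk above, but the DEMO_PTE_* encodings are invented and are not the real x86 PTE bits:

    #include <stdio.h>

    #define PF_WRITE (1 << 1)
    #define PF_INSTR (1 << 4)

    #define DEMO_PTE_WRITE 1
    #define DEMO_PTE_EXEC  2

    /* Spurious only if the PTE already permits the attempted access. */
    static int spurious_fault_check(unsigned long error_code, int pte_flags)
    {
            if ((error_code & PF_WRITE) && !(pte_flags & DEMO_PTE_WRITE))
                    return 0;
            if ((error_code & PF_INSTR) && !(pte_flags & DEMO_PTE_EXEC))
                    return 0;
            return 1;
    }

    int main(void)
    {
            /* Write fault, PTE now writable: stale TLB, spurious. */
            printf("%d\n", spurious_fault_check(PF_WRITE, DEMO_PTE_WRITE));
            /* Write fault, PTE still read-only: a real fault. */
            printf("%d\n", spurious_fault_check(PF_WRITE, 0));
            return 0;
    }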
633 | /* | 877 | /* |
634 | * Handle a spurious fault caused by a stale TLB entry. This allows | 878 | * Handle a spurious fault caused by a stale TLB entry. |
635 | * us to lazily refresh the TLB when increasing the permissions of a | 879 | * |
636 | * kernel page (RO -> RW or NX -> X). Doing it eagerly is very | 880 | * This allows us to lazily refresh the TLB when increasing the |
637 | * expensive since that implies doing a full cross-processor TLB | 881 | * permissions of a kernel page (RO -> RW or NX -> X). Doing it |
638 | * flush, even if no stale TLB entries exist on other processors. | 882 | * eagerly is very expensive since that implies doing a full |
883 | * cross-processor TLB flush, even if no stale TLB entries exist | ||
884 | * on other processors. | ||
885 | * | ||
639 | * There are no security implications to leaving a stale TLB when | 886 | * There are no security implications to leaving a stale TLB when |
640 | * increasing the permissions on a page. | 887 | * increasing the permissions on a page. |
641 | */ | 888 | */ |
642 | static noinline int spurious_fault(unsigned long error_code, | 889 | static noinline int |
643 | unsigned long address) | 890 | spurious_fault(unsigned long error_code, unsigned long address) |
644 | { | 891 | { |
645 | pgd_t *pgd; | 892 | pgd_t *pgd; |
646 | pud_t *pud; | 893 | pud_t *pud; |
647 | pmd_t *pmd; | 894 | pmd_t *pmd; |
648 | pte_t *pte; | 895 | pte_t *pte; |
896 | int ret; | ||
649 | 897 | ||
650 | /* Reserved-bit violation or user access to kernel space? */ | 898 | /* Reserved-bit violation or user access to kernel space? */ |
651 | if (error_code & (PF_USER | PF_RSVD)) | 899 | if (error_code & (PF_USER | PF_RSVD)) |
@@ -673,138 +921,69 @@ static noinline int spurious_fault(unsigned long error_code,
673 | if (!pte_present(*pte)) | 921 | if (!pte_present(*pte)) |
674 | return 0; | 922 | return 0; |
675 | 923 | ||
676 | return spurious_fault_check(error_code, pte); | 924 | ret = spurious_fault_check(error_code, pte); |
677 | } | 925 | if (!ret) |
678 | 926 | return 0; | |
679 | /* | ||
680 | * X86_32 | ||
681 | * Handle a fault on the vmalloc or module mapping area | ||
682 | * | ||
683 | * X86_64 | ||
684 | * Handle a fault on the vmalloc area | ||
685 | * | ||
686 | * This assumes no large pages in there. | ||
687 | */ | ||
688 | static noinline int vmalloc_fault(unsigned long address) | ||
689 | { | ||
690 | #ifdef CONFIG_X86_32 | ||
691 | unsigned long pgd_paddr; | ||
692 | pmd_t *pmd_k; | ||
693 | pte_t *pte_k; | ||
694 | |||
695 | /* Make sure we are in vmalloc area */ | ||
696 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | ||
697 | return -1; | ||
698 | 927 | ||
699 | /* | 928 | /* |
700 | * Synchronize this task's top level page-table | 929 | * Make sure we have permissions in PMD. |
701 | * with the 'reference' page table. | 930 | * If not, then there's a bug in the page tables: |
702 | * | ||
703 | * Do _not_ use "current" here. We might be inside | ||
704 | * an interrupt in the middle of a task switch.. | ||
705 | */ | 931 | */ |
706 | pgd_paddr = read_cr3(); | 932 | ret = spurious_fault_check(error_code, (pte_t *) pmd); |
707 | pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); | 933 | WARN_ONCE(!ret, "PMD has incorrect permission bits\n"); |
708 | if (!pmd_k) | ||
709 | return -1; | ||
710 | pte_k = pte_offset_kernel(pmd_k, address); | ||
711 | if (!pte_present(*pte_k)) | ||
712 | return -1; | ||
713 | return 0; | ||
714 | #else | ||
715 | pgd_t *pgd, *pgd_ref; | ||
716 | pud_t *pud, *pud_ref; | ||
717 | pmd_t *pmd, *pmd_ref; | ||
718 | pte_t *pte, *pte_ref; | ||
719 | |||
720 | /* Make sure we are in vmalloc area */ | ||
721 | if (!(address >= VMALLOC_START && address < VMALLOC_END)) | ||
722 | return -1; | ||
723 | |||
724 | /* Copy kernel mappings over when needed. This can also | ||
725 | happen within a race in page table update. In the later | ||
726 | case just flush. */ | ||
727 | |||
728 | pgd = pgd_offset(current->active_mm, address); | ||
729 | pgd_ref = pgd_offset_k(address); | ||
730 | if (pgd_none(*pgd_ref)) | ||
731 | return -1; | ||
732 | if (pgd_none(*pgd)) | ||
733 | set_pgd(pgd, *pgd_ref); | ||
734 | else | ||
735 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | ||
736 | |||
737 | /* Below here mismatches are bugs because these lower tables | ||
738 | are shared */ | ||
739 | 934 | ||
740 | pud = pud_offset(pgd, address); | 935 | return ret; |
741 | pud_ref = pud_offset(pgd_ref, address); | ||
742 | if (pud_none(*pud_ref)) | ||
743 | return -1; | ||
744 | if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref)) | ||
745 | BUG(); | ||
746 | pmd = pmd_offset(pud, address); | ||
747 | pmd_ref = pmd_offset(pud_ref, address); | ||
748 | if (pmd_none(*pmd_ref)) | ||
749 | return -1; | ||
750 | if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref)) | ||
751 | BUG(); | ||
752 | pte_ref = pte_offset_kernel(pmd_ref, address); | ||
753 | if (!pte_present(*pte_ref)) | ||
754 | return -1; | ||
755 | pte = pte_offset_kernel(pmd, address); | ||
756 | /* Don't use pte_page here, because the mappings can point | ||
757 | outside mem_map, and the NUMA hash lookup cannot handle | ||
758 | that. */ | ||
759 | if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) | ||
760 | BUG(); | ||
761 | return 0; | ||
762 | #endif | ||
763 | } | 936 | } |
764 | 937 | ||
765 | int show_unhandled_signals = 1; | 938 | int show_unhandled_signals = 1; |
766 | 939 | ||
767 | static inline int access_error(unsigned long error_code, int write, | 940 | static inline int |
768 | struct vm_area_struct *vma) | 941 | access_error(unsigned long error_code, int write, struct vm_area_struct *vma) |
769 | { | 942 | { |
770 | if (write) { | 943 | if (write) { |
771 | /* write, present and write, not present */ | 944 | /* write, present and write, not present: */ |
772 | if (unlikely(!(vma->vm_flags & VM_WRITE))) | 945 | if (unlikely(!(vma->vm_flags & VM_WRITE))) |
773 | return 1; | 946 | return 1; |
774 | } else if (unlikely(error_code & PF_PROT)) { | 947 | return 0; |
775 | /* read, present */ | ||
776 | return 1; | ||
777 | } else { | ||
778 | /* read, not present */ | ||
779 | if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) | ||
780 | return 1; | ||
781 | } | 948 | } |
782 | 949 | ||
950 | /* read, present: */ | ||
951 | if (unlikely(error_code & PF_PROT)) | ||
952 | return 1; | ||
953 | |||
954 | /* read, not present: */ | ||
955 | if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))) | ||
956 | return 1; | ||
957 | |||
783 | return 0; | 958 | return 0; |
784 | } | 959 | } |
785 | 960 | ||
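The restructured access_error() reads as three early returns: a write to a non-writable VMA, a read protection fault on a present page, or a read from a VMA with no access bits at all. A compact standalone check of that truth table, with invented VM_* flag values:

    #include <stdio.h>

    #define PF_PROT  (1 << 0)

    #define VM_READ  1
    #define VM_WRITE 2
    #define VM_EXEC  4

    static int access_error(unsigned long error_code, int write, int vm_flags)
    {
            if (write)
                    return !(vm_flags & VM_WRITE);  /* write, non-writable */
            if (error_code & PF_PROT)
                    return 1;                       /* read, present */
            return !(vm_flags & (VM_READ | VM_EXEC | VM_WRITE));
    }

    int main(void)
    {
            printf("%d\n", access_error(0, 1, VM_READ));              /* 1 */
            printf("%d\n", access_error(PF_PROT, 0, VM_READ|VM_WRITE)); /* 1 */
            printf("%d\n", access_error(0, 0, VM_READ));              /* 0 */
            return 0;
    }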
961 | static int fault_in_kernel_space(unsigned long address) | ||
962 | { | ||
963 | return address >= TASK_SIZE_MAX; | ||
964 | } | ||
965 | |||
786 | /* | 966 | /* |
787 | * This routine handles page faults. It determines the address, | 967 | * This routine handles page faults. It determines the address, |
788 | * and the problem, and then passes it off to one of the appropriate | 968 | * and the problem, and then passes it off to one of the appropriate |
789 | * routines. | 969 | * routines. |
790 | */ | 970 | */ |
791 | #ifdef CONFIG_X86_64 | 971 | dotraplinkage void __kprobes |
792 | asmlinkage | 972 | do_page_fault(struct pt_regs *regs, unsigned long error_code) |
793 | #endif | ||
794 | void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) | ||
795 | { | 973 | { |
796 | unsigned long address; | 974 | struct vm_area_struct *vma; |
797 | struct task_struct *tsk; | 975 | struct task_struct *tsk; |
976 | unsigned long address; | ||
798 | struct mm_struct *mm; | 977 | struct mm_struct *mm; |
799 | struct vm_area_struct *vma; | ||
800 | int write; | 978 | int write; |
801 | int fault; | 979 | int fault; |
802 | 980 | ||
803 | tsk = current; | 981 | tsk = current; |
804 | mm = tsk->mm; | 982 | mm = tsk->mm; |
983 | |||
805 | prefetchw(&mm->mmap_sem); | 984 | prefetchw(&mm->mmap_sem); |
806 | 985 | ||
807 | /* get the address */ | 986 | /* Get the faulting address: */ |
808 | address = read_cr2(); | 987 | address = read_cr2(); |
809 | 988 | ||
810 | if (unlikely(kmmio_fault(regs, address))) | 989 | if (unlikely(kmmio_fault(regs, address))) |
@@ -823,30 +1002,28 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
823 | * (error_code & 4) == 0, and that the fault was not a | 1002 | * (error_code & 4) == 0, and that the fault was not a |
824 | * protection error (error_code & 9) == 0. | 1003 | * protection error (error_code & 9) == 0. |
825 | */ | 1004 | */ |
826 | #ifdef CONFIG_X86_32 | 1005 | if (unlikely(fault_in_kernel_space(address))) { |
827 | if (unlikely(address >= TASK_SIZE)) { | ||
828 | #else | ||
829 | if (unlikely(address >= TASK_SIZE64)) { | ||
830 | #endif | ||
831 | if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) && | 1006 | if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) && |
832 | vmalloc_fault(address) >= 0) | 1007 | vmalloc_fault(address) >= 0) |
833 | return; | 1008 | return; |
834 | 1009 | ||
835 | /* Can handle a stale RO->RW TLB */ | 1010 | /* Can handle a stale RO->RW TLB: */ |
836 | if (spurious_fault(error_code, address)) | 1011 | if (spurious_fault(error_code, address)) |
837 | return; | 1012 | return; |
838 | 1013 | ||
839 | /* kprobes don't want to hook the spurious faults. */ | 1014 | /* kprobes don't want to hook the spurious faults: */ |
840 | if (notify_page_fault(regs)) | 1015 | if (notify_page_fault(regs)) |
841 | return; | 1016 | return; |
842 | /* | 1017 | /* |
843 | * Don't take the mm semaphore here. If we fixup a prefetch | 1018 | * Don't take the mm semaphore here. If we fixup a prefetch |
844 | * fault we could otherwise deadlock. | 1019 | * fault we could otherwise deadlock: |
845 | */ | 1020 | */ |
846 | bad_area_nosemaphore(regs, error_code, address); | 1021 | bad_area_nosemaphore(regs, error_code, address); |
1022 | |||
847 | return; | 1023 | return; |
848 | } | 1024 | } |
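Editor's note: the kernel-address branch tries the cheap recoveries in a fixed order, all without taking mmap_sem: a lazy vmalloc sync, then a stale-TLB (spurious) fault, then the kprobes hook, and only then the failure path. The PF_RSVD|PF_USER|PF_PROT mask is exactly the raw "(error_code & 4)" and "(error_code & 9)" tests named in the comment above. A toy control-flow model with stubbed helpers (each stub stands in for the kernel routine of the same name):

```c
#include <stdio.h>

#define PF_PROT (1 << 0)
#define PF_USER (1 << 2)
#define PF_RSVD (1 << 3)

/* Stubs; each returns nonzero when it fully handled the fault. */
static int vmalloc_fault(unsigned long addr)  { (void)addr; return -1; }
static int spurious_fault(unsigned long ec, unsigned long addr)
                                              { (void)ec; (void)addr; return 0; }
static int notify_page_fault(void)            { return 0; }
static void bad_area_nosemaphore(unsigned long addr)
                                              { printf("oops at %#lx\n", addr); }

static void kernel_address_fault(unsigned long error_code,
                                 unsigned long address)
{
	/* Only a kernel-mode, non-protection, non-reserved fault can
	 * be a lazy vmalloc sync: */
	if (!(error_code & (PF_RSVD | PF_USER | PF_PROT)) &&
	    vmalloc_fault(address) >= 0)
		return;

	if (spurious_fault(error_code, address))  /* stale RO->RW TLB */
		return;

	if (notify_page_fault())                  /* kprobes hook */
		return;

	bad_area_nosemaphore(address);            /* no mmap_sem taken */
}

int main(void)
{
	kernel_address_fault(0, 0xffffc20000000000UL);
	return 0;
}
```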
849 | 1025 | ||
1026 | /* kprobes don't want to hook the spurious faults: */ | ||
850 | if (unlikely(notify_page_fault(regs))) | 1027 | if (unlikely(notify_page_fault(regs))) |
851 | return; | 1028 | return; |
852 | /* | 1029 | /* |
@@ -854,22 +1031,22 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
854 | * vmalloc fault has been handled. | 1031 | * vmalloc fault has been handled. |
855 | * | 1032 | * |
856 | * User-mode registers count as a user access even for any | 1033 | * User-mode registers count as a user access even for any |
857 | * potential system fault or CPU buglet. | 1034 | * potential system fault or CPU buglet: |
858 | */ | 1035 | */ |
859 | if (user_mode_vm(regs)) { | 1036 | if (user_mode_vm(regs)) { |
860 | local_irq_enable(); | 1037 | local_irq_enable(); |
861 | error_code |= PF_USER; | 1038 | error_code |= PF_USER; |
862 | } else if (regs->flags & X86_EFLAGS_IF) | 1039 | } else { |
863 | local_irq_enable(); | 1040 | if (regs->flags & X86_EFLAGS_IF) |
1041 | local_irq_enable(); | ||
1042 | } | ||
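Editor's note: the #PF gate clears IF on entry, so the handler decides whether to turn interrupts back on. User-mode faults always re-enable (user code runs with IF set) and fold the privilege level into the error code; kernel faults re-enable only if the interrupted context had IF set. A toy model of that policy, with a stubbed local_irq_enable() and the standard EFLAGS.IF bit value:

```c
#include <stdio.h>

#define X86_EFLAGS_IF 0x00000200UL  /* interrupt-enable bit in (R|E)FLAGS */
#define PF_USER       (1UL << 2)

static void local_irq_enable(void) { puts("sti"); }

static unsigned long maybe_enable_irqs(int user_mode, unsigned long flags,
                                       unsigned long error_code)
{
	if (user_mode) {
		local_irq_enable();
		error_code |= PF_USER;  /* user-mode regs count as user access */
	} else if (flags & X86_EFLAGS_IF) {
		local_irq_enable();     /* restore the interrupted state only */
	}
	return error_code;
}

int main(void)
{
	printf("%#lx\n", maybe_enable_irqs(1, 0, 0));  /* "sti", then 0x4 */
	return 0;
}
```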
864 | 1043 | ||
865 | #ifdef CONFIG_X86_64 | ||
866 | if (unlikely(error_code & PF_RSVD)) | 1044 | if (unlikely(error_code & PF_RSVD)) |
867 | pgtable_bad(regs, error_code, address); | 1045 | pgtable_bad(regs, error_code, address); |
868 | #endif | ||
869 | 1046 | ||
870 | /* | 1047 | /* |
871 | * If we're in an interrupt, have no user context or are running in an | 1048 | * If we're in an interrupt, have no user context or are running |
872 | * atomic region then we must not take the fault. | 1049 | * in an atomic region then we must not take the fault: |
873 | */ | 1050 | */ |
874 | if (unlikely(in_atomic() || !mm)) { | 1051 | if (unlikely(in_atomic() || !mm)) { |
875 | bad_area_nosemaphore(regs, error_code, address); | 1052 | bad_area_nosemaphore(regs, error_code, address); |
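Editor's note: handle_mm_fault() may sleep, so a fault taken in atomic context (spinlock held, interrupt), or by a kernel thread with no mm, must bail out immediately. Roughly, in_atomic() tests whether the preempt count is nonzero; a toy model of that guard, not the kernel's exact definition:

```c
#include <stdio.h>

static int preempt_count;                       /* toy preempt counter */

static int in_atomic(void) { return preempt_count != 0; }

int main(void)
{
	preempt_count++;                        /* e.g. spin_lock() */
	printf("can fault: %s\n", in_atomic() ? "no" : "yes");
	preempt_count--;                        /* e.g. spin_unlock() */
	printf("can fault: %s\n", in_atomic() ? "no" : "yes");
	return 0;
}
```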
@@ -878,19 +1055,19 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
878 | 1055 | ||
879 | /* | 1056 | /* |
880 | * When running in the kernel we expect faults to occur only to | 1057 | * When running in the kernel we expect faults to occur only to |
881 | * addresses in user space. All other faults represent errors in the | 1058 | * addresses in user space. All other faults represent errors in |
882 | * kernel and should generate an OOPS. Unfortunately, in the case of an | 1059 | * the kernel and should generate an OOPS. Unfortunately, in the |
883 | * erroneous fault occurring in a code path which already holds mmap_sem | 1060 | * case of an erroneous fault occurring in a code path which already |
884 | * we will deadlock attempting to validate the fault against the | 1061 | * holds mmap_sem we will deadlock attempting to validate the fault |
885 | * address space. Luckily the kernel only validly references user | 1062 | * against the address space. Luckily the kernel only validly |
886 | * space from well defined areas of code, which are listed in the | 1063 | * references user space from well defined areas of code, which are |
887 | * exceptions table. | 1064 | * listed in the exceptions table. |
888 | * | 1065 | * |
889 | * As the vast majority of faults will be valid we will only perform | 1066 | * As the vast majority of faults will be valid we will only perform |
890 | * the source reference check when there is a possibility of a deadlock. | 1067 | * the source reference check when there is a possibility of a |
891 | * Attempt to lock the address space, if we cannot we then validate the | 1068 | * deadlock. Attempt to lock the address space, if we cannot we then |
892 | * source. If this is invalid we can skip the address space check, | 1069 | * validate the source. If this is invalid we can skip the address |
893 | * thus avoiding the deadlock. | 1070 | * space check, thus avoiding the deadlock: |
894 | */ | 1071 | */ |
895 | if (unlikely(!down_read_trylock(&mm->mmap_sem))) { | 1072 | if (unlikely(!down_read_trylock(&mm->mmap_sem))) { |
896 | if ((error_code & PF_USER) == 0 && | 1073 | if ((error_code & PF_USER) == 0 && |
@@ -899,6 +1076,13 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
899 | return; | 1076 | return; |
900 | } | 1077 | } |
901 | down_read(&mm->mmap_sem); | 1078 | down_read(&mm->mmap_sem); |
1079 | } else { | ||
1080 | /* | ||
1081 | * The above down_read_trylock() might have succeeded in | ||
1082 | * which case we'll have missed the might_sleep() from | ||
1083 | * down_read(): | ||
1084 | */ | ||
1085 | might_sleep(); | ||
902 | } | 1086 | } |
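Editor's note: the trylock-then-validate pattern above avoids a self-deadlock when buggy kernel code faults while already holding mmap_sem: on contention, a kernel-mode fault is only allowed to block if its faulting IP is in the exception table (a site known to legitimately touch user memory). The new else branch restores the might_sleep() debug check that down_read() would have performed but down_read_trylock() skips. A user-space model using a pthread rwlock in place of mmap_sem, with the exception-table lookup stubbed:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Stub: would check whether the faulting IP sits in the exception table. */
static int in_exception_table(unsigned long ip) { (void)ip; return 1; }

static int lock_mm_for_fault(int user_fault, unsigned long ip)
{
	if (pthread_rwlock_tryrdlock(&mmap_sem) != 0) {
		/*
		 * Contended: a kernel fault from outside the exception
		 * table may already hold mmap_sem, so blocking here
		 * could deadlock. Validate the source first.
		 */
		if (!user_fault && !in_exception_table(ip))
			return -1;                     /* bad area, no lock */
		pthread_rwlock_rdlock(&mmap_sem);      /* safe to block */
	}
	/* else: trylock succeeded; the kernel adds might_sleep() here */
	return 0;
}

int main(void)
{
	if (lock_mm_for_fault(1, 0) == 0) {
		puts("mmap_sem acquired");
		pthread_rwlock_unlock(&mmap_sem);
	}
	return 0;
}
```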
903 | 1087 | ||
904 | vma = find_vma(mm, address); | 1088 | vma = find_vma(mm, address); |
@@ -916,7 +1100,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
916 | /* | 1100 | /* |
917 | * Accessing the stack below %sp is always a bug. | 1101 | * Accessing the stack below %sp is always a bug. |
918 | * The large cushion allows instructions like enter | 1102 | * The large cushion allows instructions like enter |
919 | * and pusha to work. ("enter $65535,$31" pushes | 1103 | * and pusha to work. ("enter $65535, $31" pushes |
920 | * 32 pointers and then decrements %sp by 65535.) | 1104 | * 32 pointers and then decrements %sp by 65535.) |
921 | */ | 1105 | */ |
922 | if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { | 1106 | if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { |
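Editor's note: the magic numbers in this check come straight from the worst-case instruction cited in the comment: "enter $65535, $31" pushes 32 pointers and then drops %sp by 65535 bytes, so anything deeper below %sp than that cushion is treated as a bug. A two-line arithmetic check:

```c
#include <stdio.h>

int main(void)
{
	/* 32 pushed pointers plus the 65535-byte frame, rounded to 65536: */
	unsigned long cushion = 65536 + 32 * sizeof(unsigned long);

	printf("cushion = %lu bytes\n", cushion);  /* 65792 on LP64 */
	return 0;
}
```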
@@ -935,6 +1119,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
935 | */ | 1119 | */ |
936 | good_area: | 1120 | good_area: |
937 | write = error_code & PF_WRITE; | 1121 | write = error_code & PF_WRITE; |
1122 | |||
938 | if (unlikely(access_error(error_code, write, vma))) { | 1123 | if (unlikely(access_error(error_code, write, vma))) { |
939 | bad_area_access_error(regs, error_code, address); | 1124 | bad_area_access_error(regs, error_code, address); |
940 | return; | 1125 | return; |
@@ -943,75 +1128,21 @@ good_area: | |||
943 | /* | 1128 | /* |
944 | * If for any reason at all we couldn't handle the fault, | 1129 | * If for any reason at all we couldn't handle the fault, |
945 | * make sure we exit gracefully rather than endlessly redo | 1130 | * make sure we exit gracefully rather than endlessly redo |
946 | * the fault. | 1131 | * the fault: |
947 | */ | 1132 | */ |
948 | fault = handle_mm_fault(mm, vma, address, write); | 1133 | fault = handle_mm_fault(mm, vma, address, write); |
1134 | |||
949 | if (unlikely(fault & VM_FAULT_ERROR)) { | 1135 | if (unlikely(fault & VM_FAULT_ERROR)) { |
950 | mm_fault_error(regs, error_code, address, fault); | 1136 | mm_fault_error(regs, error_code, address, fault); |
951 | return; | 1137 | return; |
952 | } | 1138 | } |
1139 | |||
953 | if (fault & VM_FAULT_MAJOR) | 1140 | if (fault & VM_FAULT_MAJOR) |
954 | tsk->maj_flt++; | 1141 | tsk->maj_flt++; |
955 | else | 1142 | else |
956 | tsk->min_flt++; | 1143 | tsk->min_flt++; |
957 | 1144 | ||
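Editor's note: handle_mm_fault() returns a bitmask, and the handler splits it three ways: error bits abort via mm_fault_error(), VM_FAULT_MAJOR means the fault needed I/O (counted in maj_flt), anything else was satisfied from memory (min_flt). A decode sketch using this era's <linux/mm.h> values, shown here for illustration:

```c
#include <stdio.h>

#define VM_FAULT_OOM     0x0001
#define VM_FAULT_SIGBUS  0x0002
#define VM_FAULT_MAJOR   0x0004
#define VM_FAULT_ERROR   (VM_FAULT_OOM | VM_FAULT_SIGBUS)

int main(void)
{
	unsigned int fault = VM_FAULT_MAJOR;  /* e.g. page read in from disk */

	if (fault & VM_FAULT_ERROR)
		puts("error path: mm_fault_error()");
	else if (fault & VM_FAULT_MAJOR)
		puts("maj_flt++  (required I/O)");
	else
		puts("min_flt++  (satisfied from memory)");
	return 0;
}
```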
958 | #ifdef CONFIG_X86_32 | 1145 | check_v8086_mode(regs, address, tsk); |
959 | /* | ||
960 | * Did it hit the DOS screen memory VA from vm86 mode? | ||
961 | */ | ||
962 | if (v8086_mode(regs)) { | ||
963 | unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT; | ||
964 | if (bit < 32) | ||
965 | tsk->thread.screen_bitmap |= 1 << bit; | ||
966 | } | ||
967 | #endif | ||
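Editor's note: the removed vm86 block (now folded into the check_v8086_mode() helper) tracks which pages of DOS screen memory a vm86 task touched: the 32-bit screen_bitmap covers the 32 4-KiB pages from 0xA0000 to 0xBFFFF, one bit per page. A worked example of the bit arithmetic:

```c
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* Text-mode VGA memory at 0xB8000, inside the 0xA0000 window: */
	unsigned long address = 0xB8000;
	unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;

	unsigned int screen_bitmap = 0;
	if (bit < 32)                      /* only 32 pages are tracked */
		screen_bitmap |= 1u << bit;

	printf("bit=%lu bitmap=%#x\n", bit, screen_bitmap);  /* bit=24 */
	return 0;
}
```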
968 | up_read(&mm->mmap_sem); | ||
969 | } | ||
970 | |||
971 | DEFINE_SPINLOCK(pgd_lock); | ||
972 | LIST_HEAD(pgd_list); | ||
973 | |||
974 | void vmalloc_sync_all(void) | ||
975 | { | ||
976 | unsigned long address; | ||
977 | |||
978 | #ifdef CONFIG_X86_32 | ||
979 | if (SHARED_KERNEL_PMD) | ||
980 | return; | ||
981 | |||
982 | for (address = VMALLOC_START & PMD_MASK; | ||
983 | address >= TASK_SIZE && address < FIXADDR_TOP; | ||
984 | address += PMD_SIZE) { | ||
985 | unsigned long flags; | ||
986 | struct page *page; | ||
987 | |||
988 | spin_lock_irqsave(&pgd_lock, flags); | ||
989 | list_for_each_entry(page, &pgd_list, lru) { | ||
990 | if (!vmalloc_sync_one(page_address(page), | ||
991 | address)) | ||
992 | break; | ||
993 | } | ||
994 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
995 | } | ||
996 | #else /* CONFIG_X86_64 */ | ||
997 | for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END; | ||
998 | address += PGDIR_SIZE) { | ||
999 | const pgd_t *pgd_ref = pgd_offset_k(address); | ||
1000 | unsigned long flags; | ||
1001 | struct page *page; | ||
1002 | 1146 | ||
1003 | if (pgd_none(*pgd_ref)) | 1147 | up_read(&mm->mmap_sem); |
1004 | continue; | ||
1005 | spin_lock_irqsave(&pgd_lock, flags); | ||
1006 | list_for_each_entry(page, &pgd_list, lru) { | ||
1007 | pgd_t *pgd; | ||
1008 | pgd = (pgd_t *)page_address(page) + pgd_index(address); | ||
1009 | if (pgd_none(*pgd)) | ||
1010 | set_pgd(pgd, *pgd_ref); | ||
1011 | else | ||
1012 | BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); | ||
1013 | } | ||
1014 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
1015 | } | ||
1016 | #endif | ||
1017 | } | 1148 | } |
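Editor's note: the vmalloc_sync_all() implementation removed here (relocated elsewhere in this merge) kept every process's top-level page table in sync with the kernel's reference PGD for the vmalloc range: missing entries are copied in, existing ones must already agree. A toy model of the 64-bit loop, with plain arrays standing in for pgd_list and init_mm (compile with -std=c99):

```c
#include <assert.h>
#include <stdio.h>

#define NPROC 3   /* pretend pgd_list has three process PGDs */
#define NPGD  8   /* toy top-level table size */

static unsigned long pgd_ref[NPGD];     /* the kernel's reference PGD */
static unsigned long pgd[NPROC][NPGD];  /* per-process PGDs           */

static void vmalloc_sync_all(void)
{
	for (int i = 0; i < NPGD; i++) {
		if (!pgd_ref[i])
			continue;                        /* nothing mapped yet */
		for (int p = 0; p < NPROC; p++) {
			if (!pgd[p][i])
				pgd[p][i] = pgd_ref[i];  /* copy missing entry */
			else                             /* must already agree */
				assert(pgd[p][i] == pgd_ref[i]);
		}
	}
}

int main(void)
{
	pgd_ref[2] = 0xdead000;     /* a new kernel mapping appears */
	vmalloc_sync_all();
	printf("%#lx\n", pgd[1][2]);
	return 0;
}
```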