-rw-r--r--   arch/x86/mm/fault_32.c   16
-rw-r--r--   arch/x86/mm/fault_64.c    7
2 files changed, 9 insertions, 14 deletions
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index 36cb67e02b04..52c13d2e011e 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -295,13 +295,18 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
          */
         trace_hardirqs_fixup();
 
+        tsk = current;
+        mm = tsk->mm;
+        prefetchw(&mm->mmap_sem);
+
         /* get the address */
         address = read_cr2();
 
-        tsk = current;
-
         si_code = SEGV_MAPERR;
 
+        if (notify_page_fault(regs))
+                return;
+
         /*
          * We fault-in kernel-space virtual memory on-demand. The
          * 'reference' page table is init_mm.pgd.
@@ -319,8 +324,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
         if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
             vmalloc_fault(address) >= 0)
                 return;
-        if (notify_page_fault(regs))
-                return;
         /*
          * Don't take the mm semaphore here. If we fixup a prefetch
          * fault we could otherwise deadlock.
@@ -328,16 +331,11 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
                 goto bad_area_nosemaphore;
         }
 
-        if (notify_page_fault(regs))
-                return;
-
         /* It's safe to allow irq's after cr2 has been saved and the vmalloc
            fault has been handled. */
         if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
                 local_irq_enable();
 
-        mm = tsk->mm;
-
         /*
          * If we're in an interrupt, have no user context or are running in an
          * atomic region then we must not take the fault.
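
The first fault_32.c hunk does more than relocate the notifier call: it hoists the mm lookup to the top of do_page_fault() and issues prefetchw(&mm->mmap_sem) immediately. prefetchw() pulls a cache line in exclusive (writable) state, so the miss latency of the eventual down_read(&mm->mmap_sem) is hidden behind the address read, the notifier check, and the vmalloc fast path. A minimal illustrative sketch of that idea, using stub types and GCC's __builtin_prefetch in place of the kernel's prefetchw() helper (all names below are invented for illustration):

/* Illustrative stubs only -- not the kernel's real types. */
struct rw_semaphore { long count; };
struct mm_struct { struct rw_semaphore mmap_sem; };

static void fault_prologue(struct mm_struct *mm)
{
        /* Second argument 1 = prefetch for write, mirroring prefetchw():
         * fetch the semaphore's cache line in exclusive state now so a
         * later down_read(&mm->mmap_sem) does not stall on the miss. */
        __builtin_prefetch(&mm->mmap_sem, 1);

        /* ... read_cr2(), notify_page_fault(), and the vmalloc fast path
         * would run here, overlapping with the prefetch ... */
}
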
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 80f8436ac8b2..c6b3ad515cf1 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -355,6 +355,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 
         si_code = SEGV_MAPERR;
 
+        if (notify_page_fault(regs))
+                return;
 
         /*
          * We fault-in kernel-space virtual memory on-demand. The
@@ -380,8 +382,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                 if (vmalloc_fault(address) >= 0)
                         return;
         }
-        if (notify_page_fault(regs))
-                return;
         /*
          * Don't take the mm semaphore here. If we fixup a prefetch
          * fault we could otherwise deadlock.
@@ -389,9 +389,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                 goto bad_area_nosemaphore;
         }
 
-        if (notify_page_fault(regs))
-                return;
-
         if (likely(regs->flags & X86_EFLAGS_IF))
                 local_irq_enable();
 
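
Taken together, the two files converge on the same shape: a single notify_page_fault() check sits near the top of do_page_fault(), ahead of the vmalloc fast path and ahead of local_irq_enable(), replacing the two scattered call sites each file carried before. A self-contained userspace sketch of the resulting control flow; every helper below is a stub invented for illustration, not the kernel's API:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the kernel helpers this patch reorders. */
static bool notify_page_fault_stub(void)
{
        return false; /* kprobes hook: true would mean "fault consumed" */
}

static bool vmalloc_fault_stub(unsigned long address)
{
        return address >= 0xc0000000UL; /* pretend kernel mappings resolve */
}

static void do_page_fault_sketch(unsigned long address, bool kernel_address)
{
        /* 1. Gather task state first (the 32-bit side also prefetches
         *    mmap_sem at this point). */

        /* 2. One early notifier check now covers every path below. */
        if (notify_page_fault_stub())
                return;

        /* 3. Kernel vmalloc faults are fixed up without taking mmap_sem. */
        if (kernel_address && vmalloc_fault_stub(address)) {
                printf("%#lx: vmalloc fault fixed up\n", address);
                return;
        }

        /* 4. Only now enable irqs and take the mmap_sem-protected slow path. */
        printf("%#lx: normal path, take mmap_sem\n", address);
}

int main(void)
{
        do_page_fault_sketch(0xc1000000UL, true);  /* kernel mapping */
        do_page_fault_sketch(0x08048000UL, false); /* user mapping */
        return 0;
}
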