author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/tile/mm/fault.c
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)

Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'arch/tile/mm/fault.c')
 -rw-r--r--  arch/tile/mm/fault.c | 53
 1 file changed, 26 insertions(+), 27 deletions(-)
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 704f3e8a4385..25b7b90fd620 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -24,7 +24,6 @@
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
-#include <linux/smp_lock.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/tty.h>
@@ -44,8 +43,11 @@
 
 #include <arch/interrupts.h>
 
-static noinline void force_sig_info_fault(int si_signo, int si_code,
-	unsigned long address, int fault_num, struct task_struct *tsk)
+static noinline void force_sig_info_fault(const char *type, int si_signo,
+					  int si_code, unsigned long address,
+					  int fault_num,
+					  struct task_struct *tsk,
+					  struct pt_regs *regs)
 {
 	siginfo_t info;
 
@@ -60,23 +62,25 @@ static noinline void force_sig_info_fault(int si_signo, int si_code,
 	info.si_code = si_code;
 	info.si_addr = (void __user *)address;
 	info.si_trapno = fault_num;
+	trace_unhandled_signal(type, regs, address, si_signo);
 	force_sig_info(si_signo, &info, tsk);
 }
 
 #ifndef __tilegx__
 /*
  * Synthesize the fault a PL0 process would get by doing a word-load of
- * an unaligned address or a high kernel address.  Called indirectly
- * from sys_cmpxchg() in kernel/intvec.S.
+ * an unaligned address or a high kernel address.
  */
-int _sys_cmpxchg_badaddr(unsigned long address, struct pt_regs *regs)
+SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
+		struct pt_regs *, regs)
 {
 	if (address >= PAGE_OFFSET)
-		force_sig_info_fault(SIGSEGV, SEGV_MAPERR, address,
-				     INT_DTLB_MISS, current);
+		force_sig_info_fault("atomic segfault", SIGSEGV, SEGV_MAPERR,
+				     address, INT_DTLB_MISS, current, regs);
 	else
-		force_sig_info_fault(SIGBUS, BUS_ADRALN, address,
-				     INT_UNALIGN_DATA, current);
+		force_sig_info_fault("atomic alignment fault", SIGBUS,
+				     BUS_ADRALN, address,
+				     INT_UNALIGN_DATA, current, regs);
 
 	/*
 	 * Adjust pc to point at the actual instruction, which is unusual
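
The SYSCALL_DEFINE2() conversion above replaces the hand-rolled
_sys_cmpxchg_badaddr() entry point with the standard syscall-definition
macro. As a rough sketch -- simplified, and leaving out the syscall-tracing
metadata the real macro in <linux/syscalls.h> also emits -- the definition
expands to an ordinary asmlinkage function:

	#include <linux/linkage.h>	/* asmlinkage */

	/*
	 * Approximate expansion of
	 * SYSCALL_DEFINE2(cmpxchg_badaddr, unsigned long, address,
	 *                 struct pt_regs *, regs).
	 * Illustrative only; note the return type becomes long
	 * rather than the old int.
	 */
	asmlinkage long sys_cmpxchg_badaddr(unsigned long address,
					    struct pt_regs *regs)
	{
		/* function body as in the hunk above */
		return 0;
	}
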
@@ -291,7 +295,7 @@ static int handle_page_fault(struct pt_regs *regs,
 	/*
 	 * Early on, we need to check for migrating PTE entries;
 	 * see homecache.c.  If we find a migrating PTE, we wait until
-	 * the backing page claims to be done migrating, then we procede.
+	 * the backing page claims to be done migrating, then we proceed.
 	 * For kernel PTEs, we rewrite the PTE and return and retry.
 	 * Otherwise, we treat the fault like a normal "no PTE" fault,
 	 * rather than trying to patch up the existing PTE.
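
Besides the typo fix ("procede" to "proceed"), the comment in this hunk
documents the migration protocol: on a migrating PTE the handler spins
until homecache.c marks the page done, then retries kernel PTEs in place
but treats user PTEs as a normal "no PTE" fault. A hypothetical
condensation of the wait step, for orientation only -- the accessor name
pte_migrating() follows the tile headers, but this is not the literal
fault.c code:

	#include <asm/pgtable.h>	/* pte_migrating() */
	#include <asm/processor.h>	/* cpu_relax() */

	/* Busy-wait until homecache.c clears the migrating bit. */
	static void wait_until_done_migrating(pte_t *ptep)
	{
		while (pte_migrating(*ptep))
			cpu_relax();
	}
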
@@ -472,8 +476,8 @@ bad_area_nosemaphore:
 		 */
 		local_irq_enable();
 
-		force_sig_info_fault(SIGSEGV, si_code, address,
-				     fault_num, tsk);
+		force_sig_info_fault("segfault", SIGSEGV, si_code, address,
+				     fault_num, tsk, regs);
 		return 0;
 	}
 
@@ -548,7 +552,8 @@ do_sigbus:
 	if (is_kernel_mode)
 		goto no_context;
 
-	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, fault_num, tsk);
+	force_sig_info_fault("bus error", SIGBUS, BUS_ADRERR, address,
+			     fault_num, tsk, regs);
 	return 0;
 }
 
@@ -563,10 +568,10 @@ do_sigbus:
 /*
  * When we take an ITLB or DTLB fault or access violation in the
  * supervisor while the critical section bit is set, the hypervisor is
- * reluctant to write new values into the EX_CONTEXT_1_x registers,
+ * reluctant to write new values into the EX_CONTEXT_K_x registers,
  * since that might indicate we have not yet squirreled the SPR
  * contents away and can thus safely take a recursive interrupt.
- * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_1_2.
+ * Accordingly, the hypervisor passes us the PC via SYSTEM_SAVE_K_2.
 *
 * Note that this routine is called before homecache_tlb_defer_enter(),
 * which means that we can properly unlock any atomics that might
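
The _1_ to _K_ renames in this comment track the hypervisor ABI change
that made the kernel's protection level symbolic ("K") rather than
hard-coded as PL 1. Illustrative only: under ICS, a handler would recover
the faulting PC with something like

	/* SPR name per <arch/spr_def.h> after the rename; sketch only. */
	unsigned long pc = __insn_mfspr(SPR_SYSTEM_SAVE_K_2);
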
@@ -610,7 +615,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
 	 * fault.  We didn't set up a kernel stack on initial entry to
 	 * sys_cmpxchg, but instead had one set up by the fault, which
 	 * (because sys_cmpxchg never releases ICS) came to us via the
-	 * SYSTEM_SAVE_1_2 mechanism, and thus EX_CONTEXT_1_[01] are
+	 * SYSTEM_SAVE_K_2 mechanism, and thus EX_CONTEXT_K_[01] are
 	 * still referencing the original user code.  We release the
 	 * atomic lock and rewrite pt_regs so that it appears that we
 	 * came from user-space directly, and after we finish the
@@ -656,14 +661,6 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
 	}
 
 	/*
-	 * NOTE: the one other type of access that might bring us here
-	 * are the memory ops in __tns_atomic_acquire/__tns_atomic_release,
-	 * but we don't have to check specially for them since we can
-	 * always safely return to the address of the fault and retry,
-	 * since no separate atomic locks are involved.
-	 */
-
-	/*
 	 * Now that we have released the atomic lock (if necessary),
 	 * it's safe to spin if the PTE that caused the fault was migrating.
 	 */
@@ -741,6 +738,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
 		panic("Bad fault number %d in do_page_fault", fault_num);
 	}
 
+#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
 	if (EX1_PL(regs->ex1) != USER_PL) {
 		struct async_tlb *async;
 		switch (fault_num) {
@@ -784,6 +782,7 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
 			return;
 		}
 	}
+#endif
 
 	handle_page_fault(regs, fault_num, is_page_fault, address, write);
 }
@@ -810,8 +809,6 @@ static void handle_async_page_fault(struct pt_regs *regs,
 				  async->address, async->is_write);
 	}
 }
-#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
-
 
 /*
  * This routine effectively re-issues asynchronous page faults
@@ -833,6 +830,8 @@ void do_async_page_fault(struct pt_regs *regs)
 	handle_async_page_fault(regs, &current->thread.sn_async_tlb);
 #endif
 }
+#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
+
 
 void vmalloc_sync_all(void)
 {