author     Paul Mundt <lethal@linux-sh.org>    2009-07-04 14:18:47 -0400
committer  Paul Mundt <lethal@linux-sh.org>    2009-07-04 14:18:47 -0400
commit     0f60bb25b4036d30fd795709be09626c58c52464
tree       b15cd9771c5cff9349d510670cc51bcc64c318a7
parent     c63c3105e4991b2991ba73a742b8b59bfdbe4acd
sh: Tidy up vmalloc fault handling.
This rewrites the vmalloc fault handling as per x86, which subsequently
allows for easy future tie-in for vmalloc_sync_all().

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
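The vmalloc_sync_one() helper this patch introduces is the building block that a future vmalloc_sync_all() would loop over. As a rough illustration only, the x86 version being mirrored walks the vmalloc range one PGDIR_SIZE step at a time and syncs each live page directory; the pgd_list/pgd_lock machinery below is borrowed from x86 as an assumption and exists neither on sh nor in this patch:

	/*
	 * Hypothetical sketch of the future vmalloc_sync_all() tie-in,
	 * modelled on x86: push init_mm's vmalloc-area page tables into
	 * every process pgd via vmalloc_sync_one(). pgd_list and pgd_lock
	 * are x86 constructs, not part of this patch.
	 */
	void vmalloc_sync_all(void)
	{
		unsigned long address;

		for (address = VMALLOC_START; address < VMALLOC_END;
		     address += PGDIR_SIZE) {
			struct page *page;

			spin_lock(&pgd_lock);
			list_for_each_entry(page, &pgd_list, lru) {
				/* Stop at the first pgd that cannot be synced */
				if (!vmalloc_sync_one(page_address(page), address))
					break;
			}
			spin_unlock(&pgd_lock);
		}
	}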
Diffstat (limited to 'arch/sh/mm/fault_32.c')
 -rw-r--r--  arch/sh/mm/fault_32.c  153
 1 file changed, 97 insertions(+), 56 deletions(-)
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index ce75b8882efb..08d0117d90fa 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -2,7 +2,7 @@
  * Page fault handler for SH with an MMU.
  *
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 - 2008 Paul Mundt
+ * Copyright (C) 2003 - 2009 Paul Mundt
  *
  * Based on linux/arch/i386/mm/fault.c:
  *  Copyright (C) 1995 Linus Torvalds
@@ -35,6 +35,74 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
 	return ret;
 }
 
+static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
+{
+	unsigned index = pgd_index(address);
+	pgd_t *pgd_k;
+	pud_t *pud, *pud_k;
+	pmd_t *pmd, *pmd_k;
+
+	pgd += index;
+	pgd_k = init_mm.pgd + index;
+
+	if (!pgd_present(*pgd_k))
+		return NULL;
+
+	pud = pud_offset(pgd, address);
+	pud_k = pud_offset(pgd_k, address);
+	if (!pud_present(*pud_k))
+		return NULL;
+
+	pmd = pmd_offset(pud, address);
+	pmd_k = pmd_offset(pud_k, address);
+	if (!pmd_present(*pmd_k))
+		return NULL;
+
+	if (!pmd_present(*pmd))
+		set_pmd(pmd, *pmd_k);
+	else
+		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+
+	return pmd_k;
+}
+
+/*
+ * Handle a fault on the vmalloc or module mapping area
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+	pgd_t *pgd_k;
+	pmd_t *pmd_k;
+	pte_t *pte_k;
+
+	/* Make sure we are in vmalloc area: */
+	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+		return -1;
+
+	/*
+	 * Synchronize this task's top level page-table
+	 * with the 'reference' page table.
+	 *
+	 * Do _not_ use "current" here. We might be inside
+	 * an interrupt in the middle of a task switch..
+	 */
+	pgd_k = get_TTB();
+	pmd_k = vmalloc_sync_one(__va((unsigned long)pgd_k), address);
+	if (!pmd_k)
+		return -1;
+
+	pte_k = pte_offset_kernel(pmd_k, address);
+	if (!pte_present(*pte_k))
+		return -1;
+
+	return 0;
+}
+
+static int fault_in_kernel_space(unsigned long address)
+{
+	return address >= TASK_SIZE;
+}
+
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -44,6 +112,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 					unsigned long writeaccess,
 					unsigned long address)
 {
+	unsigned long vec;
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
@@ -51,59 +120,30 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	int fault;
 	siginfo_t info;
 
-	/*
-	 * We don't bother with any notifier callbacks here, as they are
-	 * all handled through the __do_page_fault() fast-path.
-	 */
-
 	tsk = current;
+	mm = tsk->mm;
 	si_code = SEGV_MAPERR;
+	vec = lookup_exception_vector();
 
-	if (unlikely(address >= TASK_SIZE)) {
-		/*
-		 * Synchronize this task's top level page-table
-		 * with the 'reference' page table.
-		 *
-		 * Do _not_ use "tsk" here. We might be inside
-		 * an interrupt in the middle of a task switch..
-		 */
-		int offset = pgd_index(address);
-		pgd_t *pgd, *pgd_k;
-		pud_t *pud, *pud_k;
-		pmd_t *pmd, *pmd_k;
-
-		pgd = get_TTB() + offset;
-		pgd_k = swapper_pg_dir + offset;
-
-		if (!pgd_present(*pgd)) {
-			if (!pgd_present(*pgd_k))
-				goto bad_area_nosemaphore;
-			set_pgd(pgd, *pgd_k);
+	/*
+	 * We fault-in kernel-space virtual memory on-demand. The
+	 * 'reference' page table is init_mm.pgd.
+	 *
+	 * NOTE! We MUST NOT take any locks for this case. We may
+	 * be in an interrupt or a critical region, and should
+	 * only copy the information from the master page table,
+	 * nothing more.
+	 */
+	if (unlikely(fault_in_kernel_space(address))) {
+		if (vmalloc_fault(address) >= 0)
 			return;
-		}
-
-		pud = pud_offset(pgd, address);
-		pud_k = pud_offset(pgd_k, address);
-
-		if (!pud_present(*pud)) {
-			if (!pud_present(*pud_k))
-				goto bad_area_nosemaphore;
-			set_pud(pud, *pud_k);
+		if (notify_page_fault(regs, vec))
 			return;
-		}
-
-		pmd = pmd_offset(pud, address);
-		pmd_k = pmd_offset(pud_k, address);
-		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
-			goto bad_area_nosemaphore;
-		set_pmd(pmd, *pmd_k);
 
-		return;
+		goto bad_area_nosemaphore;
 	}
 
-	mm = tsk->mm;
-
-	if (unlikely(notify_page_fault(regs, lookup_exception_vector())))
+	if (unlikely(notify_page_fault(regs, vec)))
 		return;
 
 	/* Only enable interrupts if they were on before the fault */
@@ -113,8 +153,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 
 	/*
-	 * If we're in an interrupt or have no user
-	 * context, we must not take the fault..
+	 * If we're in an interrupt, have no user context or are running
+	 * in an atomic region then we must not take the fault:
 	 */
 	if (in_atomic() || !mm)
 		goto no_context;
@@ -130,10 +170,11 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area;
 	if (expand_stack(vma, address))
 		goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
+
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
 good_area:
 	si_code = SEGV_ACCERR;
 	if (writeaccess) {
@@ -171,10 +212,10 @@ survive:
 	up_read(&mm->mmap_sem);
 	return;
 
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
+	/*
+	 * Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
 bad_area:
 	up_read(&mm->mmap_sem);
 
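For readers skimming the patch, the kernel-space dispatch it installs in do_page_fault() condenses to the following; this is a restatement of the hunks above with explanatory comments, not additional patch content:

	if (unlikely(fault_in_kernel_space(address))) {
		/* Lazily copy the entry from init_mm's reference tables. */
		if (vmalloc_fault(address) >= 0)
			return;
		/* Let kprobes et al. claim the fault. */
		if (notify_page_fault(regs, vec))
			return;
		/* A genuine bad kernel access; no mmap_sem held yet. */
		goto bad_area_nosemaphore;
	}

Note that vmalloc_fault() is attempted before the kprobes notifier, matching the x86 ordering the changelog refers to, and that nothing in this path takes mmap_sem or dereferences current->mm.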