author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2009-12-07 06:51:45 -0500
committer	Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>	2009-12-07 06:51:33 -0500
commit		50d7280d430484a890ddcadc7f738b5b6dd28bf1 (patch)
tree		5faf7fc17d5980f43bbddcf373eeae4c2050d4cd /arch/s390
parent		7ecb344ae80bc03397ded3b004e06ecfe32becf9 (diff)
[S390] fault handler performance optimization.
Slim down the do_exception function to handle only the fast path of a
fault and move the exceptional cases into a new function. That slightly
increases the performance of the fault handling.

Build fix for !CONFIG_COMPAT by
Kamalesh Babulal <kamalesh@linux.vnet.ibm.com>

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
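The shape of the patch is the classic fast-path/slow-path split: do_exception now only classifies what went wrong and returns a code (VM_FAULT_BADCONTEXT, VM_FAULT_BADMAP, VM_FAULT_BADACCESS, or the generic VM_FAULT_* error bits), while a new noinline do_fault_error does all the rare, expensive work, so each caller reduces to "fault = do_exception(...); if (unlikely(fault)) do_fault_error(...);". A minimal standalone sketch of that pattern — illustrative names and toy classification logic, not the kernel code itself:

/*
 * Sketch of the fast-path/slow-path split used by this patch.
 * The flag values mirror the patch's VM_FAULT_BAD* definitions;
 * everything else here is a hypothetical stand-in.
 */
#include <stdio.h>

#define FAULT_BADCONTEXT 0x010000  /* no usable mm, e.g. in_atomic() */
#define FAULT_BADMAP     0x020000  /* no VMA covers the address */
#define FAULT_BADACCESS  0x040000  /* VMA found, wrong permissions */

/* Fast path: resolve the common case inline, only *classify* failures. */
static inline int handle_fault_fast(unsigned long address, int write)
{
	if (address < 0x1000)
		return FAULT_BADMAP;	/* rare: report, don't handle */
	if (write && (address & 1))
		return FAULT_BADACCESS;	/* rare: report, don't handle */
	return 0;			/* common case: resolved */
}

/* Slow path: kept out of line so the fast path stays small and hot. */
static __attribute__((noinline)) void handle_fault_error(int fault)
{
	switch (fault) {
	case FAULT_BADMAP:
		puts("deliver SIGSEGV with SEGV_MAPERR");
		break;
	case FAULT_BADACCESS:
		puts("deliver SIGSEGV with SEGV_ACCERR");
		break;
	default:
		puts("kernel context: run fixup or die");
		break;
	}
}

int main(void)
{
	int fault = handle_fault_fast(0x42, 1);	/* low address -> BADMAP */

	if (fault)	/* the unlikely() branch in the real handlers */
		handle_fault_error(fault);
	return 0;
}

Keeping the error handling out of line shrinks the inlined fast path and lets the compiler lay the hot code out contiguously, which is where the slight performance gain mentioned above comes from.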
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/mm/fault.c	258
1 file changed, 129 insertions(+), 129 deletions(-)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fd72c269cdb4..0dcfcfb5b5be 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -34,16 +34,15 @@
 #include <asm/pgtable.h>
 #include <asm/s390_ext.h>
 #include <asm/mmu_context.h>
+#include <asm/compat.h>
 #include "../kernel/entry.h"
 
 #ifndef CONFIG_64BIT
 #define __FAIL_ADDR_MASK  0x7ffff000
-#define __FIXUP_MASK      0x7fffffff
 #define __SUBCODE_MASK    0x0200
 #define __PF_RES_FIELD    0ULL
 #else /* CONFIG_64BIT */
 #define __FAIL_ADDR_MASK  -4096L
-#define __FIXUP_MASK      ~0L
 #define __SUBCODE_MASK    0x0600
 #define __PF_RES_FIELD    0x8000000000000000ULL
 #endif /* CONFIG_64BIT */
@@ -52,6 +51,10 @@
52extern int sysctl_userprocess_debug; 51extern int sysctl_userprocess_debug;
53#endif 52#endif
54 53
54#define VM_FAULT_BADCONTEXT 0x010000
55#define VM_FAULT_BADMAP 0x020000
56#define VM_FAULT_BADACCESS 0x040000
57
55static inline int notify_page_fault(struct pt_regs *regs) 58static inline int notify_page_fault(struct pt_regs *regs)
56{ 59{
57 int ret = 0; 60 int ret = 0;
@@ -122,18 +125,22 @@ static inline int user_space_fault(unsigned long trans_exc_code)
  * Send SIGSEGV to task. This is an external routine
  * to keep the stack usage of do_page_fault small.
  */
-static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
-		       int si_code, unsigned long address)
+static noinline void do_sigsegv(struct pt_regs *regs, long int_code,
+				int si_code, unsigned long trans_exc_code)
 {
 	struct siginfo si;
+	unsigned long address;
 
+	address = trans_exc_code & __FAIL_ADDR_MASK;
+	current->thread.prot_addr = address;
+	current->thread.trap_no = int_code;
 #if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
 #if defined(CONFIG_SYSCTL)
 	if (sysctl_userprocess_debug)
 #endif
 	{
 		printk("User process fault: interruption code 0x%lX\n",
-		       error_code);
+		       int_code);
 		printk("failing address: %lX\n", address);
 		show_regs(regs);
 	}
@@ -144,14 +151,14 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
 	force_sig_info(SIGSEGV, &si, current);
 }
 
-static void do_no_context(struct pt_regs *regs, unsigned long error_code,
+static noinline void do_no_context(struct pt_regs *regs, long int_code,
 			  unsigned long trans_exc_code)
 {
 	const struct exception_table_entry *fixup;
 	unsigned long address;
 
 	/* Are we prepared to handle this kernel fault? */
-	fixup = search_exception_tables(regs->psw.addr & __FIXUP_MASK);
+	fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
 	if (fixup) {
 		regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
 		return;
@@ -169,107 +176,127 @@ static void do_no_context(struct pt_regs *regs, unsigned long error_code,
 	printk(KERN_ALERT "Unable to handle kernel paging request"
 	       " at virtual user address %p\n", (void *)address);
 
-	die("Oops", regs, error_code);
+	die("Oops", regs, int_code);
 	do_exit(SIGKILL);
 }
 
-static void do_low_address(struct pt_regs *regs, unsigned long error_code,
+static noinline void do_low_address(struct pt_regs *regs, long int_code,
 			   unsigned long trans_exc_code)
 {
 	/* Low-address protection hit in kernel mode means
 	   NULL pointer write access in kernel mode. */
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
 		/* Low-address protection hit in user mode 'cannot happen'. */
-		die ("Low-address protection", regs, error_code);
+		die ("Low-address protection", regs, int_code);
 		do_exit(SIGKILL);
 	}
 
-	do_no_context(regs, error_code, trans_exc_code);
+	do_no_context(regs, int_code, trans_exc_code);
 }
 
-static void do_sigbus(struct pt_regs *regs, unsigned long error_code,
+static noinline void do_sigbus(struct pt_regs *regs, long int_code,
 		      unsigned long trans_exc_code)
 {
 	struct task_struct *tsk = current;
-	struct mm_struct *mm = tsk->mm;
 
-	up_read(&mm->mmap_sem);
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
 	tsk->thread.prot_addr = trans_exc_code & __FAIL_ADDR_MASK;
-	tsk->thread.trap_no = error_code;
+	tsk->thread.trap_no = int_code;
 	force_sig(SIGBUS, tsk);
-
-	/* Kernel mode? Handle exceptions or die */
-	if (!(regs->psw.mask & PSW_MASK_PSTATE))
-		do_no_context(regs, error_code, trans_exc_code);
 }
 
 #ifdef CONFIG_S390_EXEC_PROTECT
-static int signal_return(struct mm_struct *mm, struct pt_regs *regs,
-			 unsigned long address, unsigned long error_code)
+static noinline int signal_return(struct pt_regs *regs, long int_code,
+				  unsigned long trans_exc_code)
 {
 	u16 instruction;
 	int rc;
-#ifdef CONFIG_COMPAT
-	int compat;
-#endif
 
-	pagefault_disable();
 	rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
-	pagefault_enable();
-	if (rc)
-		return -EFAULT;
 
-	up_read(&mm->mmap_sem);
-	clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
-#ifdef CONFIG_COMPAT
-	compat = is_compat_task();
-	if (compat && instruction == 0x0a77)
-		sys32_sigreturn();
-	else if (compat && instruction == 0x0aad)
-		sys32_rt_sigreturn();
-	else
-#endif
-	if (instruction == 0x0a77)
-		sys_sigreturn();
-	else if (instruction == 0x0aad)
-		sys_rt_sigreturn();
-	else {
-		current->thread.prot_addr = address;
-		current->thread.trap_no = error_code;
-		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
-	}
+	if (!rc && instruction == 0x0a77) {
+		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		if (is_compat_task())
+			sys32_sigreturn();
+		else
+			sys_sigreturn();
+	} else if (!rc && instruction == 0x0aad) {
+		clear_tsk_thread_flag(current, TIF_SINGLE_STEP);
+		if (is_compat_task())
+			sys32_rt_sigreturn();
+		else
+			sys_rt_sigreturn();
+	} else
+		do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
 	return 0;
 }
 #endif /* CONFIG_S390_EXEC_PROTECT */
 
+static noinline void do_fault_error(struct pt_regs *regs, long int_code,
+				    unsigned long trans_exc_code, int fault)
+{
+	int si_code;
+
+	switch (fault) {
+	case VM_FAULT_BADACCESS:
+#ifdef CONFIG_S390_EXEC_PROTECT
+		if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
+		    (trans_exc_code & 3) == 0) {
+			signal_return(regs, int_code, trans_exc_code);
+			break;
+		}
+#endif /* CONFIG_S390_EXEC_PROTECT */
+	case VM_FAULT_BADMAP:
+		/* Bad memory access. Check if it is kernel or user space. */
+		if (regs->psw.mask & PSW_MASK_PSTATE) {
+			/* User mode accesses just cause a SIGSEGV */
+			si_code = (fault == VM_FAULT_BADMAP) ?
+				SEGV_MAPERR : SEGV_ACCERR;
+			do_sigsegv(regs, int_code, si_code, trans_exc_code);
+			return;
+		}
+	case VM_FAULT_BADCONTEXT:
+		do_no_context(regs, int_code, trans_exc_code);
+		break;
+	default: /* fault & VM_FAULT_ERROR */
+		if (fault & VM_FAULT_OOM)
+			pagefault_out_of_memory();
+		else if (fault & VM_FAULT_SIGBUS) {
+			do_sigbus(regs, int_code, trans_exc_code);
+			/* Kernel mode? Handle exceptions or die */
+			if (!(regs->psw.mask & PSW_MASK_PSTATE))
+				do_no_context(regs, int_code, trans_exc_code);
+		} else
+			BUG();
+		break;
+	}
+}
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  *
- * error_code:
+ * interruption code (int_code):
  *   04       Protection           -> Write-Protection  (suprression)
 *   10       Segment translation  -> Not present       (nullification)
 *   11       Page translation     -> Not present       (nullification)
 *   3b       Region third trans.  -> Not present       (nullification)
 */
-static inline void
-do_exception(struct pt_regs *regs, unsigned long error_code, int write,
-	     unsigned long trans_exc_code)
+static inline int do_exception(struct pt_regs *regs, int write,
+			       unsigned long trans_exc_code)
 {
 	struct task_struct *tsk;
 	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	unsigned long address;
-	int si_code;
 	int fault;
 
 	if (notify_page_fault(regs))
-		return;
+		return 0;
 
 	tsk = current;
 	mm = tsk->mm;
@@ -279,8 +306,9 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write,
 	 * we are not in an interrupt and that there is a
 	 * user context.
 	 */
+	fault = VM_FAULT_BADCONTEXT;
 	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
-		goto no_context;
+		goto out;
 
 	address = trans_exc_code & __FAIL_ADDR_MASK;
 	/*
@@ -292,41 +320,35 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int write,
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
 	down_read(&mm->mmap_sem);
 
-	si_code = SEGV_MAPERR;
+	fault = VM_FAULT_BADMAP;
 	vma = find_vma(mm, address);
 	if (!vma)
-		goto bad_area;
+		goto out_up;
 
+	if (unlikely(vma->vm_start > address)) {
+		if (!(vma->vm_flags & VM_GROWSDOWN))
+			goto out_up;
+		if (expand_stack(vma, address))
+			goto out_up;
+	}
+
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+	fault = VM_FAULT_BADACCESS;
 #ifdef CONFIG_S390_EXEC_PROTECT
 	if (unlikely((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_SECONDARY &&
 		     (trans_exc_code & 3) == 0 && !(vma->vm_flags & VM_EXEC)))
-		if (!signal_return(mm, regs, address, error_code))
-			/*
-			 * signal_return() has done an up_read(&mm->mmap_sem)
-			 * if it returns 0.
-			 */
-			return;
+		goto out_up;
 #endif
-
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
-good_area:
-	si_code = SEGV_ACCERR;
 	if (!write) {
 		/* page not present, check vm flags */
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-			goto bad_area;
+			goto out_up;
 	} else {
 		if (!(vma->vm_flags & VM_WRITE))
-			goto bad_area;
+			goto out_up;
 	}
 
 	if (is_vm_hugetlb_page(vma))
@@ -337,17 +359,9 @@ good_area:
 	 * the fault.
 	 */
 	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM) {
-			up_read(&mm->mmap_sem);
-			pagefault_out_of_memory();
-			return;
-		} else if (fault & VM_FAULT_SIGBUS) {
-			do_sigbus(regs, error_code, address);
-			return;
-		}
-		BUG();
-	}
+	if (unlikely(fault & VM_FAULT_ERROR))
+		goto out_up;
+
 	if (fault & VM_FAULT_MAJOR) {
 		tsk->maj_flt++;
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
@@ -357,67 +371,55 @@ good_area:
 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
 			      regs, address);
 	}
-	up_read(&mm->mmap_sem);
 	/*
 	 * The instruction that caused the program check will
 	 * be repeated. Don't signal single step via SIGTRAP.
 	 */
 	clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP);
-	return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
+	fault = 0;
+out_up:
 	up_read(&mm->mmap_sem);
-
-	/* User mode accesses just cause a SIGSEGV */
-	if (regs->psw.mask & PSW_MASK_PSTATE) {
-		tsk->thread.prot_addr = address;
-		tsk->thread.trap_no = error_code;
-		do_sigsegv(regs, error_code, si_code, address);
-		return;
-	}
-
-no_context:
-	do_no_context(regs, error_code, trans_exc_code);
+out:
+	return fault;
 }
 
-void __kprobes do_protection_exception(struct pt_regs *regs,
-				       long error_code)
+void __kprobes do_protection_exception(struct pt_regs *regs, long int_code)
 {
 	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+	int fault;
 
 	/* Protection exception is supressing, decrement psw address. */
-	regs->psw.addr -= (error_code >> 16);
+	regs->psw.addr -= (int_code >> 16);
 	/*
 	 * Check for low-address protection. This needs to be treated
 	 * as a special case because the translation exception code
 	 * field is not guaranteed to contain valid data in this case.
 	 */
 	if (unlikely(!(trans_exc_code & 4))) {
-		do_low_address(regs, error_code, trans_exc_code);
+		do_low_address(regs, int_code, trans_exc_code);
 		return;
 	}
-	do_exception(regs, 4, 1, trans_exc_code);
+	fault = do_exception(regs, 1, trans_exc_code);
+	if (unlikely(fault))
+		do_fault_error(regs, 4, trans_exc_code, fault);
 }
 
-void __kprobes do_dat_exception(struct pt_regs *regs, long error_code)
+void __kprobes do_dat_exception(struct pt_regs *regs, long int_code)
 {
-	do_exception(regs, error_code & 0xff, 0, S390_lowcore.trans_exc_code);
+	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
+	int fault;
+
+	fault = do_exception(regs, 0, trans_exc_code);
+	if (unlikely(fault))
+		do_fault_error(regs, int_code & 255, trans_exc_code, fault);
 }
 
 #ifdef CONFIG_64BIT
-void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
+void __kprobes do_asce_exception(struct pt_regs *regs, long int_code)
 {
 	unsigned long trans_exc_code = S390_lowcore.trans_exc_code;
-	struct mm_struct *mm;
+	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long address;
-
-	mm = current->mm;
-	address = trans_exc_code & __FAIL_ADDR_MASK;
 
 	if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
 		goto no_context;
@@ -425,7 +427,7 @@ void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
 	local_irq_enable();
 
 	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, address);
+	vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK);
 	up_read(&mm->mmap_sem);
 
 	if (vma) {
@@ -435,14 +437,12 @@ void __kprobes do_asce_exception(struct pt_regs *regs, unsigned long error_code)
 
 	/* User mode accesses just cause a SIGSEGV */
 	if (regs->psw.mask & PSW_MASK_PSTATE) {
-		current->thread.prot_addr = address;
-		current->thread.trap_no = error_code;
-		do_sigsegv(regs, error_code, SEGV_MAPERR, address);
+		do_sigsegv(regs, int_code, SEGV_MAPERR, trans_exc_code);
 		return;
 	}
 
 no_context:
-	do_no_context(regs, error_code, trans_exc_code);
+	do_no_context(regs, int_code, trans_exc_code);
 }
 #endif
 
@@ -507,7 +507,7 @@ void pfault_fini(void)
 		: : "a" (&refbk), "m" (refbk) : "cc");
 }
 
-static void pfault_interrupt(__u16 error_code)
+static void pfault_interrupt(__u16 int_code)
 {
 	struct task_struct *tsk;
 	__u16 subcode;