diff options
| author | Paul Mundt <lethal@linux-sh.org> | 2012-05-14 01:57:28 -0400 |
|---|---|---|
| committer | Paul Mundt <lethal@linux-sh.org> | 2012-05-14 01:57:28 -0400 |
| commit | 5a1dc78a38bfb04159a08cd493e5b3d844939e6c (patch) | |
| tree | 860420d3d52e2666449d1e688c399876a5c16bd3 | |
| parent | f007688a50cf5724049a4a5f17023fcdb0966b54 (diff) | |
sh: Support thread fault code encoding.
This provides a simple interface modelled after sparc64/m32r to encode
the error code in the upper byte of thread_info for finer-grained
handling in the page fault path.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
| -rw-r--r-- | arch/sh/include/asm/thread_info.h | 46 | ||||
| -rw-r--r-- | arch/sh/kernel/cpu/sh3/entry.S | 11 | ||||
| -rw-r--r-- | arch/sh/mm/fault_32.c | 68 |
3 files changed, 78 insertions, 47 deletions
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h index 20ee40af16e9..25a13e534ffe 100644 --- a/arch/sh/include/asm/thread_info.h +++ b/arch/sh/include/asm/thread_info.h | |||
| @@ -10,8 +10,18 @@ | |||
| 10 | * - Incorporating suggestions made by Linus Torvalds and Dave Miller | 10 | * - Incorporating suggestions made by Linus Torvalds and Dave Miller |
| 11 | */ | 11 | */ |
| 12 | #ifdef __KERNEL__ | 12 | #ifdef __KERNEL__ |
| 13 | |||
| 13 | #include <asm/page.h> | 14 | #include <asm/page.h> |
| 14 | 15 | ||
| 16 | /* | ||
| 17 | * Page fault error code bits | ||
| 18 | */ | ||
| 19 | #define FAULT_CODE_WRITE (1 << 0) /* write access */ | ||
| 20 | #define FAULT_CODE_INITIAL (1 << 1) /* initial page write */ | ||
| 21 | #define FAULT_CODE_ITLB (1 << 2) /* ITLB miss */ | ||
| 22 | #define FAULT_CODE_PROT (1 << 3) /* protection fault */ | ||
| 23 | #define FAULT_CODE_USER (1 << 4) /* user-mode access */ | ||
| 24 | |||
| 15 | #ifndef __ASSEMBLY__ | 25 | #ifndef __ASSEMBLY__ |
| 16 | #include <asm/processor.h> | 26 | #include <asm/processor.h> |
| 17 | 27 | ||
| @@ -107,10 +117,13 @@ extern void init_thread_xstate(void); | |||
| 107 | #endif /* __ASSEMBLY__ */ | 117 | #endif /* __ASSEMBLY__ */ |
| 108 | 118 | ||
| 109 | /* | 119 | /* |
| 110 | * thread information flags | 120 | * Thread information flags |
| 111 | * - these are process state flags that various assembly files may need to access | 121 | * |
| 112 | * - pending work-to-be-done flags are in LSW | 122 | * - Limited to 24 bits, upper byte used for fault code encoding. |
| 113 | * - other flags in MSW | 123 | * |
| 124 | * - _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or | ||
| 125 | * we blow the tst immediate size constraints and need to fix up | ||
| 126 | * arch/sh/kernel/entry-common.S. | ||
| 114 | */ | 127 | */ |
| 115 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ | 128 | #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ |
| 116 | #define TIF_SIGPENDING 1 /* signal pending */ | 129 | #define TIF_SIGPENDING 1 /* signal pending */ |
| @@ -133,12 +146,6 @@ extern void init_thread_xstate(void); | |||
| 133 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) | 146 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) |
| 134 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) | 147 | #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) |
| 135 | 148 | ||
| 136 | /* | ||
| 137 | * _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or we | ||
| 138 | * blow the tst immediate size constraints and need to fix up | ||
| 139 | * arch/sh/kernel/entry-common.S. | ||
| 140 | */ | ||
| 141 | |||
| 142 | /* work to do in syscall trace */ | 149 | /* work to do in syscall trace */ |
| 143 | #define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ | 150 | #define _TIF_WORK_SYSCALL_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ |
| 144 | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ | 151 | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \ |
| @@ -165,6 +172,7 @@ extern void init_thread_xstate(void); | |||
| 165 | #define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */ | 172 | #define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */ |
| 166 | 173 | ||
| 167 | #ifndef __ASSEMBLY__ | 174 | #ifndef __ASSEMBLY__ |
| 175 | |||
| 168 | #define HAVE_SET_RESTORE_SIGMASK 1 | 176 | #define HAVE_SET_RESTORE_SIGMASK 1 |
| 169 | static inline void set_restore_sigmask(void) | 177 | static inline void set_restore_sigmask(void) |
| 170 | { | 178 | { |
| @@ -172,6 +180,24 @@ static inline void set_restore_sigmask(void) | |||
| 172 | ti->status |= TS_RESTORE_SIGMASK; | 180 | ti->status |= TS_RESTORE_SIGMASK; |
| 173 | set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); | 181 | set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); |
| 174 | } | 182 | } |
| 183 | |||
| 184 | #define TI_FLAG_FAULT_CODE_SHIFT 24 | ||
| 185 | |||
| 186 | /* | ||
| 187 | * Additional thread flag encoding | ||
| 188 | */ | ||
| 189 | static inline void set_thread_fault_code(unsigned int val) | ||
| 190 | { | ||
| 191 | struct thread_info *ti = current_thread_info(); | ||
| 192 | ti->flags = (ti->flags & (~0 >> (32 - TI_FLAG_FAULT_CODE_SHIFT))) | ||
| 193 | | (val << TI_FLAG_FAULT_CODE_SHIFT); | ||
| 194 | } | ||
| 195 | |||
| 196 | static inline unsigned int get_thread_fault_code(void) | ||
| 197 | { | ||
| 198 | struct thread_info *ti = current_thread_info(); | ||
| 199 | return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT; | ||
| 200 | } | ||
| 175 | #endif /* !__ASSEMBLY__ */ | 201 | #endif /* !__ASSEMBLY__ */ |
| 176 | 202 | ||
| 177 | #endif /* __KERNEL__ */ | 203 | #endif /* __KERNEL__ */ |
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index f6a389c996cb..262db6ec067b 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | * arch/sh/kernel/cpu/sh3/entry.S | 2 | * arch/sh/kernel/cpu/sh3/entry.S |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | 4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka |
| 5 | * Copyright (C) 2003 - 2006 Paul Mundt | 5 | * Copyright (C) 2003 - 2012 Paul Mundt |
| 6 | * | 6 | * |
| 7 | * This file is subject to the terms and conditions of the GNU General Public | 7 | * This file is subject to the terms and conditions of the GNU General Public |
| 8 | * License. See the file "COPYING" in the main directory of this archive | 8 | * License. See the file "COPYING" in the main directory of this archive |
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <cpu/mmu_context.h> | 17 | #include <cpu/mmu_context.h> |
| 18 | #include <asm/page.h> | 18 | #include <asm/page.h> |
| 19 | #include <asm/cache.h> | 19 | #include <asm/cache.h> |
| 20 | #include <asm/thread_info.h> | ||
| 20 | 21 | ||
| 21 | ! NOTE: | 22 | ! NOTE: |
| 22 | ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address | 23 | ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address |
| @@ -114,22 +115,22 @@ ENTRY(tlb_miss_load) | |||
| 114 | .align 2 | 115 | .align 2 |
| 115 | ENTRY(tlb_miss_store) | 116 | ENTRY(tlb_miss_store) |
| 116 | bra call_handle_tlbmiss | 117 | bra call_handle_tlbmiss |
| 117 | mov #1, r5 | 118 | mov #FAULT_CODE_WRITE, r5 |
| 118 | 119 | ||
| 119 | .align 2 | 120 | .align 2 |
| 120 | ENTRY(initial_page_write) | 121 | ENTRY(initial_page_write) |
| 121 | bra call_handle_tlbmiss | 122 | bra call_handle_tlbmiss |
| 122 | mov #2, r5 | 123 | mov #FAULT_CODE_INITIAL, r5 |
| 123 | 124 | ||
| 124 | .align 2 | 125 | .align 2 |
| 125 | ENTRY(tlb_protection_violation_load) | 126 | ENTRY(tlb_protection_violation_load) |
| 126 | bra call_do_page_fault | 127 | bra call_do_page_fault |
| 127 | mov #0, r5 | 128 | mov #FAULT_CODE_PROT, r5 |
| 128 | 129 | ||
| 129 | .align 2 | 130 | .align 2 |
| 130 | ENTRY(tlb_protection_violation_store) | 131 | ENTRY(tlb_protection_violation_store) |
| 131 | bra call_do_page_fault | 132 | bra call_do_page_fault |
| 132 | mov #1, r5 | 133 | mov #(FAULT_CODE_PROT | FAULT_CODE_WRITE), r5 |
| 133 | 134 | ||
| 134 | call_handle_tlbmiss: | 135 | call_handle_tlbmiss: |
| 135 | mov.l 1f, r0 | 136 | mov.l 1f, r0 |
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 889e83b5ff22..a469b95e88fb 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c | |||
| @@ -211,7 +211,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long address) | |||
| 211 | } | 211 | } |
| 212 | 212 | ||
| 213 | static noinline void | 213 | static noinline void |
| 214 | no_context(struct pt_regs *regs, unsigned long writeaccess, | 214 | no_context(struct pt_regs *regs, unsigned long error_code, |
| 215 | unsigned long address) | 215 | unsigned long address) |
| 216 | { | 216 | { |
| 217 | /* Are we prepared to handle this kernel fault? */ | 217 | /* Are we prepared to handle this kernel fault? */ |
| @@ -229,13 +229,13 @@ no_context(struct pt_regs *regs, unsigned long writeaccess, | |||
| 229 | 229 | ||
| 230 | show_fault_oops(regs, address); | 230 | show_fault_oops(regs, address); |
| 231 | 231 | ||
| 232 | die("Oops", regs, writeaccess); | 232 | die("Oops", regs, error_code); |
| 233 | bust_spinlocks(0); | 233 | bust_spinlocks(0); |
| 234 | do_exit(SIGKILL); | 234 | do_exit(SIGKILL); |
| 235 | } | 235 | } |
| 236 | 236 | ||
| 237 | static void | 237 | static void |
| 238 | __bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess, | 238 | __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, |
| 239 | unsigned long address, int si_code) | 239 | unsigned long address, int si_code) |
| 240 | { | 240 | { |
| 241 | struct task_struct *tsk = current; | 241 | struct task_struct *tsk = current; |
| @@ -252,18 +252,18 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess, | |||
| 252 | return; | 252 | return; |
| 253 | } | 253 | } |
| 254 | 254 | ||
| 255 | no_context(regs, writeaccess, address); | 255 | no_context(regs, error_code, address); |
| 256 | } | 256 | } |
| 257 | 257 | ||
| 258 | static noinline void | 258 | static noinline void |
| 259 | bad_area_nosemaphore(struct pt_regs *regs, unsigned long writeaccess, | 259 | bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code, |
| 260 | unsigned long address) | 260 | unsigned long address) |
| 261 | { | 261 | { |
| 262 | __bad_area_nosemaphore(regs, writeaccess, address, SEGV_MAPERR); | 262 | __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR); |
| 263 | } | 263 | } |
| 264 | 264 | ||
| 265 | static void | 265 | static void |
| 266 | __bad_area(struct pt_regs *regs, unsigned long writeaccess, | 266 | __bad_area(struct pt_regs *regs, unsigned long error_code, |
| 267 | unsigned long address, int si_code) | 267 | unsigned long address, int si_code) |
| 268 | { | 268 | { |
| 269 | struct mm_struct *mm = current->mm; | 269 | struct mm_struct *mm = current->mm; |
| @@ -274,20 +274,20 @@ __bad_area(struct pt_regs *regs, unsigned long writeaccess, | |||
| 274 | */ | 274 | */ |
| 275 | up_read(&mm->mmap_sem); | 275 | up_read(&mm->mmap_sem); |
| 276 | 276 | ||
| 277 | __bad_area_nosemaphore(regs, writeaccess, address, si_code); | 277 | __bad_area_nosemaphore(regs, error_code, address, si_code); |
| 278 | } | 278 | } |
| 279 | 279 | ||
| 280 | static noinline void | 280 | static noinline void |
| 281 | bad_area(struct pt_regs *regs, unsigned long writeaccess, unsigned long address) | 281 | bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) |
| 282 | { | 282 | { |
| 283 | __bad_area(regs, writeaccess, address, SEGV_MAPERR); | 283 | __bad_area(regs, error_code, address, SEGV_MAPERR); |
| 284 | } | 284 | } |
| 285 | 285 | ||
| 286 | static noinline void | 286 | static noinline void |
| 287 | bad_area_access_error(struct pt_regs *regs, unsigned long writeaccess, | 287 | bad_area_access_error(struct pt_regs *regs, unsigned long error_code, |
| 288 | unsigned long address) | 288 | unsigned long address) |
| 289 | { | 289 | { |
| 290 | __bad_area(regs, writeaccess, address, SEGV_ACCERR); | 290 | __bad_area(regs, error_code, address, SEGV_ACCERR); |
| 291 | } | 291 | } |
| 292 | 292 | ||
| 293 | static void out_of_memory(void) | 293 | static void out_of_memory(void) |
| @@ -302,7 +302,7 @@ static void out_of_memory(void) | |||
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | static void | 304 | static void |
| 305 | do_sigbus(struct pt_regs *regs, unsigned long writeaccess, unsigned long address) | 305 | do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address) |
| 306 | { | 306 | { |
| 307 | struct task_struct *tsk = current; | 307 | struct task_struct *tsk = current; |
| 308 | struct mm_struct *mm = tsk->mm; | 308 | struct mm_struct *mm = tsk->mm; |
| @@ -311,13 +311,13 @@ do_sigbus(struct pt_regs *regs, unsigned long writeaccess, unsigned long address | |||
| 311 | 311 | ||
| 312 | /* Kernel mode? Handle exceptions or die: */ | 312 | /* Kernel mode? Handle exceptions or die: */ |
| 313 | if (!user_mode(regs)) | 313 | if (!user_mode(regs)) |
| 314 | no_context(regs, writeaccess, address); | 314 | no_context(regs, error_code, address); |
| 315 | 315 | ||
| 316 | force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); | 316 | force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk); |
| 317 | } | 317 | } |
| 318 | 318 | ||
| 319 | static noinline int | 319 | static noinline int |
| 320 | mm_fault_error(struct pt_regs *regs, unsigned long writeaccess, | 320 | mm_fault_error(struct pt_regs *regs, unsigned long error_code, |
| 321 | unsigned long address, unsigned int fault) | 321 | unsigned long address, unsigned int fault) |
| 322 | { | 322 | { |
| 323 | /* | 323 | /* |
| @@ -328,7 +328,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long writeaccess, | |||
| 328 | if (!(fault & VM_FAULT_RETRY)) | 328 | if (!(fault & VM_FAULT_RETRY)) |
| 329 | up_read(&current->mm->mmap_sem); | 329 | up_read(&current->mm->mmap_sem); |
| 330 | if (!user_mode(regs)) | 330 | if (!user_mode(regs)) |
| 331 | no_context(regs, writeaccess, address); | 331 | no_context(regs, error_code, address); |
| 332 | return 1; | 332 | return 1; |
| 333 | } | 333 | } |
| 334 | 334 | ||
| @@ -339,14 +339,14 @@ mm_fault_error(struct pt_regs *regs, unsigned long writeaccess, | |||
| 339 | /* Kernel mode? Handle exceptions or die: */ | 339 | /* Kernel mode? Handle exceptions or die: */ |
| 340 | if (!user_mode(regs)) { | 340 | if (!user_mode(regs)) { |
| 341 | up_read(&current->mm->mmap_sem); | 341 | up_read(&current->mm->mmap_sem); |
| 342 | no_context(regs, writeaccess, address); | 342 | no_context(regs, error_code, address); |
| 343 | return 1; | 343 | return 1; |
| 344 | } | 344 | } |
| 345 | 345 | ||
| 346 | out_of_memory(); | 346 | out_of_memory(); |
| 347 | } else { | 347 | } else { |
| 348 | if (fault & VM_FAULT_SIGBUS) | 348 | if (fault & VM_FAULT_SIGBUS) |
| 349 | do_sigbus(regs, writeaccess, address); | 349 | do_sigbus(regs, error_code, address); |
| 350 | else | 350 | else |
| 351 | BUG(); | 351 | BUG(); |
| 352 | } | 352 | } |
| @@ -381,7 +381,7 @@ static int fault_in_kernel_space(unsigned long address) | |||
| 381 | * routines. | 381 | * routines. |
| 382 | */ | 382 | */ |
| 383 | asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | 383 | asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, |
| 384 | unsigned long writeaccess, | 384 | unsigned long error_code, |
| 385 | unsigned long address) | 385 | unsigned long address) |
| 386 | { | 386 | { |
| 387 | unsigned long vec; | 387 | unsigned long vec; |
| @@ -389,8 +389,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
| 389 | struct mm_struct *mm; | 389 | struct mm_struct *mm; |
| 390 | struct vm_area_struct * vma; | 390 | struct vm_area_struct * vma; |
| 391 | int fault; | 391 | int fault; |
| 392 | int write = error_code & FAULT_CODE_WRITE; | ||
| 392 | unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | | 393 | unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | |
| 393 | (writeaccess ? FAULT_FLAG_WRITE : 0)); | 394 | (write ? FAULT_FLAG_WRITE : 0)); |
| 394 | 395 | ||
| 395 | tsk = current; | 396 | tsk = current; |
| 396 | mm = tsk->mm; | 397 | mm = tsk->mm; |
| @@ -411,7 +412,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
| 411 | if (notify_page_fault(regs, vec)) | 412 | if (notify_page_fault(regs, vec)) |
| 412 | return; | 413 | return; |
| 413 | 414 | ||
| 414 | bad_area_nosemaphore(regs, writeaccess, address); | 415 | bad_area_nosemaphore(regs, error_code, address); |
| 415 | return; | 416 | return; |
| 416 | } | 417 | } |
| 417 | 418 | ||
| @@ -429,7 +430,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, | |||
| 429 | * in an atomic region then we must not take the fault: | 430 | * in an atomic region then we must not take the fault: |
| 430 | */ | 431 | */ |
| 431 | if (unlikely(in_atomic() || !mm)) { | 432 | if (unlikely(in_atomic() || !mm)) { |
| 432 | bad_area_nosemaphore(regs, writeaccess, address); | 433 | bad_area_nosemaphore(regs, error_code, address); |
| 433 | return; | 434 | return; |
| 434 | } | 435 | } |
| 435 | 436 | ||
| @@ -438,17 +439,17 @@ retry: | |||
| 438 | 439 | ||
| 439 | vma = find_vma(mm, address); | 440 | vma = find_vma(mm, address); |
| 440 | if (unlikely(!vma)) { | 441 | if (unlikely(!vma)) { |
| 441 | bad_area(regs, writeaccess, address); | 442 | bad_area(regs, error_code, address); |
| 442 | return; | 443 | return; |
| 443 | } | 444 | } |
| 444 | if (likely(vma->vm_start <= address)) | 445 | if (likely(vma->vm_start <= address)) |
| 445 | goto good_area; | 446 | goto good_area; |
| 446 | if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { | 447 | if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { |
| 447 | bad_area(regs, writeaccess, address); | 448 | bad_area(regs, error_code, address); |
| 448 | return; | 449 | return; |
| 449 | } | 450 | } |
| 450 | if (unlikely(expand_stack(vma, address))) { | 451 | if (unlikely(expand_stack(vma, address))) { |
| 451 | bad_area(regs, writeaccess, address); | 452 | bad_area(regs, error_code, address); |
| 452 | return; | 453 | return; |
| 453 | } | 454 | } |
| 454 | 455 | ||
| @@ -457,11 +458,13 @@ retry: | |||
| 457 | * we can handle it.. | 458 | * we can handle it.. |
| 458 | */ | 459 | */ |
| 459 | good_area: | 460 | good_area: |
| 460 | if (unlikely(access_error(writeaccess, vma))) { | 461 | if (unlikely(access_error(error_code, vma))) { |
| 461 | bad_area_access_error(regs, writeaccess, address); | 462 | bad_area_access_error(regs, error_code, address); |
| 462 | return; | 463 | return; |
| 463 | } | 464 | } |
| 464 | 465 | ||
| 466 | set_thread_fault_code(error_code); | ||
| 467 | |||
| 465 | /* | 468 | /* |
| 466 | * If for any reason at all we couldn't handle the fault, | 469 | * If for any reason at all we couldn't handle the fault, |
| 467 | * make sure we exit gracefully rather than endlessly redo | 470 | * make sure we exit gracefully rather than endlessly redo |
| @@ -470,7 +473,7 @@ good_area: | |||
| 470 | fault = handle_mm_fault(mm, vma, address, flags); | 473 | fault = handle_mm_fault(mm, vma, address, flags); |
| 471 | 474 | ||
| 472 | if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR))) | 475 | if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR))) |
| 473 | if (mm_fault_error(regs, writeaccess, address, fault)) | 476 | if (mm_fault_error(regs, error_code, address, fault)) |
| 474 | return; | 477 | return; |
| 475 | 478 | ||
| 476 | if (flags & FAULT_FLAG_ALLOW_RETRY) { | 479 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
| @@ -502,7 +505,7 @@ good_area: | |||
| 502 | * Called with interrupts disabled. | 505 | * Called with interrupts disabled. |
| 503 | */ | 506 | */ |
| 504 | asmlinkage int __kprobes | 507 | asmlinkage int __kprobes |
| 505 | handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, | 508 | handle_tlbmiss(struct pt_regs *regs, unsigned long error_code, |
| 506 | unsigned long address) | 509 | unsigned long address) |
| 507 | { | 510 | { |
| 508 | pgd_t *pgd; | 511 | pgd_t *pgd; |
| @@ -535,10 +538,10 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, | |||
| 535 | entry = *pte; | 538 | entry = *pte; |
| 536 | if (unlikely(pte_none(entry) || pte_not_present(entry))) | 539 | if (unlikely(pte_none(entry) || pte_not_present(entry))) |
| 537 | return 1; | 540 | return 1; |
| 538 | if (unlikely(writeaccess && !pte_write(entry))) | 541 | if (unlikely(error_code && !pte_write(entry))) |
| 539 | return 1; | 542 | return 1; |
| 540 | 543 | ||
| 541 | if (writeaccess) | 544 | if (error_code) |
| 542 | entry = pte_mkdirty(entry); | 545 | entry = pte_mkdirty(entry); |
| 543 | entry = pte_mkyoung(entry); | 546 | entry = pte_mkyoung(entry); |
| 544 | 547 | ||
| @@ -550,10 +553,11 @@ handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, | |||
| 550 | * the case of an initial page write exception, so we need to | 553 | * the case of an initial page write exception, so we need to |
| 551 | * flush it in order to avoid potential TLB entry duplication. | 554 | * flush it in order to avoid potential TLB entry duplication. |
| 552 | */ | 555 | */ |
| 553 | if (writeaccess == 2) | 556 | if (error_code == FAULT_CODE_INITIAL) |
| 554 | local_flush_tlb_one(get_asid(), address & PAGE_MASK); | 557 | local_flush_tlb_one(get_asid(), address & PAGE_MASK); |
| 555 | #endif | 558 | #endif |
| 556 | 559 | ||
| 560 | set_thread_fault_code(error_code); | ||
| 557 | update_mmu_cache(NULL, address, pte); | 561 | update_mmu_cache(NULL, address, pte); |
| 558 | 562 | ||
| 559 | return 0; | 563 | return 0; |
