Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/fault.c          | 41
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 30
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  | 36
-rw-r--r--  arch/powerpc/mm/init_64.c        |  3
4 files changed, 78 insertions, 32 deletions
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 229951ffc351..8726779e1409 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -32,6 +32,7 @@
 #include <linux/perf_event.h>
 #include <linux/magic.h>
 #include <linux/ratelimit.h>
+#include <linux/context_tracking.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -196,6 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
 int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 			    unsigned long error_code)
 {
+	enum ctx_state prev_state = exception_enter();
 	struct vm_area_struct * vma;
 	struct mm_struct *mm = current->mm;
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -204,6 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	int trap = TRAP(regs);
 	int is_exec = trap == 0x400;
 	int fault;
+	int rc = 0;
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
 	/*
@@ -230,28 +233,30 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * look at it
 	 */
 	if (error_code & ICSWX_DSI_UCT) {
-		int rc = acop_handle_fault(regs, address, error_code);
+		rc = acop_handle_fault(regs, address, error_code);
 		if (rc)
-			return rc;
+			goto bail;
 	}
 #endif /* CONFIG_PPC_ICSWX */
 
 	if (notify_page_fault(regs))
-		return 0;
+		goto bail;
 
 	if (unlikely(debugger_fault_handler(regs)))
-		return 0;
+		goto bail;
 
 	/* On a kernel SLB miss we can only check for a valid exception entry */
-	if (!user_mode(regs) && (address >= TASK_SIZE))
-		return SIGSEGV;
+	if (!user_mode(regs) && (address >= TASK_SIZE)) {
+		rc = SIGSEGV;
+		goto bail;
+	}
 
 #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
 			     defined(CONFIG_PPC_BOOK3S_64))
 	if (error_code & DSISR_DABRMATCH) {
 		/* breakpoint match */
 		do_break(regs, address, error_code);
-		return 0;
+		goto bail;
 	}
 #endif
 
@@ -260,8 +265,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 		local_irq_enable();
 
 	if (in_atomic() || mm == NULL) {
-		if (!user_mode(regs))
-			return SIGSEGV;
+		if (!user_mode(regs)) {
+			rc = SIGSEGV;
+			goto bail;
+		}
 		/* in_atomic() in user mode is really bad,
 		   as is current->mm == NULL. */
 		printk(KERN_EMERG "Page fault in user mode with "
@@ -417,9 +424,11 @@ good_area:
 	 */
 	fault = handle_mm_fault(mm, vma, address, flags);
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
-		int rc = mm_fault_error(regs, address, fault);
+		rc = mm_fault_error(regs, address, fault);
 		if (rc >= MM_FAULT_RETURN)
-			return rc;
+			goto bail;
+		else
+			rc = 0;
 	}
 
 	/*
@@ -454,7 +463,7 @@ good_area:
 	}
 
 	up_read(&mm->mmap_sem);
-	return 0;
+	goto bail;
 
 bad_area:
 	up_read(&mm->mmap_sem);
@@ -463,7 +472,7 @@ bad_area_nosemaphore:
 	/* User mode accesses cause a SIGSEGV */
 	if (user_mode(regs)) {
 		_exception(SIGSEGV, regs, code, address);
-		return 0;
+		goto bail;
 	}
 
 	if (is_exec && (error_code & DSISR_PROTFAULT))
@@ -471,7 +480,11 @@ bad_area_nosemaphore:
 		" page (%lx) - exploit attempt? (uid: %d)\n",
 		address, from_kuid(&init_user_ns, current_uid()));
 
-	return SIGSEGV;
+	rc = SIGSEGV;
+
+bail:
+	exception_exit(prev_state);
+	return rc;
 
 }
 
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 6a2aead5b0e5..4c122c3f1623 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -336,11 +336,18 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 
 	hpte_v = hptep->v;
 	actual_psize = hpte_actual_psize(hptep, psize);
+	/*
+	 * We need to invalidate the TLB always because hpte_remove doesn't do
+	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+	 * random entry from it. When we do that we don't invalidate the TLB
+	 * (hpte_remove) because we assume the old translation is still
+	 * technically "valid".
+	 */
 	if (actual_psize < 0) {
-		native_unlock_hpte(hptep);
-		return -1;
+		actual_psize = psize;
+		ret = -1;
+		goto err_out;
 	}
-	/* Even if we miss, we need to invalidate the TLB */
 	if (!HPTE_V_COMPARE(hpte_v, want_v)) {
 		DBG_LOW(" -> miss\n");
 		ret = -1;
@@ -350,6 +357,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
 			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
 	}
+err_out:
 	native_unlock_hpte(hptep);
 
 	/* Ensure it is out of the tlb too. */
@@ -409,7 +417,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 	hptep = htab_address + slot;
 	actual_psize = hpte_actual_psize(hptep, psize);
 	if (actual_psize < 0)
-		return;
+		actual_psize = psize;
 
 	/* Update the HPTE */
 	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
@@ -437,21 +445,27 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 	hpte_v = hptep->v;
 
 	actual_psize = hpte_actual_psize(hptep, psize);
+	/*
+	 * We need to invalidate the TLB always because hpte_remove doesn't do
+	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
+	 * random entry from it. When we do that we don't invalidate the TLB
+	 * (hpte_remove) because we assume the old translation is still
+	 * technically "valid".
+	 */
 	if (actual_psize < 0) {
+		actual_psize = psize;
 		native_unlock_hpte(hptep);
-		local_irq_restore(flags);
-		return;
+		goto err_out;
 	}
-	/* Even if we miss, we need to invalidate the TLB */
 	if (!HPTE_V_COMPARE(hpte_v, want_v))
 		native_unlock_hpte(hptep);
 	else
 		/* Invalidate the hpte. NOTE: this also unlocks it */
 		hptep->v = 0;
 
+err_out:
 	/* Invalidate the TLB */
 	tlbie(vpn, psize, actual_psize, ssize, local);
-
 	local_irq_restore(flags);
 }
 
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 88ac0eeaadde..e303a6d74e3a 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -33,6 +33,7 @@
 #include <linux/init.h>
 #include <linux/signal.h>
 #include <linux/memblock.h>
+#include <linux/context_tracking.h>
 
 #include <asm/processor.h>
 #include <asm/pgtable.h>
@@ -954,6 +955,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
  */
 int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 {
+	enum ctx_state prev_state = exception_enter();
 	pgd_t *pgdir;
 	unsigned long vsid;
 	struct mm_struct *mm;
@@ -973,7 +975,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		mm = current->mm;
 		if (! mm) {
 			DBG_LOW(" user region with no mm !\n");
-			return 1;
+			rc = 1;
+			goto bail;
 		}
 		psize = get_slice_psize(mm, ea);
 		ssize = user_segment_size(ea);
@@ -992,19 +995,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		/* Not a valid range
 		 * Send the problem up to do_page_fault
 		 */
-		return 1;
+		rc = 1;
+		goto bail;
 	}
 	DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
 
 	/* Bad address. */
 	if (!vsid) {
 		DBG_LOW("Bad address!\n");
-		return 1;
+		rc = 1;
+		goto bail;
 	}
 	/* Get pgdir */
 	pgdir = mm->pgd;
-	if (pgdir == NULL)
-		return 1;
+	if (pgdir == NULL) {
+		rc = 1;
+		goto bail;
+	}
 
 	/* Check CPU locality */
 	tmp = cpumask_of(smp_processor_id());
@@ -1027,7 +1034,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
 	if (ptep == NULL || !pte_present(*ptep)) {
 		DBG_LOW(" no PTE !\n");
-		return 1;
+		rc = 1;
+		goto bail;
 	}
 
 	/* Add _PAGE_PRESENT to the required access perm */
@@ -1038,13 +1046,16 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	 */
 	if (access & ~pte_val(*ptep)) {
 		DBG_LOW(" no access !\n");
-		return 1;
+		rc = 1;
+		goto bail;
 	}
 
 #ifdef CONFIG_HUGETLB_PAGE
-	if (hugeshift)
-		return __hash_page_huge(ea, access, vsid, ptep, trap, local,
+	if (hugeshift) {
+		rc = __hash_page_huge(ea, access, vsid, ptep, trap, local,
 					ssize, hugeshift, psize);
+		goto bail;
+	}
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #ifndef CONFIG_PPC_64K_PAGES
@@ -1124,6 +1135,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		pte_val(*(ptep + PTRS_PER_PTE)));
 #endif
 	DBG_LOW(" -> rc=%d\n", rc);
+
+bail:
+	exception_exit(prev_state);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(hash_page);
@@ -1259,6 +1273,8 @@ void flush_hash_range(unsigned long number, int local)
  */
 void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
 {
+	enum ctx_state prev_state = exception_enter();
+
 	if (user_mode(regs)) {
 #ifdef CONFIG_PPC_SUBPAGE_PROT
 		if (rc == -2)
@@ -1268,6 +1284,8 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
 			_exception(SIGBUS, regs, BUS_ADRERR, address);
 	} else
 		bad_page_fault(regs, address, SIGBUS);
+
+	exception_exit(prev_state);
 }
 
 long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c2787bf779ca..a90b9c458990 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -215,7 +215,8 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
 					     unsigned long phys)
 {
 	int  mapped = htab_bolt_mapping(start, start + page_size, phys,
-					PAGE_KERNEL, mmu_vmemmap_psize,
+					pgprot_val(PAGE_KERNEL),
+					mmu_vmemmap_psize,
 					mmu_kernel_ssize);
 	BUG_ON(mapped < 0);
 }
