diff options
Diffstat (limited to 'arch/powerpc/kernel')
36 files changed, 297 insertions, 391 deletions
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 34f55524d456..86150fbb42c3 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c | |||
@@ -908,7 +908,7 @@ int fix_alignment(struct pt_regs *regs) | |||
908 | flush_fp_to_thread(current); | 908 | flush_fp_to_thread(current); |
909 | } | 909 | } |
910 | 910 | ||
911 | if ((nb == 16)) { | 911 | if (nb == 16) { |
912 | if (flags & F) { | 912 | if (flags & F) { |
913 | /* Special case for 16-byte FP loads and stores */ | 913 | /* Special case for 16-byte FP loads and stores */ |
914 | PPC_WARN_ALIGNMENT(fp_pair, regs); | 914 | PPC_WARN_ALIGNMENT(fp_pair, regs); |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 9d7dede2847c..c161ef3f28a1 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -726,12 +726,5 @@ int main(void) | |||
726 | arch.timing_last_enter.tv32.tbl)); | 726 | arch.timing_last_enter.tv32.tbl)); |
727 | #endif | 727 | #endif |
728 | 728 | ||
729 | #ifdef CONFIG_PPC_POWERNV | ||
730 | DEFINE(OPAL_MC_GPR3, offsetof(struct opal_machine_check_event, gpr3)); | ||
731 | DEFINE(OPAL_MC_SRR0, offsetof(struct opal_machine_check_event, srr0)); | ||
732 | DEFINE(OPAL_MC_SRR1, offsetof(struct opal_machine_check_event, srr1)); | ||
733 | DEFINE(PACA_OPAL_MC_EVT, offsetof(struct paca_struct, opal_mc_evt)); | ||
734 | #endif | ||
735 | |||
736 | return 0; | 729 | return 0; |
737 | } | 730 | } |
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index c78e6dac4d7d..cfa0f81a5bb0 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c | |||
@@ -12,7 +12,6 @@ | |||
12 | #undef DEBUG | 12 | #undef DEBUG |
13 | 13 | ||
14 | #include <linux/crash_dump.h> | 14 | #include <linux/crash_dump.h> |
15 | #include <linux/bootmem.h> | ||
16 | #include <linux/io.h> | 15 | #include <linux/io.h> |
17 | #include <linux/memblock.h> | 16 | #include <linux/memblock.h> |
18 | #include <asm/code-patching.h> | 17 | #include <asm/code-patching.h> |
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index d55c76c571f3..f4217819cc31 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c | |||
@@ -41,7 +41,7 @@ void doorbell_exception(struct pt_regs *regs) | |||
41 | 41 | ||
42 | may_hard_irq_enable(); | 42 | may_hard_irq_enable(); |
43 | 43 | ||
44 | __get_cpu_var(irq_stat).doorbell_irqs++; | 44 | __this_cpu_inc(irq_stat.doorbell_irqs); |
45 | 45 | ||
46 | smp_ipi_demux(); | 46 | smp_ipi_demux(); |
47 | 47 | ||
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 2248a1999c64..e1b6d8e17289 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
@@ -143,6 +143,8 @@ static int __init eeh_setup(char *str) | |||
143 | { | 143 | { |
144 | if (!strcmp(str, "off")) | 144 | if (!strcmp(str, "off")) |
145 | eeh_add_flag(EEH_FORCE_DISABLED); | 145 | eeh_add_flag(EEH_FORCE_DISABLED); |
146 | else if (!strcmp(str, "early_log")) | ||
147 | eeh_add_flag(EEH_EARLY_DUMP_LOG); | ||
146 | 148 | ||
147 | return 1; | 149 | return 1; |
148 | } | 150 | } |
@@ -758,30 +760,41 @@ static void eeh_reset_pe_once(struct eeh_pe *pe) | |||
758 | int eeh_reset_pe(struct eeh_pe *pe) | 760 | int eeh_reset_pe(struct eeh_pe *pe) |
759 | { | 761 | { |
760 | int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); | 762 | int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); |
761 | int i, rc; | 763 | int i, state, ret; |
764 | |||
765 | /* Mark as reset and block config space */ | ||
766 | eeh_pe_state_mark(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED); | ||
762 | 767 | ||
763 | /* Take three shots at resetting the bus */ | 768 | /* Take three shots at resetting the bus */ |
764 | for (i=0; i<3; i++) { | 769 | for (i = 0; i < 3; i++) { |
765 | eeh_reset_pe_once(pe); | 770 | eeh_reset_pe_once(pe); |
766 | 771 | ||
767 | /* | 772 | /* |
768 | * EEH_PE_ISOLATED is expected to be removed after | 773 | * EEH_PE_ISOLATED is expected to be removed after |
769 | * BAR restore. | 774 | * BAR restore. |
770 | */ | 775 | */ |
771 | rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); | 776 | state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); |
772 | if ((rc & flags) == flags) | 777 | if ((state & flags) == flags) { |
773 | return 0; | 778 | ret = 0; |
779 | goto out; | ||
780 | } | ||
774 | 781 | ||
775 | if (rc < 0) { | 782 | if (state < 0) { |
776 | pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x", | 783 | pr_warn("%s: Unrecoverable slot failure on PHB#%d-PE#%x", |
777 | __func__, pe->phb->global_number, pe->addr); | 784 | __func__, pe->phb->global_number, pe->addr); |
778 | return -1; | 785 | ret = -ENOTRECOVERABLE; |
786 | goto out; | ||
779 | } | 787 | } |
780 | pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n", | 788 | |
781 | i+1, pe->phb->global_number, pe->addr, rc); | 789 | /* We might run out of credits */ |
790 | ret = -EIO; | ||
791 | pr_warn("%s: Failure %d resetting PHB#%x-PE#%x\n (%d)\n", | ||
792 | __func__, state, pe->phb->global_number, pe->addr, (i + 1)); | ||
782 | } | 793 | } |
783 | 794 | ||
784 | return -1; | 795 | out: |
796 | eeh_pe_state_clear(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED); | ||
797 | return ret; | ||
785 | } | 798 | } |
786 | 799 | ||
787 | /** | 800 | /** |
@@ -920,11 +933,8 @@ int eeh_init(void) | |||
920 | pr_warn("%s: Platform EEH operation not found\n", | 933 | pr_warn("%s: Platform EEH operation not found\n", |
921 | __func__); | 934 | __func__); |
922 | return -EEXIST; | 935 | return -EEXIST; |
923 | } else if ((ret = eeh_ops->init())) { | 936 | } else if ((ret = eeh_ops->init())) |
924 | pr_warn("%s: Failed to call platform init function (%d)\n", | ||
925 | __func__, ret); | ||
926 | return ret; | 937 | return ret; |
927 | } | ||
928 | 938 | ||
929 | /* Initialize EEH event */ | 939 | /* Initialize EEH event */ |
930 | ret = eeh_event_init(); | 940 | ret = eeh_event_init(); |
@@ -1209,6 +1219,7 @@ int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state) | |||
1209 | static struct pci_device_id eeh_reset_ids[] = { | 1219 | static struct pci_device_id eeh_reset_ids[] = { |
1210 | { PCI_DEVICE(0x19a2, 0x0710) }, /* Emulex, BE */ | 1220 | { PCI_DEVICE(0x19a2, 0x0710) }, /* Emulex, BE */ |
1211 | { PCI_DEVICE(0x10df, 0xe220) }, /* Emulex, Lancer */ | 1221 | { PCI_DEVICE(0x10df, 0xe220) }, /* Emulex, Lancer */ |
1222 | { PCI_DEVICE(0x14e4, 0x1657) }, /* Broadcom BCM5719 */ | ||
1212 | { 0 } | 1223 | { 0 } |
1213 | }; | 1224 | }; |
1214 | 1225 | ||
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 6535936bdf27..b17e793ba67e 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c | |||
@@ -528,13 +528,11 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe) | |||
528 | eeh_pe_dev_traverse(pe, eeh_report_error, &result); | 528 | eeh_pe_dev_traverse(pe, eeh_report_error, &result); |
529 | 529 | ||
530 | /* Issue reset */ | 530 | /* Issue reset */ |
531 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); | ||
532 | ret = eeh_reset_pe(pe); | 531 | ret = eeh_reset_pe(pe); |
533 | if (ret) { | 532 | if (ret) { |
534 | eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED); | 533 | eeh_pe_state_clear(pe, EEH_PE_RECOVERING); |
535 | return ret; | 534 | return ret; |
536 | } | 535 | } |
537 | eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); | ||
538 | 536 | ||
539 | /* Unfreeze the PE */ | 537 | /* Unfreeze the PE */ |
540 | ret = eeh_clear_pe_frozen_state(pe, true); | 538 | ret = eeh_clear_pe_frozen_state(pe, true); |
@@ -601,19 +599,15 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) | |||
601 | * config accesses. So we prefer to block them. However, controlled | 599 | * config accesses. So we prefer to block them. However, controlled |
602 | * PCI config accesses initiated from EEH itself are allowed. | 600 | * PCI config accesses initiated from EEH itself are allowed. |
603 | */ | 601 | */ |
604 | eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED); | ||
605 | rc = eeh_reset_pe(pe); | 602 | rc = eeh_reset_pe(pe); |
606 | if (rc) { | 603 | if (rc) |
607 | eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); | ||
608 | return rc; | 604 | return rc; |
609 | } | ||
610 | 605 | ||
611 | pci_lock_rescan_remove(); | 606 | pci_lock_rescan_remove(); |
612 | 607 | ||
613 | /* Restore PE */ | 608 | /* Restore PE */ |
614 | eeh_ops->configure_bridge(pe); | 609 | eeh_ops->configure_bridge(pe); |
615 | eeh_pe_restore_bars(pe); | 610 | eeh_pe_restore_bars(pe); |
616 | eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED); | ||
617 | 611 | ||
618 | /* Clear frozen state */ | 612 | /* Clear frozen state */ |
619 | rc = eeh_clear_pe_frozen_state(pe, false); | 613 | rc = eeh_clear_pe_frozen_state(pe, false); |
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 22b45a4955cd..10a093579191 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S | |||
@@ -1424,12 +1424,18 @@ _GLOBAL(ftrace_graph_caller) | |||
1424 | lwz r4, 44(r1) | 1424 | lwz r4, 44(r1) |
1425 | subi r4, r4, MCOUNT_INSN_SIZE | 1425 | subi r4, r4, MCOUNT_INSN_SIZE |
1426 | 1426 | ||
1427 | /* get the parent address */ | 1427 | /* Grab the LR out of the caller stack frame */ |
1428 | addi r3, r1, 52 | 1428 | lwz r3,52(r1) |
1429 | 1429 | ||
1430 | bl prepare_ftrace_return | 1430 | bl prepare_ftrace_return |
1431 | nop | 1431 | nop |
1432 | 1432 | ||
1433 | /* | ||
1434 | * prepare_ftrace_return gives us the address we divert to. | ||
1435 | * Change the LR in the callers stack frame to this. | ||
1436 | */ | ||
1437 | stw r3,52(r1) | ||
1438 | |||
1433 | MCOUNT_RESTORE_FRAME | 1439 | MCOUNT_RESTORE_FRAME |
1434 | /* old link register ends up in ctr reg */ | 1440 | /* old link register ends up in ctr reg */ |
1435 | bctr | 1441 | bctr |
@@ -1457,4 +1463,4 @@ _GLOBAL(return_to_handler) | |||
1457 | blr | 1463 | blr |
1458 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 1464 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
1459 | 1465 | ||
1460 | #endif /* CONFIG_MCOUNT */ | 1466 | #endif /* CONFIG_FUNCTION_TRACER */ |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 0905c8da90f1..194e46dcf08d 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -1227,13 +1227,20 @@ _GLOBAL(ftrace_graph_caller) | |||
1227 | ld r4, 128(r1) | 1227 | ld r4, 128(r1) |
1228 | subi r4, r4, MCOUNT_INSN_SIZE | 1228 | subi r4, r4, MCOUNT_INSN_SIZE |
1229 | 1229 | ||
1230 | /* get the parent address */ | 1230 | /* Grab the LR out of the caller stack frame */ |
1231 | ld r11, 112(r1) | 1231 | ld r11, 112(r1) |
1232 | addi r3, r11, 16 | 1232 | ld r3, 16(r11) |
1233 | 1233 | ||
1234 | bl prepare_ftrace_return | 1234 | bl prepare_ftrace_return |
1235 | nop | 1235 | nop |
1236 | 1236 | ||
1237 | /* | ||
1238 | * prepare_ftrace_return gives us the address we divert to. | ||
1239 | * Change the LR in the callers stack frame to this. | ||
1240 | */ | ||
1241 | ld r11, 112(r1) | ||
1242 | std r3, 16(r11) | ||
1243 | |||
1237 | ld r0, 128(r1) | 1244 | ld r0, 128(r1) |
1238 | mtlr r0 | 1245 | mtlr r0 |
1239 | addi r1, r1, 112 | 1246 | addi r1, r1, 112 |
@@ -1241,28 +1248,6 @@ _GLOBAL(ftrace_graph_caller) | |||
1241 | 1248 | ||
1242 | _GLOBAL(return_to_handler) | 1249 | _GLOBAL(return_to_handler) |
1243 | /* need to save return values */ | 1250 | /* need to save return values */ |
1244 | std r4, -24(r1) | ||
1245 | std r3, -16(r1) | ||
1246 | std r31, -8(r1) | ||
1247 | mr r31, r1 | ||
1248 | stdu r1, -112(r1) | ||
1249 | |||
1250 | bl ftrace_return_to_handler | ||
1251 | nop | ||
1252 | |||
1253 | /* return value has real return address */ | ||
1254 | mtlr r3 | ||
1255 | |||
1256 | ld r1, 0(r1) | ||
1257 | ld r4, -24(r1) | ||
1258 | ld r3, -16(r1) | ||
1259 | ld r31, -8(r1) | ||
1260 | |||
1261 | /* Jump back to real return address */ | ||
1262 | blr | ||
1263 | |||
1264 | _GLOBAL(mod_return_to_handler) | ||
1265 | /* need to save return values */ | ||
1266 | std r4, -32(r1) | 1251 | std r4, -32(r1) |
1267 | std r3, -24(r1) | 1252 | std r3, -24(r1) |
1268 | /* save TOC */ | 1253 | /* save TOC */ |
@@ -1272,7 +1257,7 @@ _GLOBAL(mod_return_to_handler) | |||
1272 | stdu r1, -112(r1) | 1257 | stdu r1, -112(r1) |
1273 | 1258 | ||
1274 | /* | 1259 | /* |
1275 | * We are in a module using the module's TOC. | 1260 | * We might be called from a module. |
1276 | * Switch to our TOC to run inside the core kernel. | 1261 | * Switch to our TOC to run inside the core kernel. |
1277 | */ | 1262 | */ |
1278 | ld r2, PACATOC(r13) | 1263 | ld r2, PACATOC(r13) |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 72e783ea0681..db08382e19f1 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -131,6 +131,8 @@ BEGIN_FTR_SECTION | |||
131 | 1: | 131 | 1: |
132 | #endif | 132 | #endif |
133 | 133 | ||
134 | /* Return SRR1 from power7_nap() */ | ||
135 | mfspr r3,SPRN_SRR1 | ||
134 | beq cr1,2f | 136 | beq cr1,2f |
135 | b power7_wakeup_noloss | 137 | b power7_wakeup_noloss |
136 | 2: b power7_wakeup_loss | 138 | 2: b power7_wakeup_loss |
@@ -292,15 +294,26 @@ decrementer_pSeries: | |||
292 | . = 0xc00 | 294 | . = 0xc00 |
293 | .globl system_call_pSeries | 295 | .globl system_call_pSeries |
294 | system_call_pSeries: | 296 | system_call_pSeries: |
295 | HMT_MEDIUM | 297 | /* |
298 | * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems | ||
299 | * that support it) before changing to HMT_MEDIUM. That allows the KVM | ||
300 | * code to save that value into the guest state (it is the guest's PPR | ||
301 | * value). Otherwise just change to HMT_MEDIUM as userspace has | ||
302 | * already saved the PPR. | ||
303 | */ | ||
296 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER | 304 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER |
297 | SET_SCRATCH0(r13) | 305 | SET_SCRATCH0(r13) |
298 | GET_PACA(r13) | 306 | GET_PACA(r13) |
299 | std r9,PACA_EXGEN+EX_R9(r13) | 307 | std r9,PACA_EXGEN+EX_R9(r13) |
308 | OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); | ||
309 | HMT_MEDIUM; | ||
300 | std r10,PACA_EXGEN+EX_R10(r13) | 310 | std r10,PACA_EXGEN+EX_R10(r13) |
311 | OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR); | ||
301 | mfcr r9 | 312 | mfcr r9 |
302 | KVMTEST(0xc00) | 313 | KVMTEST(0xc00) |
303 | GET_SCRATCH0(r13) | 314 | GET_SCRATCH0(r13) |
315 | #else | ||
316 | HMT_MEDIUM; | ||
304 | #endif | 317 | #endif |
305 | SYSCALL_PSERIES_1 | 318 | SYSCALL_PSERIES_1 |
306 | SYSCALL_PSERIES_2_RFID | 319 | SYSCALL_PSERIES_2_RFID |
@@ -1301,23 +1314,6 @@ hmi_exception_after_realmode: | |||
1301 | EXCEPTION_PROLOG_0(PACA_EXGEN) | 1314 | EXCEPTION_PROLOG_0(PACA_EXGEN) |
1302 | b hmi_exception_hv | 1315 | b hmi_exception_hv |
1303 | 1316 | ||
1304 | #ifdef CONFIG_PPC_POWERNV | ||
1305 | _GLOBAL(opal_mc_secondary_handler) | ||
1306 | HMT_MEDIUM_PPR_DISCARD | ||
1307 | SET_SCRATCH0(r13) | ||
1308 | GET_PACA(r13) | ||
1309 | clrldi r3,r3,2 | ||
1310 | tovirt(r3,r3) | ||
1311 | std r3,PACA_OPAL_MC_EVT(r13) | ||
1312 | ld r13,OPAL_MC_SRR0(r3) | ||
1313 | mtspr SPRN_SRR0,r13 | ||
1314 | ld r13,OPAL_MC_SRR1(r3) | ||
1315 | mtspr SPRN_SRR1,r13 | ||
1316 | ld r3,OPAL_MC_GPR3(r3) | ||
1317 | GET_SCRATCH0(r13) | ||
1318 | b machine_check_pSeries | ||
1319 | #endif /* CONFIG_PPC_POWERNV */ | ||
1320 | |||
1321 | 1317 | ||
1322 | #define MACHINE_CHECK_HANDLER_WINDUP \ | 1318 | #define MACHINE_CHECK_HANDLER_WINDUP \ |
1323 | /* Clear MSR_RI before setting SRR0 and SRR1. */\ | 1319 | /* Clear MSR_RI before setting SRR0 and SRR1. */\ |
@@ -1571,9 +1567,11 @@ do_hash_page: | |||
1571 | * r3 contains the faulting address | 1567 | * r3 contains the faulting address |
1572 | * r4 contains the required access permissions | 1568 | * r4 contains the required access permissions |
1573 | * r5 contains the trap number | 1569 | * r5 contains the trap number |
1570 | * r6 contains dsisr | ||
1574 | * | 1571 | * |
1575 | * at return r3 = 0 for success, 1 for page fault, negative for error | 1572 | * at return r3 = 0 for success, 1 for page fault, negative for error |
1576 | */ | 1573 | */ |
1574 | ld r6,_DSISR(r1) | ||
1577 | bl hash_page /* build HPTE if possible */ | 1575 | bl hash_page /* build HPTE if possible */ |
1578 | cmpdi r3,0 /* see if hash_page succeeded */ | 1576 | cmpdi r3,0 /* see if hash_page succeeded */ |
1579 | 1577 | ||
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index e66af6d265e8..44d4d8eb3c85 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c | |||
@@ -510,79 +510,36 @@ int ftrace_disable_ftrace_graph_caller(void) | |||
510 | } | 510 | } |
511 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 511 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
512 | 512 | ||
513 | #ifdef CONFIG_PPC64 | ||
514 | extern void mod_return_to_handler(void); | ||
515 | #endif | ||
516 | |||
517 | /* | 513 | /* |
518 | * Hook the return address and push it in the stack of return addrs | 514 | * Hook the return address and push it in the stack of return addrs |
519 | * in current thread info. | 515 | * in current thread info. Return the address we want to divert to. |
520 | */ | 516 | */ |
521 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | 517 | unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) |
522 | { | 518 | { |
523 | unsigned long old; | ||
524 | int faulted; | ||
525 | struct ftrace_graph_ent trace; | 519 | struct ftrace_graph_ent trace; |
526 | unsigned long return_hooker = (unsigned long)&return_to_handler; | 520 | unsigned long return_hooker; |
527 | 521 | ||
528 | if (unlikely(ftrace_graph_is_dead())) | 522 | if (unlikely(ftrace_graph_is_dead())) |
529 | return; | 523 | goto out; |
530 | 524 | ||
531 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 525 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
532 | return; | 526 | goto out; |
533 | |||
534 | #ifdef CONFIG_PPC64 | ||
535 | /* non core kernel code needs to save and restore the TOC */ | ||
536 | if (REGION_ID(self_addr) != KERNEL_REGION_ID) | ||
537 | return_hooker = (unsigned long)&mod_return_to_handler; | ||
538 | #endif | ||
539 | |||
540 | return_hooker = ppc_function_entry((void *)return_hooker); | ||
541 | 527 | ||
542 | /* | 528 | return_hooker = ppc_function_entry(return_to_handler); |
543 | * Protect against fault, even if it shouldn't | ||
544 | * happen. This tool is too much intrusive to | ||
545 | * ignore such a protection. | ||
546 | */ | ||
547 | asm volatile( | ||
548 | "1: " PPC_LL "%[old], 0(%[parent])\n" | ||
549 | "2: " PPC_STL "%[return_hooker], 0(%[parent])\n" | ||
550 | " li %[faulted], 0\n" | ||
551 | "3:\n" | ||
552 | |||
553 | ".section .fixup, \"ax\"\n" | ||
554 | "4: li %[faulted], 1\n" | ||
555 | " b 3b\n" | ||
556 | ".previous\n" | ||
557 | |||
558 | ".section __ex_table,\"a\"\n" | ||
559 | PPC_LONG_ALIGN "\n" | ||
560 | PPC_LONG "1b,4b\n" | ||
561 | PPC_LONG "2b,4b\n" | ||
562 | ".previous" | ||
563 | |||
564 | : [old] "=&r" (old), [faulted] "=r" (faulted) | ||
565 | : [parent] "r" (parent), [return_hooker] "r" (return_hooker) | ||
566 | : "memory" | ||
567 | ); | ||
568 | |||
569 | if (unlikely(faulted)) { | ||
570 | ftrace_graph_stop(); | ||
571 | WARN_ON(1); | ||
572 | return; | ||
573 | } | ||
574 | 529 | ||
575 | trace.func = self_addr; | 530 | trace.func = ip; |
576 | trace.depth = current->curr_ret_stack + 1; | 531 | trace.depth = current->curr_ret_stack + 1; |
577 | 532 | ||
578 | /* Only trace if the calling function expects to */ | 533 | /* Only trace if the calling function expects to */ |
579 | if (!ftrace_graph_entry(&trace)) { | 534 | if (!ftrace_graph_entry(&trace)) |
580 | *parent = old; | 535 | goto out; |
581 | return; | 536 | |
582 | } | 537 | if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) |
538 | goto out; | ||
583 | 539 | ||
584 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) | 540 | parent = return_hooker; |
585 | *parent = old; | 541 | out: |
542 | return parent; | ||
586 | } | 543 | } |
587 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 544 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
588 | 545 | ||
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index fafff8dbd5d9..d99aac0d69f1 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S | |||
@@ -33,13 +33,31 @@ | |||
33 | 33 | ||
34 | /* Macro to make the code more readable. */ | 34 | /* Macro to make the code more readable. */ |
35 | #ifdef CONFIG_8xx_CPU6 | 35 | #ifdef CONFIG_8xx_CPU6 |
36 | #define DO_8xx_CPU6(val, reg) \ | 36 | #define SPRN_MI_TWC_ADDR 0x2b80 |
37 | li reg, val; \ | 37 | #define SPRN_MI_RPN_ADDR 0x2d80 |
38 | stw reg, 12(r0); \ | 38 | #define SPRN_MD_TWC_ADDR 0x3b80 |
39 | lwz reg, 12(r0); | 39 | #define SPRN_MD_RPN_ADDR 0x3d80 |
40 | |||
41 | #define MTSPR_CPU6(spr, reg, treg) \ | ||
42 | li treg, spr##_ADDR; \ | ||
43 | stw treg, 12(r0); \ | ||
44 | lwz treg, 12(r0); \ | ||
45 | mtspr spr, reg | ||
40 | #else | 46 | #else |
41 | #define DO_8xx_CPU6(val, reg) | 47 | #define MTSPR_CPU6(spr, reg, treg) \ |
48 | mtspr spr, reg | ||
42 | #endif | 49 | #endif |
50 | |||
51 | /* | ||
52 | * Value for the bits that have fixed value in RPN entries. | ||
53 | * Also used for tagging DAR for DTLBerror. | ||
54 | */ | ||
55 | #ifdef CONFIG_PPC_16K_PAGES | ||
56 | #define RPN_PATTERN (0x00f0 | MD_SPS16K) | ||
57 | #else | ||
58 | #define RPN_PATTERN 0x00f0 | ||
59 | #endif | ||
60 | |||
43 | __HEAD | 61 | __HEAD |
44 | _ENTRY(_stext); | 62 | _ENTRY(_stext); |
45 | _ENTRY(_start); | 63 | _ENTRY(_start); |
@@ -65,13 +83,6 @@ _ENTRY(_start); | |||
65 | * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to | 83 | * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to |
66 | * the "internal" processor registers before MMU_init is called. | 84 | * the "internal" processor registers before MMU_init is called. |
67 | * | 85 | * |
68 | * The TLB code currently contains a major hack. Since I use the condition | ||
69 | * code register, I have to save and restore it. I am out of registers, so | ||
70 | * I just store it in memory location 0 (the TLB handlers are not reentrant). | ||
71 | * To avoid making any decisions, I need to use the "segment" valid bit | ||
72 | * in the first level table, but that would require many changes to the | ||
73 | * Linux page directory/table functions that I don't want to do right now. | ||
74 | * | ||
75 | * -- Dan | 86 | * -- Dan |
76 | */ | 87 | */ |
77 | .globl __start | 88 | .globl __start |
@@ -211,7 +222,7 @@ MachineCheck: | |||
211 | EXCEPTION_PROLOG | 222 | EXCEPTION_PROLOG |
212 | mfspr r4,SPRN_DAR | 223 | mfspr r4,SPRN_DAR |
213 | stw r4,_DAR(r11) | 224 | stw r4,_DAR(r11) |
214 | li r5,0x00f0 | 225 | li r5,RPN_PATTERN |
215 | mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */ | 226 | mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */ |
216 | mfspr r5,SPRN_DSISR | 227 | mfspr r5,SPRN_DSISR |
217 | stw r5,_DSISR(r11) | 228 | stw r5,_DSISR(r11) |
@@ -219,30 +230,16 @@ MachineCheck: | |||
219 | EXC_XFER_STD(0x200, machine_check_exception) | 230 | EXC_XFER_STD(0x200, machine_check_exception) |
220 | 231 | ||
221 | /* Data access exception. | 232 | /* Data access exception. |
222 | * This is "never generated" by the MPC8xx. We jump to it for other | 233 | * This is "never generated" by the MPC8xx. |
223 | * translation errors. | ||
224 | */ | 234 | */ |
225 | . = 0x300 | 235 | . = 0x300 |
226 | DataAccess: | 236 | DataAccess: |
227 | EXCEPTION_PROLOG | ||
228 | mfspr r10,SPRN_DSISR | ||
229 | stw r10,_DSISR(r11) | ||
230 | mr r5,r10 | ||
231 | mfspr r4,SPRN_DAR | ||
232 | li r10,0x00f0 | ||
233 | mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */ | ||
234 | EXC_XFER_LITE(0x300, handle_page_fault) | ||
235 | 237 | ||
236 | /* Instruction access exception. | 238 | /* Instruction access exception. |
237 | * This is "never generated" by the MPC8xx. We jump to it for other | 239 | * This is "never generated" by the MPC8xx. |
238 | * translation errors. | ||
239 | */ | 240 | */ |
240 | . = 0x400 | 241 | . = 0x400 |
241 | InstructionAccess: | 242 | InstructionAccess: |
242 | EXCEPTION_PROLOG | ||
243 | mr r4,r12 | ||
244 | mr r5,r9 | ||
245 | EXC_XFER_LITE(0x400, handle_page_fault) | ||
246 | 243 | ||
247 | /* External interrupt */ | 244 | /* External interrupt */ |
248 | EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) | 245 | EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) |
@@ -253,7 +250,7 @@ Alignment: | |||
253 | EXCEPTION_PROLOG | 250 | EXCEPTION_PROLOG |
254 | mfspr r4,SPRN_DAR | 251 | mfspr r4,SPRN_DAR |
255 | stw r4,_DAR(r11) | 252 | stw r4,_DAR(r11) |
256 | li r5,0x00f0 | 253 | li r5,RPN_PATTERN |
257 | mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */ | 254 | mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */ |
258 | mfspr r5,SPRN_DSISR | 255 | mfspr r5,SPRN_DSISR |
259 | stw r5,_DSISR(r11) | 256 | stw r5,_DSISR(r11) |
@@ -292,8 +289,8 @@ SystemCall: | |||
292 | . = 0x1100 | 289 | . = 0x1100 |
293 | /* | 290 | /* |
294 | * For the MPC8xx, this is a software tablewalk to load the instruction | 291 | * For the MPC8xx, this is a software tablewalk to load the instruction |
295 | * TLB. It is modelled after the example in the Motorola manual. The task | 292 | * TLB. The task switch loads the M_TW register with the pointer to the first |
296 | * switch loads the M_TWB register with the pointer to the first level table. | 293 | * level table. |
297 | * If we discover there is no second level table (value is zero) or if there | 294 | * If we discover there is no second level table (value is zero) or if there |
298 | * is an invalid pte, we load that into the TLB, which causes another fault | 295 | * is an invalid pte, we load that into the TLB, which causes another fault |
299 | * into the TLB Error interrupt where we can handle such problems. | 296 | * into the TLB Error interrupt where we can handle such problems. |
@@ -302,20 +299,17 @@ SystemCall: | |||
302 | */ | 299 | */ |
303 | InstructionTLBMiss: | 300 | InstructionTLBMiss: |
304 | #ifdef CONFIG_8xx_CPU6 | 301 | #ifdef CONFIG_8xx_CPU6 |
305 | stw r3, 8(r0) | 302 | mtspr SPRN_DAR, r3 |
306 | #endif | 303 | #endif |
307 | EXCEPTION_PROLOG_0 | 304 | EXCEPTION_PROLOG_0 |
308 | mtspr SPRN_SPRG_SCRATCH2, r10 | 305 | mtspr SPRN_SPRG_SCRATCH2, r10 |
309 | mfspr r10, SPRN_SRR0 /* Get effective address of fault */ | 306 | mfspr r10, SPRN_SRR0 /* Get effective address of fault */ |
310 | #ifdef CONFIG_8xx_CPU15 | 307 | #ifdef CONFIG_8xx_CPU15 |
311 | addi r11, r10, 0x1000 | 308 | addi r11, r10, PAGE_SIZE |
312 | tlbie r11 | 309 | tlbie r11 |
313 | addi r11, r10, -0x1000 | 310 | addi r11, r10, -PAGE_SIZE |
314 | tlbie r11 | 311 | tlbie r11 |
315 | #endif | 312 | #endif |
316 | DO_8xx_CPU6(0x3780, r3) | ||
317 | mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */ | ||
318 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ | ||
319 | 313 | ||
320 | /* If we are faulting a kernel address, we have to use the | 314 | /* If we are faulting a kernel address, we have to use the |
321 | * kernel page tables. | 315 | * kernel page tables. |
@@ -323,32 +317,37 @@ InstructionTLBMiss: | |||
323 | #ifdef CONFIG_MODULES | 317 | #ifdef CONFIG_MODULES |
324 | /* Only modules will cause ITLB Misses as we always | 318 | /* Only modules will cause ITLB Misses as we always |
325 | * pin the first 8MB of kernel memory */ | 319 | * pin the first 8MB of kernel memory */ |
326 | andi. r11, r10, 0x0800 /* Address >= 0x80000000 */ | 320 | andis. r11, r10, 0x8000 /* Address >= 0x80000000 */ |
321 | #endif | ||
322 | mfspr r11, SPRN_M_TW /* Get level 1 table base address */ | ||
323 | #ifdef CONFIG_MODULES | ||
327 | beq 3f | 324 | beq 3f |
328 | lis r11, swapper_pg_dir@h | 325 | lis r11, (swapper_pg_dir-PAGE_OFFSET)@h |
329 | ori r11, r11, swapper_pg_dir@l | 326 | ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l |
330 | rlwimi r10, r11, 0, 2, 19 | ||
331 | 3: | 327 | 3: |
332 | #endif | 328 | #endif |
333 | lwz r11, 0(r10) /* Get the level 1 entry */ | 329 | /* Extract level 1 index */ |
330 | rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 | ||
331 | lwzx r11, r10, r11 /* Get the level 1 entry */ | ||
334 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ | 332 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ |
335 | beq 2f /* If zero, don't try to find a pte */ | 333 | beq 2f /* If zero, don't try to find a pte */ |
336 | 334 | ||
337 | /* We have a pte table, so load the MI_TWC with the attributes | 335 | /* We have a pte table, so load the MI_TWC with the attributes |
338 | * for this "segment." | 336 | * for this "segment." |
339 | */ | 337 | */ |
340 | ori r11,r11,1 /* Set valid bit */ | 338 | MTSPR_CPU6(SPRN_MI_TWC, r11, r3) /* Set segment attributes */ |
341 | DO_8xx_CPU6(0x2b80, r3) | 339 | mfspr r11, SPRN_SRR0 /* Get effective address of fault */ |
342 | mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ | 340 | /* Extract level 2 index */ |
343 | DO_8xx_CPU6(0x3b80, r3) | 341 | rlwinm r11, r11, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 |
344 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ | 342 | lwzx r10, r10, r11 /* Get the pte */ |
345 | mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ | ||
346 | lwz r10, 0(r11) /* Get the pte */ | ||
347 | 343 | ||
348 | #ifdef CONFIG_SWAP | 344 | #ifdef CONFIG_SWAP |
349 | andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT | 345 | andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT |
350 | cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT | 346 | cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT |
347 | li r11, RPN_PATTERN | ||
351 | bne- cr0, 2f | 348 | bne- cr0, 2f |
349 | #else | ||
350 | li r11, RPN_PATTERN | ||
352 | #endif | 351 | #endif |
353 | /* The Linux PTE won't go exactly into the MMU TLB. | 352 | /* The Linux PTE won't go exactly into the MMU TLB. |
354 | * Software indicator bits 21 and 28 must be clear. | 353 | * Software indicator bits 21 and 28 must be clear. |
@@ -356,62 +355,63 @@ InstructionTLBMiss: | |||
356 | * set. All other Linux PTE bits control the behavior | 355 | * set. All other Linux PTE bits control the behavior |
357 | * of the MMU. | 356 | * of the MMU. |
358 | */ | 357 | */ |
359 | li r11, 0x00f0 | ||
360 | rlwimi r10, r11, 0, 0x07f8 /* Set 24-27, clear 21-23,28 */ | 358 | rlwimi r10, r11, 0, 0x07f8 /* Set 24-27, clear 21-23,28 */ |
361 | DO_8xx_CPU6(0x2d80, r3) | 359 | MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */ |
362 | mtspr SPRN_MI_RPN, r10 /* Update TLB entry */ | ||
363 | 360 | ||
364 | /* Restore registers */ | 361 | /* Restore registers */ |
365 | #ifdef CONFIG_8xx_CPU6 | 362 | #ifdef CONFIG_8xx_CPU6 |
366 | lwz r3, 8(r0) | 363 | mfspr r3, SPRN_DAR |
364 | mtspr SPRN_DAR, r11 /* Tag DAR */ | ||
367 | #endif | 365 | #endif |
368 | mfspr r10, SPRN_SPRG_SCRATCH2 | 366 | mfspr r10, SPRN_SPRG_SCRATCH2 |
369 | EXCEPTION_EPILOG_0 | 367 | EXCEPTION_EPILOG_0 |
370 | rfi | 368 | rfi |
371 | 2: | 369 | 2: |
372 | mfspr r11, SPRN_SRR1 | 370 | mfspr r10, SPRN_SRR1 |
373 | /* clear all error bits as TLB Miss | 371 | /* clear all error bits as TLB Miss |
374 | * sets a few unconditionally | 372 | * sets a few unconditionally |
375 | */ | 373 | */ |
376 | rlwinm r11, r11, 0, 0xffff | 374 | rlwinm r10, r10, 0, 0xffff |
377 | mtspr SPRN_SRR1, r11 | 375 | mtspr SPRN_SRR1, r10 |
378 | 376 | ||
379 | /* Restore registers */ | 377 | /* Restore registers */ |
380 | #ifdef CONFIG_8xx_CPU6 | 378 | #ifdef CONFIG_8xx_CPU6 |
381 | lwz r3, 8(r0) | 379 | mfspr r3, SPRN_DAR |
380 | mtspr SPRN_DAR, r11 /* Tag DAR */ | ||
382 | #endif | 381 | #endif |
383 | mfspr r10, SPRN_SPRG_SCRATCH2 | 382 | mfspr r10, SPRN_SPRG_SCRATCH2 |
384 | EXCEPTION_EPILOG_0 | 383 | b InstructionTLBError1 |
385 | b InstructionAccess | ||
386 | 384 | ||
387 | . = 0x1200 | 385 | . = 0x1200 |
388 | DataStoreTLBMiss: | 386 | DataStoreTLBMiss: |
389 | #ifdef CONFIG_8xx_CPU6 | 387 | #ifdef CONFIG_8xx_CPU6 |
390 | stw r3, 8(r0) | 388 | mtspr SPRN_DAR, r3 |
391 | #endif | 389 | #endif |
392 | EXCEPTION_PROLOG_0 | 390 | EXCEPTION_PROLOG_0 |
393 | mtspr SPRN_SPRG_SCRATCH2, r10 | 391 | mtspr SPRN_SPRG_SCRATCH2, r10 |
394 | mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ | 392 | mfspr r10, SPRN_MD_EPN |
395 | 393 | ||
396 | /* If we are faulting a kernel address, we have to use the | 394 | /* If we are faulting a kernel address, we have to use the |
397 | * kernel page tables. | 395 | * kernel page tables. |
398 | */ | 396 | */ |
399 | andi. r11, r10, 0x0800 | 397 | andis. r11, r10, 0x8000 |
398 | mfspr r11, SPRN_M_TW /* Get level 1 table base address */ | ||
400 | beq 3f | 399 | beq 3f |
401 | lis r11, swapper_pg_dir@h | 400 | lis r11, (swapper_pg_dir-PAGE_OFFSET)@h |
402 | ori r11, r11, swapper_pg_dir@l | 401 | ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l |
403 | rlwimi r10, r11, 0, 2, 19 | ||
404 | 3: | 402 | 3: |
405 | lwz r11, 0(r10) /* Get the level 1 entry */ | 403 | /* Extract level 1 index */ |
404 | rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 | ||
405 | lwzx r11, r10, r11 /* Get the level 1 entry */ | ||
406 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ | 406 | rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ |
407 | beq 2f /* If zero, don't try to find a pte */ | 407 | beq 2f /* If zero, don't try to find a pte */ |
408 | 408 | ||
409 | /* We have a pte table, so load fetch the pte from the table. | 409 | /* We have a pte table, so load fetch the pte from the table. |
410 | */ | 410 | */ |
411 | ori r11, r11, 1 /* Set valid bit in physical L2 page */ | 411 | mfspr r10, SPRN_MD_EPN /* Get address of fault */ |
412 | DO_8xx_CPU6(0x3b80, r3) | 412 | /* Extract level 2 index */ |
413 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ | 413 | rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 |
414 | mfspr r10, SPRN_MD_TWC /* ....and get the pte address */ | 414 | rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */ |
415 | lwz r10, 0(r10) /* Get the pte */ | 415 | lwz r10, 0(r10) /* Get the pte */ |
416 | 416 | ||
417 | /* Insert the Guarded flag into the TWC from the Linux PTE. | 417 | /* Insert the Guarded flag into the TWC from the Linux PTE. |
@@ -425,8 +425,7 @@ DataStoreTLBMiss: | |||
425 | * It is bit 25 in the Linux PTE and bit 30 in the TWC | 425 | * It is bit 25 in the Linux PTE and bit 30 in the TWC |
426 | */ | 426 | */ |
427 | rlwimi r11, r10, 32-5, 30, 30 | 427 | rlwimi r11, r10, 32-5, 30, 30 |
428 | DO_8xx_CPU6(0x3b80, r3) | 428 | MTSPR_CPU6(SPRN_MD_TWC, r11, r3) |
429 | mtspr SPRN_MD_TWC, r11 | ||
430 | 429 | ||
431 | /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. | 430 | /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. |
432 | * We also need to know if the insn is a load/store, so: | 431 | * We also need to know if the insn is a load/store, so: |
@@ -442,14 +441,8 @@ DataStoreTLBMiss: | |||
442 | and r11, r11, r10 | 441 | and r11, r11, r10 |
443 | rlwimi r10, r11, 0, _PAGE_PRESENT | 442 | rlwimi r10, r11, 0, _PAGE_PRESENT |
444 | #endif | 443 | #endif |
445 | /* Honour kernel RO, User NA */ | 444 | /* invert RW */ |
446 | /* 0x200 == Extended encoding, bit 22 */ | 445 | xori r10, r10, _PAGE_RW |
447 | rlwimi r10, r10, 32-2, 0x200 /* Copy USER to bit 22, 0x200 */ | ||
448 | /* r11 = (r10 & _PAGE_RW) >> 1 */ | ||
449 | rlwinm r11, r10, 32-1, 0x200 | ||
450 | or r10, r11, r10 | ||
451 | /* invert RW and 0x200 bits */ | ||
452 | xori r10, r10, _PAGE_RW | 0x200 | ||
453 | 446 | ||
454 | /* The Linux PTE won't go exactly into the MMU TLB. | 447 | /* The Linux PTE won't go exactly into the MMU TLB. |
455 | * Software indicator bits 22 and 28 must be clear. | 448 | * Software indicator bits 22 and 28 must be clear. |
@@ -457,14 +450,13 @@ DataStoreTLBMiss: | |||
457 | * set. All other Linux PTE bits control the behavior | 450 | * set. All other Linux PTE bits control the behavior |
458 | * of the MMU. | 451 | * of the MMU. |
459 | */ | 452 | */ |
460 | 2: li r11, 0x00f0 | 453 | 2: li r11, RPN_PATTERN |
461 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ | 454 | rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ |
462 | DO_8xx_CPU6(0x3d80, r3) | 455 | MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */ |
463 | mtspr SPRN_MD_RPN, r10 /* Update TLB entry */ | ||
464 | 456 | ||
465 | /* Restore registers */ | 457 | /* Restore registers */ |
466 | #ifdef CONFIG_8xx_CPU6 | 458 | #ifdef CONFIG_8xx_CPU6 |
467 | lwz r3, 8(r0) | 459 | mfspr r3, SPRN_DAR |
468 | #endif | 460 | #endif |
469 | mtspr SPRN_DAR, r11 /* Tag DAR */ | 461 | mtspr SPRN_DAR, r11 /* Tag DAR */ |
470 | mfspr r10, SPRN_SPRG_SCRATCH2 | 462 | mfspr r10, SPRN_SPRG_SCRATCH2 |
@@ -477,7 +469,17 @@ DataStoreTLBMiss: | |||
477 | */ | 469 | */ |
478 | . = 0x1300 | 470 | . = 0x1300 |
479 | InstructionTLBError: | 471 | InstructionTLBError: |
480 | b InstructionAccess | 472 | EXCEPTION_PROLOG_0 |
473 | InstructionTLBError1: | ||
474 | EXCEPTION_PROLOG_1 | ||
475 | EXCEPTION_PROLOG_2 | ||
476 | mr r4,r12 | ||
477 | mr r5,r9 | ||
478 | andis. r10,r5,0x4000 | ||
479 | beq+ 1f | ||
480 | tlbie r4 | ||
481 | /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */ | ||
482 | 1: EXC_XFER_LITE(0x400, handle_page_fault) | ||
481 | 483 | ||
482 | /* This is the data TLB error on the MPC8xx. This could be due to | 484 | /* This is the data TLB error on the MPC8xx. This could be due to |
483 | * many reasons, including a dirty update to a pte. We bail out to | 485 | * many reasons, including a dirty update to a pte. We bail out to |
@@ -488,11 +490,21 @@ DataTLBError: | |||
488 | EXCEPTION_PROLOG_0 | 490 | EXCEPTION_PROLOG_0 |
489 | 491 | ||
490 | mfspr r11, SPRN_DAR | 492 | mfspr r11, SPRN_DAR |
491 | cmpwi cr0, r11, 0x00f0 | 493 | cmpwi cr0, r11, RPN_PATTERN |
492 | beq- FixupDAR /* must be a buggy dcbX, icbi insn. */ | 494 | beq- FixupDAR /* must be a buggy dcbX, icbi insn. */ |
493 | DARFixed:/* Return from dcbx instruction bug workaround */ | 495 | DARFixed:/* Return from dcbx instruction bug workaround */ |
494 | EXCEPTION_EPILOG_0 | 496 | EXCEPTION_PROLOG_1 |
495 | b DataAccess | 497 | EXCEPTION_PROLOG_2 |
498 | mfspr r5,SPRN_DSISR | ||
499 | stw r5,_DSISR(r11) | ||
500 | mfspr r4,SPRN_DAR | ||
501 | andis. r10,r5,0x4000 | ||
502 | beq+ 1f | ||
503 | tlbie r4 | ||
504 | 1: li r10,RPN_PATTERN | ||
505 | mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */ | ||
506 | /* 0x300 is DataAccess exception, needed by bad_page_fault() */ | ||
507 | EXC_XFER_LITE(0x300, handle_page_fault) | ||
496 | 508 | ||
497 | EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) | 509 | EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) |
498 | EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE) | 510 | EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE) |
@@ -521,29 +533,30 @@ DARFixed:/* Return from dcbx instruction bug workaround */ | |||
521 | #define NO_SELF_MODIFYING_CODE | 533 | #define NO_SELF_MODIFYING_CODE |
522 | FixupDAR:/* Entry point for dcbx workaround. */ | 534 | FixupDAR:/* Entry point for dcbx workaround. */ |
523 | #ifdef CONFIG_8xx_CPU6 | 535 | #ifdef CONFIG_8xx_CPU6 |
524 | stw r3, 8(r0) | 536 | mtspr SPRN_DAR, r3 |
525 | #endif | 537 | #endif |
526 | mtspr SPRN_SPRG_SCRATCH2, r10 | 538 | mtspr SPRN_SPRG_SCRATCH2, r10 |
527 | /* fetch instruction from memory. */ | 539 | /* fetch instruction from memory. */ |
528 | mfspr r10, SPRN_SRR0 | 540 | mfspr r10, SPRN_SRR0 |
529 | andis. r11, r10, 0x8000 /* Address >= 0x80000000 */ | 541 | andis. r11, r10, 0x8000 /* Address >= 0x80000000 */ |
530 | DO_8xx_CPU6(0x3780, r3) | 542 | mfspr r11, SPRN_M_TW /* Get level 1 table base address */ |
531 | mtspr SPRN_MD_EPN, r10 | ||
532 | mfspr r11, SPRN_M_TWB /* Get level 1 table entry address */ | ||
533 | beq- 3f /* Branch if user space */ | 543 | beq- 3f /* Branch if user space */ |
534 | lis r11, (swapper_pg_dir-PAGE_OFFSET)@h | 544 | lis r11, (swapper_pg_dir-PAGE_OFFSET)@h |
535 | ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l | 545 | ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l |
536 | rlwimi r11, r10, 32-20, 0xffc /* r11 = r11&~0xffc|(r10>>20)&0xffc */ | 546 | /* Extract level 1 index */ |
537 | 3: lwz r11, 0(r11) /* Get the level 1 entry */ | 547 | 3: rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29 |
538 | DO_8xx_CPU6(0x3b80, r3) | 548 | lwzx r11, r10, r11 /* Get the level 1 entry */ |
539 | mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ | 549 | rlwinm r10, r11,0,0,19 /* Extract page descriptor page address */ |
540 | mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ | 550 | mfspr r11, SPRN_SRR0 /* Get effective address of fault */ |
541 | lwz r11, 0(r11) /* Get the pte */ | 551 | /* Extract level 2 index */ |
552 | rlwinm r11, r11, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29 | ||
553 | lwzx r11, r10, r11 /* Get the pte */ | ||
542 | #ifdef CONFIG_8xx_CPU6 | 554 | #ifdef CONFIG_8xx_CPU6 |
543 | lwz r3, 8(r0) /* restore r3 from memory */ | 555 | mfspr r3, SPRN_DAR |
544 | #endif | 556 | #endif |
545 | /* concat physical page address(r11) and page offset(r10) */ | 557 | /* concat physical page address(r11) and page offset(r10) */ |
546 | rlwimi r11, r10, 0, 20, 31 | 558 | mfspr r10, SPRN_SRR0 |
559 | rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31 | ||
547 | lwz r11,0(r11) | 560 | lwz r11,0(r11) |
548 | /* Check if it really is a dcbx instruction. */ | 561 | /* Check if it really is a dcbx instruction. */ |
549 | /* dcbt and dcbtst does not generate DTLB Misses/Errors, | 562 | /* dcbt and dcbtst does not generate DTLB Misses/Errors, |
@@ -698,11 +711,11 @@ start_here: | |||
698 | #ifdef CONFIG_8xx_CPU6 | 711 | #ifdef CONFIG_8xx_CPU6 |
699 | lis r4, cpu6_errata_word@h | 712 | lis r4, cpu6_errata_word@h |
700 | ori r4, r4, cpu6_errata_word@l | 713 | ori r4, r4, cpu6_errata_word@l |
701 | li r3, 0x3980 | 714 | li r3, 0x3f80 |
702 | stw r3, 12(r4) | 715 | stw r3, 12(r4) |
703 | lwz r3, 12(r4) | 716 | lwz r3, 12(r4) |
704 | #endif | 717 | #endif |
705 | mtspr SPRN_M_TWB, r6 | 718 | mtspr SPRN_M_TW, r6 |
706 | lis r4,2f@h | 719 | lis r4,2f@h |
707 | ori r4,r4,2f@l | 720 | ori r4,r4,2f@l |
708 | tophys(r4,r4) | 721 | tophys(r4,r4) |
@@ -876,10 +889,10 @@ _GLOBAL(set_context) | |||
876 | lis r6, cpu6_errata_word@h | 889 | lis r6, cpu6_errata_word@h |
877 | ori r6, r6, cpu6_errata_word@l | 890 | ori r6, r6, cpu6_errata_word@l |
878 | tophys (r4, r4) | 891 | tophys (r4, r4) |
879 | li r7, 0x3980 | 892 | li r7, 0x3f80 |
880 | stw r7, 12(r6) | 893 | stw r7, 12(r6) |
881 | lwz r7, 12(r6) | 894 | lwz r7, 12(r6) |
882 | mtspr SPRN_M_TWB, r4 /* Update MMU base address */ | 895 | mtspr SPRN_M_TW, r4 /* Update MMU base address */ |
883 | li r7, 0x3380 | 896 | li r7, 0x3380 |
884 | stw r7, 12(r6) | 897 | stw r7, 12(r6) |
885 | lwz r7, 12(r6) | 898 | lwz r7, 12(r6) |
@@ -887,7 +900,7 @@ _GLOBAL(set_context) | |||
887 | #else | 900 | #else |
888 | mtspr SPRN_M_CASID,r3 /* Update context */ | 901 | mtspr SPRN_M_CASID,r3 /* Update context */ |
889 | tophys (r4, r4) | 902 | tophys (r4, r4) |
890 | mtspr SPRN_M_TWB, r4 /* and pgd */ | 903 | mtspr SPRN_M_TW, r4 /* and pgd */ |
891 | #endif | 904 | #endif |
892 | SYNC | 905 | SYNC |
893 | blr | 906 | blr |
@@ -919,12 +932,13 @@ set_dec_cpu6: | |||
919 | .globl sdata | 932 | .globl sdata |
920 | sdata: | 933 | sdata: |
921 | .globl empty_zero_page | 934 | .globl empty_zero_page |
935 | .align PAGE_SHIFT | ||
922 | empty_zero_page: | 936 | empty_zero_page: |
923 | .space 4096 | 937 | .space PAGE_SIZE |
924 | 938 | ||
925 | .globl swapper_pg_dir | 939 | .globl swapper_pg_dir |
926 | swapper_pg_dir: | 940 | swapper_pg_dir: |
927 | .space 4096 | 941 | .space PGD_TABLE_SIZE |
928 | 942 | ||
929 | /* Room for two PTE table poiners, usually the kernel and current user | 943 | /* Room for two PTE table poiners, usually the kernel and current user |
930 | * pointer to their respective root page table (pgdir). | 944 | * pointer to their respective root page table (pgdir). |
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c index 1f7d84e2e8b2..05e804cdecaa 100644 --- a/arch/powerpc/kernel/hw_breakpoint.c +++ b/arch/powerpc/kernel/hw_breakpoint.c | |||
@@ -63,7 +63,7 @@ int hw_breakpoint_slots(int type) | |||
63 | int arch_install_hw_breakpoint(struct perf_event *bp) | 63 | int arch_install_hw_breakpoint(struct perf_event *bp) |
64 | { | 64 | { |
65 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | 65 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); |
66 | struct perf_event **slot = &__get_cpu_var(bp_per_reg); | 66 | struct perf_event **slot = this_cpu_ptr(&bp_per_reg); |
67 | 67 | ||
68 | *slot = bp; | 68 | *slot = bp; |
69 | 69 | ||
@@ -88,7 +88,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp) | |||
88 | */ | 88 | */ |
89 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) | 89 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) |
90 | { | 90 | { |
91 | struct perf_event **slot = &__get_cpu_var(bp_per_reg); | 91 | struct perf_event **slot = this_cpu_ptr(&bp_per_reg); |
92 | 92 | ||
93 | if (*slot != bp) { | 93 | if (*slot != bp) { |
94 | WARN_ONCE(1, "Can't find the breakpoint"); | 94 | WARN_ONCE(1, "Can't find the breakpoint"); |
@@ -226,7 +226,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) | |||
226 | */ | 226 | */ |
227 | rcu_read_lock(); | 227 | rcu_read_lock(); |
228 | 228 | ||
229 | bp = __get_cpu_var(bp_per_reg); | 229 | bp = __this_cpu_read(bp_per_reg); |
230 | if (!bp) | 230 | if (!bp) |
231 | goto out; | 231 | goto out; |
232 | info = counter_arch_bp(bp); | 232 | info = counter_arch_bp(bp); |
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index c0754bbf8118..18c0687e5ab3 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S | |||
@@ -212,6 +212,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) | |||
212 | mtspr SPRN_SRR0,r5 | 212 | mtspr SPRN_SRR0,r5 |
213 | rfid | 213 | rfid |
214 | 214 | ||
215 | /* | ||
216 | * R3 here contains the value that will be returned to the caller | ||
217 | * of power7_nap. | ||
218 | */ | ||
215 | _GLOBAL(power7_wakeup_loss) | 219 | _GLOBAL(power7_wakeup_loss) |
216 | ld r1,PACAR1(r13) | 220 | ld r1,PACAR1(r13) |
217 | BEGIN_FTR_SECTION | 221 | BEGIN_FTR_SECTION |
@@ -219,15 +223,19 @@ BEGIN_FTR_SECTION | |||
219 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) | 223 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) |
220 | REST_NVGPRS(r1) | 224 | REST_NVGPRS(r1) |
221 | REST_GPR(2, r1) | 225 | REST_GPR(2, r1) |
222 | ld r3,_CCR(r1) | 226 | ld r6,_CCR(r1) |
223 | ld r4,_MSR(r1) | 227 | ld r4,_MSR(r1) |
224 | ld r5,_NIP(r1) | 228 | ld r5,_NIP(r1) |
225 | addi r1,r1,INT_FRAME_SIZE | 229 | addi r1,r1,INT_FRAME_SIZE |
226 | mtcr r3 | 230 | mtcr r6 |
227 | mtspr SPRN_SRR1,r4 | 231 | mtspr SPRN_SRR1,r4 |
228 | mtspr SPRN_SRR0,r5 | 232 | mtspr SPRN_SRR0,r5 |
229 | rfid | 233 | rfid |
230 | 234 | ||
235 | /* | ||
236 | * R3 here contains the value that will be returned to the caller | ||
237 | * of power7_nap. | ||
238 | */ | ||
231 | _GLOBAL(power7_wakeup_noloss) | 239 | _GLOBAL(power7_wakeup_noloss) |
232 | lbz r0,PACA_NAPSTATELOST(r13) | 240 | lbz r0,PACA_NAPSTATELOST(r13) |
233 | cmpwi r0,0 | 241 | cmpwi r0,0 |
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index a83cf5ef6488..5d3968c4d799 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c | |||
@@ -208,7 +208,7 @@ static unsigned long iommu_range_alloc(struct device *dev, | |||
208 | * We don't need to disable preemption here because any CPU can | 208 | * We don't need to disable preemption here because any CPU can |
209 | * safely use any IOMMU pool. | 209 | * safely use any IOMMU pool. |
210 | */ | 210 | */ |
211 | pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1); | 211 | pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); |
212 | 212 | ||
213 | if (largealloc) | 213 | if (largealloc) |
214 | pool = &(tbl->large_pool); | 214 | pool = &(tbl->large_pool); |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index c14383575fe8..45096033d37b 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -50,7 +50,6 @@ | |||
50 | #include <linux/list.h> | 50 | #include <linux/list.h> |
51 | #include <linux/radix-tree.h> | 51 | #include <linux/radix-tree.h> |
52 | #include <linux/mutex.h> | 52 | #include <linux/mutex.h> |
53 | #include <linux/bootmem.h> | ||
54 | #include <linux/pci.h> | 53 | #include <linux/pci.h> |
55 | #include <linux/debugfs.h> | 54 | #include <linux/debugfs.h> |
56 | #include <linux/of.h> | 55 | #include <linux/of.h> |
@@ -114,7 +113,7 @@ static inline notrace void set_soft_enabled(unsigned long enable) | |||
114 | static inline notrace int decrementer_check_overflow(void) | 113 | static inline notrace int decrementer_check_overflow(void) |
115 | { | 114 | { |
116 | u64 now = get_tb_or_rtc(); | 115 | u64 now = get_tb_or_rtc(); |
117 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); | 116 | u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); |
118 | 117 | ||
119 | return now >= *next_tb; | 118 | return now >= *next_tb; |
120 | } | 119 | } |
@@ -499,7 +498,7 @@ void __do_irq(struct pt_regs *regs) | |||
499 | 498 | ||
500 | /* And finally process it */ | 499 | /* And finally process it */ |
501 | if (unlikely(irq == NO_IRQ)) | 500 | if (unlikely(irq == NO_IRQ)) |
502 | __get_cpu_var(irq_stat).spurious_irqs++; | 501 | __this_cpu_inc(irq_stat.spurious_irqs); |
503 | else | 502 | else |
504 | generic_handle_irq(irq); | 503 | generic_handle_irq(irq); |
505 | 504 | ||
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 8504657379f1..e77c3ccf8dcf 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c | |||
@@ -155,7 +155,7 @@ static int kgdb_singlestep(struct pt_regs *regs) | |||
155 | { | 155 | { |
156 | struct thread_info *thread_info, *exception_thread_info; | 156 | struct thread_info *thread_info, *exception_thread_info; |
157 | struct thread_info *backup_current_thread_info = | 157 | struct thread_info *backup_current_thread_info = |
158 | &__get_cpu_var(kgdb_thread_info); | 158 | this_cpu_ptr(&kgdb_thread_info); |
159 | 159 | ||
160 | if (user_mode(regs)) | 160 | if (user_mode(regs)) |
161 | return 0; | 161 | return 0; |
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index 2f72af82513c..7c053f281406 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c | |||
@@ -119,7 +119,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) | |||
119 | 119 | ||
120 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | 120 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) |
121 | { | 121 | { |
122 | __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; | 122 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); |
123 | kcb->kprobe_status = kcb->prev_kprobe.status; | 123 | kcb->kprobe_status = kcb->prev_kprobe.status; |
124 | kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; | 124 | kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; |
125 | } | 125 | } |
@@ -127,7 +127,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | |||
127 | static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | 127 | static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, |
128 | struct kprobe_ctlblk *kcb) | 128 | struct kprobe_ctlblk *kcb) |
129 | { | 129 | { |
130 | __get_cpu_var(current_kprobe) = p; | 130 | __this_cpu_write(current_kprobe, p); |
131 | kcb->kprobe_saved_msr = regs->msr; | 131 | kcb->kprobe_saved_msr = regs->msr; |
132 | } | 132 | } |
133 | 133 | ||
@@ -192,7 +192,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
192 | ret = 1; | 192 | ret = 1; |
193 | goto no_kprobe; | 193 | goto no_kprobe; |
194 | } | 194 | } |
195 | p = __get_cpu_var(current_kprobe); | 195 | p = __this_cpu_read(current_kprobe); |
196 | if (p->break_handler && p->break_handler(p, regs)) { | 196 | if (p->break_handler && p->break_handler(p, regs)) { |
197 | goto ss_probe; | 197 | goto ss_probe; |
198 | } | 198 | } |
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index a7fd4cb78b78..15c99b649b04 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c | |||
@@ -73,8 +73,8 @@ void save_mce_event(struct pt_regs *regs, long handled, | |||
73 | uint64_t nip, uint64_t addr) | 73 | uint64_t nip, uint64_t addr) |
74 | { | 74 | { |
75 | uint64_t srr1; | 75 | uint64_t srr1; |
76 | int index = __get_cpu_var(mce_nest_count)++; | 76 | int index = __this_cpu_inc_return(mce_nest_count); |
77 | struct machine_check_event *mce = &__get_cpu_var(mce_event[index]); | 77 | struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]); |
78 | 78 | ||
79 | /* | 79 | /* |
80 | * Return if we don't have enough space to log mce event. | 80 | * Return if we don't have enough space to log mce event. |
@@ -143,7 +143,7 @@ void save_mce_event(struct pt_regs *regs, long handled, | |||
143 | */ | 143 | */ |
144 | int get_mce_event(struct machine_check_event *mce, bool release) | 144 | int get_mce_event(struct machine_check_event *mce, bool release) |
145 | { | 145 | { |
146 | int index = __get_cpu_var(mce_nest_count) - 1; | 146 | int index = __this_cpu_read(mce_nest_count) - 1; |
147 | struct machine_check_event *mc_evt; | 147 | struct machine_check_event *mc_evt; |
148 | int ret = 0; | 148 | int ret = 0; |
149 | 149 | ||
@@ -153,7 +153,7 @@ int get_mce_event(struct machine_check_event *mce, bool release) | |||
153 | 153 | ||
154 | /* Check if we have MCE info to process. */ | 154 | /* Check if we have MCE info to process. */ |
155 | if (index < MAX_MC_EVT) { | 155 | if (index < MAX_MC_EVT) { |
156 | mc_evt = &__get_cpu_var(mce_event[index]); | 156 | mc_evt = this_cpu_ptr(&mce_event[index]); |
157 | /* Copy the event structure and release the original */ | 157 | /* Copy the event structure and release the original */ |
158 | if (mce) | 158 | if (mce) |
159 | *mce = *mc_evt; | 159 | *mce = *mc_evt; |
@@ -163,7 +163,7 @@ int get_mce_event(struct machine_check_event *mce, bool release) | |||
163 | } | 163 | } |
164 | /* Decrement the count to free the slot. */ | 164 | /* Decrement the count to free the slot. */ |
165 | if (release) | 165 | if (release) |
166 | __get_cpu_var(mce_nest_count)--; | 166 | __this_cpu_dec(mce_nest_count); |
167 | 167 | ||
168 | return ret; | 168 | return ret; |
169 | } | 169 | } |
@@ -184,13 +184,13 @@ void machine_check_queue_event(void) | |||
184 | if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) | 184 | if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) |
185 | return; | 185 | return; |
186 | 186 | ||
187 | index = __get_cpu_var(mce_queue_count)++; | 187 | index = __this_cpu_inc_return(mce_queue_count); |
188 | /* If queue is full, just return for now. */ | 188 | /* If queue is full, just return for now. */ |
189 | if (index >= MAX_MC_EVT) { | 189 | if (index >= MAX_MC_EVT) { |
190 | __get_cpu_var(mce_queue_count)--; | 190 | __this_cpu_dec(mce_queue_count); |
191 | return; | 191 | return; |
192 | } | 192 | } |
193 | __get_cpu_var(mce_event_queue[index]) = evt; | 193 | memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt)); |
194 | 194 | ||
195 | /* Queue irq work to process this event later. */ | 195 | /* Queue irq work to process this event later. */ |
196 | irq_work_queue(&mce_event_process_work); | 196 | irq_work_queue(&mce_event_process_work); |
@@ -208,11 +208,11 @@ static void machine_check_process_queued_event(struct irq_work *work) | |||
208 | * For now just print it to console. | 208 | * For now just print it to console. |
209 | * TODO: log this error event to FSP or nvram. | 209 | * TODO: log this error event to FSP or nvram. |
210 | */ | 210 | */ |
211 | while (__get_cpu_var(mce_queue_count) > 0) { | 211 | while (__this_cpu_read(mce_queue_count) > 0) { |
212 | index = __get_cpu_var(mce_queue_count) - 1; | 212 | index = __this_cpu_read(mce_queue_count) - 1; |
213 | machine_check_print_event_info( | 213 | machine_check_print_event_info( |
214 | &__get_cpu_var(mce_event_queue[index])); | 214 | this_cpu_ptr(&mce_event_queue[index])); |
215 | __get_cpu_var(mce_queue_count)--; | 215 | __this_cpu_dec(mce_queue_count); |
216 | } | 216 | } |
217 | } | 217 | } |
218 | 218 | ||
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index aa9aff3d6ad3..b6f123ab90ed 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c | |||
@@ -79,7 +79,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits) | |||
79 | } | 79 | } |
80 | if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { | 80 | if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { |
81 | if (cur_cpu_spec && cur_cpu_spec->flush_tlb) | 81 | if (cur_cpu_spec && cur_cpu_spec->flush_tlb) |
82 | cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); | 82 | cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET); |
83 | /* reset error bits */ | 83 | /* reset error bits */ |
84 | dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB; | 84 | dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB; |
85 | } | 85 | } |
@@ -110,7 +110,7 @@ static long mce_handle_common_ierror(uint64_t srr1) | |||
110 | break; | 110 | break; |
111 | case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: | 111 | case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: |
112 | if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { | 112 | if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { |
113 | cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); | 113 | cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET); |
114 | handled = 1; | 114 | handled = 1; |
115 | } | 115 | } |
116 | break; | 116 | break; |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index e5dad9a9edc0..37d512d35943 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | #include <linux/string.h> | 21 | #include <linux/string.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/bootmem.h> | ||
24 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
25 | #include <linux/export.h> | 24 | #include <linux/export.h> |
26 | #include <linux/of_address.h> | 25 | #include <linux/of_address.h> |
@@ -1464,7 +1463,7 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose, | |||
1464 | res = &hose->io_resource; | 1463 | res = &hose->io_resource; |
1465 | 1464 | ||
1466 | if (!res->flags) { | 1465 | if (!res->flags) { |
1467 | printk(KERN_WARNING "PCI: I/O resource not set for host" | 1466 | pr_info("PCI: I/O resource not set for host" |
1468 | " bridge %s (domain %d)\n", | 1467 | " bridge %s (domain %d)\n", |
1469 | hose->dn->full_name, hose->global_number); | 1468 | hose->dn->full_name, hose->global_number); |
1470 | } else { | 1469 | } else { |
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index 432459c817fa..1f7930037cb7 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c | |||
@@ -199,9 +199,7 @@ pci_create_OF_bus_map(void) | |||
199 | struct property* of_prop; | 199 | struct property* of_prop; |
200 | struct device_node *dn; | 200 | struct device_node *dn; |
201 | 201 | ||
202 | of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256); | 202 | of_prop = memblock_virt_alloc(sizeof(struct property) + 256, 0); |
203 | if (!of_prop) | ||
204 | return; | ||
205 | dn = of_find_node_by_path("/"); | 203 | dn = of_find_node_by_path("/"); |
206 | if (dn) { | 204 | if (dn) { |
207 | memset(of_prop, -1, sizeof(struct property) + 256); | 205 | memset(of_prop, -1, sizeof(struct property) + 256); |
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index b15194e2c5fc..60bb187cb46a 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/pci.h> | 17 | #include <linux/pci.h> |
18 | #include <linux/string.h> | 18 | #include <linux/string.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/bootmem.h> | ||
21 | #include <linux/export.h> | 20 | #include <linux/export.h> |
22 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
23 | #include <linux/list.h> | 22 | #include <linux/list.h> |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 923cd2daba89..b4cc7bef6b16 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -37,9 +37,9 @@ | |||
37 | #include <linux/personality.h> | 37 | #include <linux/personality.h> |
38 | #include <linux/random.h> | 38 | #include <linux/random.h> |
39 | #include <linux/hw_breakpoint.h> | 39 | #include <linux/hw_breakpoint.h> |
40 | #include <linux/uaccess.h> | ||
40 | 41 | ||
41 | #include <asm/pgtable.h> | 42 | #include <asm/pgtable.h> |
42 | #include <asm/uaccess.h> | ||
43 | #include <asm/io.h> | 43 | #include <asm/io.h> |
44 | #include <asm/processor.h> | 44 | #include <asm/processor.h> |
45 | #include <asm/mmu.h> | 45 | #include <asm/mmu.h> |
@@ -499,7 +499,7 @@ static inline int set_dawr(struct arch_hw_breakpoint *brk) | |||
499 | 499 | ||
500 | void __set_breakpoint(struct arch_hw_breakpoint *brk) | 500 | void __set_breakpoint(struct arch_hw_breakpoint *brk) |
501 | { | 501 | { |
502 | __get_cpu_var(current_brk) = *brk; | 502 | memcpy(this_cpu_ptr(¤t_brk), brk, sizeof(*brk)); |
503 | 503 | ||
504 | if (cpu_has_feature(CPU_FTR_DAWR)) | 504 | if (cpu_has_feature(CPU_FTR_DAWR)) |
505 | set_dawr(brk); | 505 | set_dawr(brk); |
@@ -842,7 +842,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
842 | * schedule DABR | 842 | * schedule DABR |
843 | */ | 843 | */ |
844 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | 844 | #ifndef CONFIG_HAVE_HW_BREAKPOINT |
845 | if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk))) | 845 | if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk))) |
846 | __set_breakpoint(&new->thread.hw_brk); | 846 | __set_breakpoint(&new->thread.hw_brk); |
847 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 847 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
848 | #endif | 848 | #endif |
@@ -856,7 +856,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
856 | * Collect processor utilization data per process | 856 | * Collect processor utilization data per process |
857 | */ | 857 | */ |
858 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { | 858 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { |
859 | struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); | 859 | struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array); |
860 | long unsigned start_tb, current_tb; | 860 | long unsigned start_tb, current_tb; |
861 | start_tb = old_thread->start_tb; | 861 | start_tb = old_thread->start_tb; |
862 | cu->current_tb = current_tb = mfspr(SPRN_PURR); | 862 | cu->current_tb = current_tb = mfspr(SPRN_PURR); |
@@ -866,7 +866,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
866 | #endif /* CONFIG_PPC64 */ | 866 | #endif /* CONFIG_PPC64 */ |
867 | 867 | ||
868 | #ifdef CONFIG_PPC_BOOK3S_64 | 868 | #ifdef CONFIG_PPC_BOOK3S_64 |
869 | batch = &__get_cpu_var(ppc64_tlb_batch); | 869 | batch = this_cpu_ptr(&ppc64_tlb_batch); |
870 | if (batch->active) { | 870 | if (batch->active) { |
871 | current_thread_info()->local_flags |= _TLF_LAZY_MMU; | 871 | current_thread_info()->local_flags |= _TLF_LAZY_MMU; |
872 | if (batch->index) | 872 | if (batch->index) |
@@ -889,7 +889,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
889 | #ifdef CONFIG_PPC_BOOK3S_64 | 889 | #ifdef CONFIG_PPC_BOOK3S_64 |
890 | if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { | 890 | if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { |
891 | current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; | 891 | current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; |
892 | batch = &__get_cpu_var(ppc64_tlb_batch); | 892 | batch = this_cpu_ptr(&ppc64_tlb_batch); |
893 | batch->active = 1; | 893 | batch->active = 1; |
894 | } | 894 | } |
895 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 895 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
@@ -921,12 +921,8 @@ static void show_instructions(struct pt_regs *regs) | |||
921 | pc = (unsigned long)phys_to_virt(pc); | 921 | pc = (unsigned long)phys_to_virt(pc); |
922 | #endif | 922 | #endif |
923 | 923 | ||
924 | /* We use __get_user here *only* to avoid an OOPS on a | ||
925 | * bad address because the pc *should* only be a | ||
926 | * kernel address. | ||
927 | */ | ||
928 | if (!__kernel_text_address(pc) || | 924 | if (!__kernel_text_address(pc) || |
929 | __get_user(instr, (unsigned int __user *)pc)) { | 925 | probe_kernel_address((unsigned int __user *)pc, instr)) { |
930 | printk(KERN_CONT "XXXXXXXX "); | 926 | printk(KERN_CONT "XXXXXXXX "); |
931 | } else { | 927 | } else { |
932 | if (regs->nip == pc) | 928 | if (regs->nip == pc) |
@@ -1531,13 +1527,6 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) | |||
1531 | int curr_frame = current->curr_ret_stack; | 1527 | int curr_frame = current->curr_ret_stack; |
1532 | extern void return_to_handler(void); | 1528 | extern void return_to_handler(void); |
1533 | unsigned long rth = (unsigned long)return_to_handler; | 1529 | unsigned long rth = (unsigned long)return_to_handler; |
1534 | unsigned long mrth = -1; | ||
1535 | #ifdef CONFIG_PPC64 | ||
1536 | extern void mod_return_to_handler(void); | ||
1537 | rth = *(unsigned long *)rth; | ||
1538 | mrth = (unsigned long)mod_return_to_handler; | ||
1539 | mrth = *(unsigned long *)mrth; | ||
1540 | #endif | ||
1541 | #endif | 1530 | #endif |
1542 | 1531 | ||
1543 | sp = (unsigned long) stack; | 1532 | sp = (unsigned long) stack; |
@@ -1562,7 +1551,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) | |||
1562 | if (!firstframe || ip != lr) { | 1551 | if (!firstframe || ip != lr) { |
1563 | printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); | 1552 | printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); |
1564 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1553 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1565 | if ((ip == rth || ip == mrth) && curr_frame >= 0) { | 1554 | if ((ip == rth) && curr_frame >= 0) { |
1566 | printk(" (%pS)", | 1555 | printk(" (%pS)", |
1567 | (void *)current->ret_stack[curr_frame].ret); | 1556 | (void *)current->ret_stack[curr_frame].ret); |
1568 | curr_frame--; | 1557 | curr_frame--; |
@@ -1665,12 +1654,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) | |||
1665 | return ret; | 1654 | return ret; |
1666 | } | 1655 | } |
1667 | 1656 | ||
1668 | unsigned long randomize_et_dyn(unsigned long base) | ||
1669 | { | ||
1670 | unsigned long ret = PAGE_ALIGN(base + brk_rnd()); | ||
1671 | |||
1672 | if (ret < base) | ||
1673 | return base; | ||
1674 | |||
1675 | return ret; | ||
1676 | } | ||
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 099f27e6d1b0..6a799b3cc6b4 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -160,6 +160,12 @@ static struct ibm_pa_feature { | |||
160 | {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1}, | 160 | {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1}, |
161 | {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, | 161 | {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, |
162 | {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, | 162 | {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, |
163 | /* | ||
164 | * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n), | ||
165 | * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP | ||
166 | * which is 0 if the kernel doesn't support TM. | ||
167 | */ | ||
168 | {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0}, | ||
163 | }; | 169 | }; |
164 | 170 | ||
165 | static void __init scan_features(unsigned long node, const unsigned char *ftrs, | 171 | static void __init scan_features(unsigned long node, const unsigned char *ftrs, |
@@ -696,10 +702,7 @@ void __init early_init_devtree(void *params) | |||
696 | reserve_crashkernel(); | 702 | reserve_crashkernel(); |
697 | early_reserve_mem(); | 703 | early_reserve_mem(); |
698 | 704 | ||
699 | /* | 705 | /* Ensure that total memory size is page-aligned. */ |
700 | * Ensure that total memory size is page-aligned, because otherwise | ||
701 | * mark_bootmem() gets upset. | ||
702 | */ | ||
703 | limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE); | 706 | limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE); |
704 | memblock_enforce_memory_limit(limit); | 707 | memblock_enforce_memory_limit(limit); |
705 | 708 | ||
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c index 8777fb02349f..fb2fb3ea85e5 100644 --- a/arch/powerpc/kernel/rtas-proc.c +++ b/arch/powerpc/kernel/rtas-proc.c | |||
@@ -113,17 +113,6 @@ | |||
113 | #define SENSOR_PREFIX "ibm,sensor-" | 113 | #define SENSOR_PREFIX "ibm,sensor-" |
114 | #define cel_to_fahr(x) ((x*9/5)+32) | 114 | #define cel_to_fahr(x) ((x*9/5)+32) |
115 | 115 | ||
116 | |||
117 | /* Globals */ | ||
118 | static struct rtas_sensors sensors; | ||
119 | static struct device_node *rtas_node = NULL; | ||
120 | static unsigned long power_on_time = 0; /* Save the time the user set */ | ||
121 | static char progress_led[MAX_LINELENGTH]; | ||
122 | |||
123 | static unsigned long rtas_tone_frequency = 1000; | ||
124 | static unsigned long rtas_tone_volume = 0; | ||
125 | |||
126 | /* ****************STRUCTS******************************************* */ | ||
127 | struct individual_sensor { | 116 | struct individual_sensor { |
128 | unsigned int token; | 117 | unsigned int token; |
129 | unsigned int quant; | 118 | unsigned int quant; |
@@ -134,6 +123,15 @@ struct rtas_sensors { | |||
134 | unsigned int quant; | 123 | unsigned int quant; |
135 | }; | 124 | }; |
136 | 125 | ||
126 | /* Globals */ | ||
127 | static struct rtas_sensors sensors; | ||
128 | static struct device_node *rtas_node = NULL; | ||
129 | static unsigned long power_on_time = 0; /* Save the time the user set */ | ||
130 | static char progress_led[MAX_LINELENGTH]; | ||
131 | |||
132 | static unsigned long rtas_tone_frequency = 1000; | ||
133 | static unsigned long rtas_tone_volume = 0; | ||
134 | |||
137 | /* ****************************************************************** */ | 135 | /* ****************************************************************** */ |
138 | /* Declarations */ | 136 | /* Declarations */ |
139 | static int ppc_rtas_sensors_show(struct seq_file *m, void *v); | 137 | static int ppc_rtas_sensors_show(struct seq_file *m, void *v); |
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 8b4c857c1421..4af905e81ab0 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
@@ -1091,8 +1091,8 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs) | |||
1091 | } | 1091 | } |
1092 | 1092 | ||
1093 | /* | 1093 | /* |
1094 | * Call early during boot, before mem init or bootmem, to retrieve the RTAS | 1094 | * Call early during boot, before mem init, to retrieve the RTAS |
1095 | * informations from the device-tree and allocate the RMO buffer for userland | 1095 | * information from the device-tree and allocate the RMO buffer for userland |
1096 | * accesses. | 1096 | * accesses. |
1097 | */ | 1097 | */ |
1098 | void __init rtas_initialize(void) | 1098 | void __init rtas_initialize(void) |
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 7c55b86206b3..ce230da2c015 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/pci.h> | 26 | #include <linux/pci.h> |
27 | #include <linux/string.h> | 27 | #include <linux/string.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/bootmem.h> | ||
30 | 29 | ||
31 | #include <asm/io.h> | 30 | #include <asm/io.h> |
32 | #include <asm/pgtable.h> | 31 | #include <asm/pgtable.h> |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 1362cd62b3fa..44c8d03558ac 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -139,8 +139,8 @@ void machine_restart(char *cmd) | |||
139 | void machine_power_off(void) | 139 | void machine_power_off(void) |
140 | { | 140 | { |
141 | machine_shutdown(); | 141 | machine_shutdown(); |
142 | if (ppc_md.power_off) | 142 | if (pm_power_off) |
143 | ppc_md.power_off(); | 143 | pm_power_off(); |
144 | #ifdef CONFIG_SMP | 144 | #ifdef CONFIG_SMP |
145 | smp_send_stop(); | 145 | smp_send_stop(); |
146 | #endif | 146 | #endif |
@@ -151,7 +151,7 @@ void machine_power_off(void) | |||
151 | /* Used by the G5 thermal driver */ | 151 | /* Used by the G5 thermal driver */ |
152 | EXPORT_SYMBOL_GPL(machine_power_off); | 152 | EXPORT_SYMBOL_GPL(machine_power_off); |
153 | 153 | ||
154 | void (*pm_power_off)(void) = machine_power_off; | 154 | void (*pm_power_off)(void); |
155 | EXPORT_SYMBOL_GPL(pm_power_off); | 155 | EXPORT_SYMBOL_GPL(pm_power_off); |
156 | 156 | ||
157 | void machine_halt(void) | 157 | void machine_halt(void) |
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 07831ed0d9ef..bb02e9f6944e 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include <linux/initrd.h> | 12 | #include <linux/initrd.h> |
13 | #include <linux/tty.h> | 13 | #include <linux/tty.h> |
14 | #include <linux/bootmem.h> | ||
15 | #include <linux/seq_file.h> | 14 | #include <linux/seq_file.h> |
16 | #include <linux/root_dev.h> | 15 | #include <linux/root_dev.h> |
17 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
@@ -53,11 +52,6 @@ unsigned long ISA_DMA_THRESHOLD; | |||
53 | unsigned int DMA_MODE_READ; | 52 | unsigned int DMA_MODE_READ; |
54 | unsigned int DMA_MODE_WRITE; | 53 | unsigned int DMA_MODE_WRITE; |
55 | 54 | ||
56 | #ifdef CONFIG_VGA_CONSOLE | ||
57 | unsigned long vgacon_remap_base; | ||
58 | EXPORT_SYMBOL(vgacon_remap_base); | ||
59 | #endif | ||
60 | |||
61 | /* | 55 | /* |
62 | * These are used in binfmt_elf.c to put aux entries on the stack | 56 | * These are used in binfmt_elf.c to put aux entries on the stack |
63 | * for each elf executable being started. | 57 | * for each elf executable being started. |
@@ -311,9 +305,8 @@ void __init setup_arch(char **cmdline_p) | |||
311 | 305 | ||
312 | irqstack_early_init(); | 306 | irqstack_early_init(); |
313 | 307 | ||
314 | /* set up the bootmem stuff with available memory */ | 308 | initmem_init(); |
315 | do_init_bootmem(); | 309 | if ( ppc_md.progress ) ppc_md.progress("setup_arch: initmem", 0x3eab); |
316 | if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab); | ||
317 | 310 | ||
318 | #ifdef CONFIG_DUMMY_CONSOLE | 311 | #ifdef CONFIG_DUMMY_CONSOLE |
319 | conswitchp = &dummy_con; | 312 | conswitchp = &dummy_con; |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4f3cfe1b6a33..49f553bbb360 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -660,13 +660,11 @@ static void __init emergency_stack_init(void) | |||
660 | } | 660 | } |
661 | 661 | ||
662 | /* | 662 | /* |
663 | * Called into from start_kernel this initializes bootmem, which is used | 663 | * Called into from start_kernel this initializes memblock, which is used |
664 | * to manage page allocation until mem_init is called. | 664 | * to manage page allocation until mem_init is called. |
665 | */ | 665 | */ |
666 | void __init setup_arch(char **cmdline_p) | 666 | void __init setup_arch(char **cmdline_p) |
667 | { | 667 | { |
668 | ppc64_boot_msg(0x12, "Setup Arch"); | ||
669 | |||
670 | *cmdline_p = boot_command_line; | 668 | *cmdline_p = boot_command_line; |
671 | 669 | ||
672 | /* | 670 | /* |
@@ -691,9 +689,7 @@ void __init setup_arch(char **cmdline_p) | |||
691 | exc_lvl_early_init(); | 689 | exc_lvl_early_init(); |
692 | emergency_stack_init(); | 690 | emergency_stack_init(); |
693 | 691 | ||
694 | /* set up the bootmem stuff with available memory */ | 692 | initmem_init(); |
695 | do_init_bootmem(); | ||
696 | sparse_init(); | ||
697 | 693 | ||
698 | #ifdef CONFIG_DUMMY_CONSOLE | 694 | #ifdef CONFIG_DUMMY_CONSOLE |
699 | conswitchp = &dummy_con; | 695 | conswitchp = &dummy_con; |
@@ -711,33 +707,6 @@ void __init setup_arch(char **cmdline_p) | |||
711 | if ((unsigned long)_stext & 0xffff) | 707 | if ((unsigned long)_stext & 0xffff) |
712 | panic("Kernelbase not 64K-aligned (0x%lx)!\n", | 708 | panic("Kernelbase not 64K-aligned (0x%lx)!\n", |
713 | (unsigned long)_stext); | 709 | (unsigned long)_stext); |
714 | |||
715 | ppc64_boot_msg(0x15, "Setup Done"); | ||
716 | } | ||
717 | |||
718 | |||
719 | /* ToDo: do something useful if ppc_md is not yet setup. */ | ||
720 | #define PPC64_LINUX_FUNCTION 0x0f000000 | ||
721 | #define PPC64_IPL_MESSAGE 0xc0000000 | ||
722 | #define PPC64_TERM_MESSAGE 0xb0000000 | ||
723 | |||
724 | static void ppc64_do_msg(unsigned int src, const char *msg) | ||
725 | { | ||
726 | if (ppc_md.progress) { | ||
727 | char buf[128]; | ||
728 | |||
729 | sprintf(buf, "%08X\n", src); | ||
730 | ppc_md.progress(buf, 0); | ||
731 | snprintf(buf, 128, "%s", msg); | ||
732 | ppc_md.progress(buf, 0); | ||
733 | } | ||
734 | } | ||
735 | |||
736 | /* Print a boot progress message. */ | ||
737 | void ppc64_boot_msg(unsigned int src, const char *msg) | ||
738 | { | ||
739 | ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg); | ||
740 | printk("[boot]%04x %s\n", src, msg); | ||
741 | } | 710 | } |
742 | 711 | ||
743 | #ifdef CONFIG_SMP | 712 | #ifdef CONFIG_SMP |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 71e186d5f331..8b2d2dc8ef10 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -243,7 +243,7 @@ void smp_muxed_ipi_message_pass(int cpu, int msg) | |||
243 | 243 | ||
244 | irqreturn_t smp_ipi_demux(void) | 244 | irqreturn_t smp_ipi_demux(void) |
245 | { | 245 | { |
246 | struct cpu_messages *info = &__get_cpu_var(ipi_message); | 246 | struct cpu_messages *info = this_cpu_ptr(&ipi_message); |
247 | unsigned int all; | 247 | unsigned int all; |
248 | 248 | ||
249 | mb(); /* order any irq clear */ | 249 | mb(); /* order any irq clear */ |
@@ -442,9 +442,9 @@ void generic_mach_cpu_die(void) | |||
442 | idle_task_exit(); | 442 | idle_task_exit(); |
443 | cpu = smp_processor_id(); | 443 | cpu = smp_processor_id(); |
444 | printk(KERN_DEBUG "CPU%d offline\n", cpu); | 444 | printk(KERN_DEBUG "CPU%d offline\n", cpu); |
445 | __get_cpu_var(cpu_state) = CPU_DEAD; | 445 | __this_cpu_write(cpu_state, CPU_DEAD); |
446 | smp_wmb(); | 446 | smp_wmb(); |
447 | while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) | 447 | while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE) |
448 | cpu_relax(); | 448 | cpu_relax(); |
449 | } | 449 | } |
450 | 450 | ||
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index 67fd2fd2620a..fa1fd8a0c867 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c | |||
@@ -394,10 +394,10 @@ void ppc_enable_pmcs(void) | |||
394 | ppc_set_pmu_inuse(1); | 394 | ppc_set_pmu_inuse(1); |
395 | 395 | ||
396 | /* Only need to enable them once */ | 396 | /* Only need to enable them once */ |
397 | if (__get_cpu_var(pmcs_enabled)) | 397 | if (__this_cpu_read(pmcs_enabled)) |
398 | return; | 398 | return; |
399 | 399 | ||
400 | __get_cpu_var(pmcs_enabled) = 1; | 400 | __this_cpu_write(pmcs_enabled, 1); |
401 | 401 | ||
402 | if (ppc_md.enable_pmcs) | 402 | if (ppc_md.enable_pmcs) |
403 | ppc_md.enable_pmcs(); | 403 | ppc_md.enable_pmcs(); |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 7505599c2593..fa7c4f12104f 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -458,9 +458,9 @@ static inline void clear_irq_work_pending(void) | |||
458 | 458 | ||
459 | DEFINE_PER_CPU(u8, irq_work_pending); | 459 | DEFINE_PER_CPU(u8, irq_work_pending); |
460 | 460 | ||
461 | #define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 | 461 | #define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1) |
462 | #define test_irq_work_pending() __get_cpu_var(irq_work_pending) | 462 | #define test_irq_work_pending() __this_cpu_read(irq_work_pending) |
463 | #define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 | 463 | #define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0) |
464 | 464 | ||
465 | #endif /* 32 vs 64 bit */ | 465 | #endif /* 32 vs 64 bit */ |
466 | 466 | ||
@@ -482,8 +482,8 @@ void arch_irq_work_raise(void) | |||
482 | static void __timer_interrupt(void) | 482 | static void __timer_interrupt(void) |
483 | { | 483 | { |
484 | struct pt_regs *regs = get_irq_regs(); | 484 | struct pt_regs *regs = get_irq_regs(); |
485 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); | 485 | u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); |
486 | struct clock_event_device *evt = &__get_cpu_var(decrementers); | 486 | struct clock_event_device *evt = this_cpu_ptr(&decrementers); |
487 | u64 now; | 487 | u64 now; |
488 | 488 | ||
489 | trace_timer_interrupt_entry(regs); | 489 | trace_timer_interrupt_entry(regs); |
@@ -498,7 +498,7 @@ static void __timer_interrupt(void) | |||
498 | *next_tb = ~(u64)0; | 498 | *next_tb = ~(u64)0; |
499 | if (evt->event_handler) | 499 | if (evt->event_handler) |
500 | evt->event_handler(evt); | 500 | evt->event_handler(evt); |
501 | __get_cpu_var(irq_stat).timer_irqs_event++; | 501 | __this_cpu_inc(irq_stat.timer_irqs_event); |
502 | } else { | 502 | } else { |
503 | now = *next_tb - now; | 503 | now = *next_tb - now; |
504 | if (now <= DECREMENTER_MAX) | 504 | if (now <= DECREMENTER_MAX) |
@@ -506,13 +506,13 @@ static void __timer_interrupt(void) | |||
506 | /* We may have raced with new irq work */ | 506 | /* We may have raced with new irq work */ |
507 | if (test_irq_work_pending()) | 507 | if (test_irq_work_pending()) |
508 | set_dec(1); | 508 | set_dec(1); |
509 | __get_cpu_var(irq_stat).timer_irqs_others++; | 509 | __this_cpu_inc(irq_stat.timer_irqs_others); |
510 | } | 510 | } |
511 | 511 | ||
512 | #ifdef CONFIG_PPC64 | 512 | #ifdef CONFIG_PPC64 |
513 | /* collect purr register values often, for accurate calculations */ | 513 | /* collect purr register values often, for accurate calculations */ |
514 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { | 514 | if (firmware_has_feature(FW_FEATURE_SPLPAR)) { |
515 | struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); | 515 | struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array); |
516 | cu->current_tb = mfspr(SPRN_PURR); | 516 | cu->current_tb = mfspr(SPRN_PURR); |
517 | } | 517 | } |
518 | #endif | 518 | #endif |
@@ -527,7 +527,7 @@ static void __timer_interrupt(void) | |||
527 | void timer_interrupt(struct pt_regs * regs) | 527 | void timer_interrupt(struct pt_regs * regs) |
528 | { | 528 | { |
529 | struct pt_regs *old_regs; | 529 | struct pt_regs *old_regs; |
530 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); | 530 | u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); |
531 | 531 | ||
532 | /* Ensure a positive value is written to the decrementer, or else | 532 | /* Ensure a positive value is written to the decrementer, or else |
533 | * some CPUs will continue to take decrementer exceptions. | 533 | * some CPUs will continue to take decrementer exceptions. |
@@ -813,7 +813,7 @@ static void __init clocksource_init(void) | |||
813 | static int decrementer_set_next_event(unsigned long evt, | 813 | static int decrementer_set_next_event(unsigned long evt, |
814 | struct clock_event_device *dev) | 814 | struct clock_event_device *dev) |
815 | { | 815 | { |
816 | __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt; | 816 | __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt); |
817 | set_dec(evt); | 817 | set_dec(evt); |
818 | 818 | ||
819 | /* We may have raced with new irq work */ | 819 | /* We may have raced with new irq work */ |
@@ -833,7 +833,7 @@ static void decrementer_set_mode(enum clock_event_mode mode, | |||
833 | /* Interrupt handler for the timer broadcast IPI */ | 833 | /* Interrupt handler for the timer broadcast IPI */ |
834 | void tick_broadcast_ipi_handler(void) | 834 | void tick_broadcast_ipi_handler(void) |
835 | { | 835 | { |
836 | u64 *next_tb = &__get_cpu_var(decrementers_next_tb); | 836 | u64 *next_tb = this_cpu_ptr(&decrementers_next_tb); |
837 | 837 | ||
838 | *next_tb = get_tb_or_rtc(); | 838 | *next_tb = get_tb_or_rtc(); |
839 | __timer_interrupt(); | 839 | __timer_interrupt(); |
@@ -989,6 +989,7 @@ void GregorianDay(struct rtc_time * tm) | |||
989 | 989 | ||
990 | tm->tm_wday = day % 7; | 990 | tm->tm_wday = day % 7; |
991 | } | 991 | } |
992 | EXPORT_SYMBOL_GPL(GregorianDay); | ||
992 | 993 | ||
993 | void to_tm(int tim, struct rtc_time * tm) | 994 | void to_tm(int tim, struct rtc_time * tm) |
994 | { | 995 | { |
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 0dc43f9932cf..e6595b72269b 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -295,7 +295,7 @@ long machine_check_early(struct pt_regs *regs) | |||
295 | { | 295 | { |
296 | long handled = 0; | 296 | long handled = 0; |
297 | 297 | ||
298 | __get_cpu_var(irq_stat).mce_exceptions++; | 298 | __this_cpu_inc(irq_stat.mce_exceptions); |
299 | 299 | ||
300 | if (cur_cpu_spec && cur_cpu_spec->machine_check_early) | 300 | if (cur_cpu_spec && cur_cpu_spec->machine_check_early) |
301 | handled = cur_cpu_spec->machine_check_early(regs); | 301 | handled = cur_cpu_spec->machine_check_early(regs); |
@@ -304,7 +304,7 @@ long machine_check_early(struct pt_regs *regs) | |||
304 | 304 | ||
305 | long hmi_exception_realmode(struct pt_regs *regs) | 305 | long hmi_exception_realmode(struct pt_regs *regs) |
306 | { | 306 | { |
307 | __get_cpu_var(irq_stat).hmi_exceptions++; | 307 | __this_cpu_inc(irq_stat.hmi_exceptions); |
308 | 308 | ||
309 | if (ppc_md.hmi_exception_early) | 309 | if (ppc_md.hmi_exception_early) |
310 | ppc_md.hmi_exception_early(regs); | 310 | ppc_md.hmi_exception_early(regs); |
@@ -700,7 +700,7 @@ void machine_check_exception(struct pt_regs *regs) | |||
700 | enum ctx_state prev_state = exception_enter(); | 700 | enum ctx_state prev_state = exception_enter(); |
701 | int recover = 0; | 701 | int recover = 0; |
702 | 702 | ||
703 | __get_cpu_var(irq_stat).mce_exceptions++; | 703 | __this_cpu_inc(irq_stat.mce_exceptions); |
704 | 704 | ||
705 | /* See if any machine dependent calls. In theory, we would want | 705 | /* See if any machine dependent calls. In theory, we would want |
706 | * to call the CPU first, and call the ppc_md. one if the CPU | 706 | * to call the CPU first, and call the ppc_md. one if the CPU |
@@ -1519,7 +1519,7 @@ void vsx_unavailable_tm(struct pt_regs *regs) | |||
1519 | 1519 | ||
1520 | void performance_monitor_exception(struct pt_regs *regs) | 1520 | void performance_monitor_exception(struct pt_regs *regs) |
1521 | { | 1521 | { |
1522 | __get_cpu_var(irq_stat).pmu_irqs++; | 1522 | __this_cpu_inc(irq_stat.pmu_irqs); |
1523 | 1523 | ||
1524 | perf_irq(regs); | 1524 | perf_irq(regs); |
1525 | } | 1525 | } |
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index 6e7c4923b5ea..411116c38da4 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c | |||
@@ -69,8 +69,12 @@ static void udbg_uart_putc(char c) | |||
69 | 69 | ||
70 | static int udbg_uart_getc_poll(void) | 70 | static int udbg_uart_getc_poll(void) |
71 | { | 71 | { |
72 | if (!udbg_uart_in || !(udbg_uart_in(UART_LSR) & LSR_DR)) | 72 | if (!udbg_uart_in) |
73 | return -1; | ||
74 | |||
75 | if (!(udbg_uart_in(UART_LSR) & LSR_DR)) | ||
73 | return udbg_uart_in(UART_RBR); | 76 | return udbg_uart_in(UART_RBR); |
77 | |||
74 | return -1; | 78 | return -1; |
75 | } | 79 | } |
76 | 80 | ||
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index f174351842cf..305eb0d9b768 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/user.h> | 20 | #include <linux/user.h> |
21 | #include <linux/elf.h> | 21 | #include <linux/elf.h> |
22 | #include <linux/security.h> | 22 | #include <linux/security.h> |
23 | #include <linux/bootmem.h> | ||
24 | #include <linux/memblock.h> | 23 | #include <linux/memblock.h> |
25 | 24 | ||
26 | #include <asm/pgtable.h> | 25 | #include <asm/pgtable.h> |