author    Linus Torvalds <torvalds@linux-foundation.org>  2012-10-05 14:16:12 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-10-05 14:16:12 -0400
commit    5f3d2f2e1a63679cf1c4a4210f2f1cc2f335bef6 (patch)
tree      9189bd6c81fe5f982a7ae45d2f3d900176658509 /arch/powerpc/kernel
parent    283dbd82055eb70ff3b469f812d9c695f18c9641 (diff)
parent    d900bd7366463fd96a907b2c212242e2b68b27d8 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Benjamin Herrenschmidt:
 "Some highlights in addition to the usual batch of fixes:

  - 64TB address space support for 64-bit processes by Aneesh Kumar

  - Gavin Shan did a major cleanup & re-organization of our EEH support
    code (IBM fancy PCI error handling & recovery infrastructure) which
    paves the way for supporting different platform backends, along with
    some rework of the PCIe code for the PowerNV platform in order to
    remove home made resource allocations and instead use the generic
    code (which is possible after some small improvements to it done by
    Gavin).

  - Uprobes support by Ananth N Mavinakayanahalli

  - A pile of embedded updates from Freescale folks, including new SoC
    and board supports, more KVM stuff including preparing for 64-bit
    BookE KVM support, ePAPR 1.1 updates, etc..."

Fixup trivial conflicts in drivers/scsi/ipr.c

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (146 commits)
  powerpc/iommu: Fix multiple issues with IOMMU pools code
  powerpc: Fix VMX fix for memcpy case
  driver/mtd:IFC NAND:Initialise internal SRAM before any write
  powerpc/fsl-pci: use 'Header Type' to identify PCIE mode
  powerpc/eeh: Don't release eeh_mutex in eeh_phb_pe_get
  powerpc: Remove tlb batching hack for nighthawk
  powerpc: Set paca->data_offset = 0 for boot cpu
  powerpc/perf: Sample only if SIAR-Valid bit is set in P7+
  powerpc/fsl-pci: fix warning when CONFIG_SWIOTLB is disabled
  powerpc/mpc85xx: Update interrupt handling for IFC controller
  powerpc/85xx: Enable USB support in p1023rds_defconfig
  powerpc/smp: Do not disable IPI interrupts during suspend
  powerpc/eeh: Fix crash on converting OF node to edev
  powerpc/eeh: Lock module while handling EEH event
  powerpc/kprobe: Don't emulate store when kprobe stwu r1
  powerpc/kprobe: Complete kprobe and migrate exception frame
  powerpc/kprobe: Introduce a new thread flag
  powerpc: Remove unused __get_user64() and __put_user64()
  powerpc/eeh: Global mutex to protect PE tree
  powerpc/eeh: Remove EEH PE for normal PCI hotplug
  ...
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile                   1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c              2
-rw-r--r--  arch/powerpc/kernel/cpu_setup_fsl_booke.S     74
-rw-r--r--  arch/powerpc/kernel/cputable.c                 4
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c             22
-rw-r--r--  arch/powerpc/kernel/dma.c                      3
-rw-r--r--  arch/powerpc/kernel/entry_32.S                47
-rw-r--r--  arch/powerpc/kernel/entry_64.S                35
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S         212
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S         127
-rw-r--r--  arch/powerpc/kernel/fadump.c                   3
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S          46
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c           25
-rw-r--r--  arch/powerpc/kernel/ibmebus.c                  1
-rw-r--r--  arch/powerpc/kernel/iommu.c                    5
-rw-r--r--  arch/powerpc/kernel/irq.c                      8
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c           14
-rw-r--r--  arch/powerpc/kernel/paca.c                     1
-rw-r--r--  arch/powerpc/kernel/pci-common.c              16
-rw-r--r--  arch/powerpc/kernel/process.c                 16
-rw-r--r--  arch/powerpc/kernel/prom.c                     4
-rw-r--r--  arch/powerpc/kernel/prom_init.c                2
-rw-r--r--  arch/powerpc/kernel/ptrace.c                   3
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c               7
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c                 5
-rw-r--r--  arch/powerpc/kernel/setup_64.c                 2
-rw-r--r--  arch/powerpc/kernel/signal.c                   8
-rw-r--r--  arch/powerpc/kernel/smp.c                     14
-rw-r--r--  arch/powerpc/kernel/time.c                     8
-rw-r--r--  arch/powerpc/kernel/traps.c                    1
-rw-r--r--  arch/powerpc/kernel/uprobes.c                184
-rw-r--r--  arch/powerpc/kernel/vdso.c                     4
-rw-r--r--  arch/powerpc/kernel/vio.c                      1
33 files changed, 734 insertions(+), 171 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index bb282dd81612..cde12f8a4ebc 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_MODULES) += ppc_ksyms.o
96obj-$(CONFIG_BOOTX_TEXT) += btext.o 96obj-$(CONFIG_BOOTX_TEXT) += btext.o
97obj-$(CONFIG_SMP) += smp.o 97obj-$(CONFIG_SMP) += smp.o
98obj-$(CONFIG_KPROBES) += kprobes.o 98obj-$(CONFIG_KPROBES) += kprobes.o
99obj-$(CONFIG_UPROBES) += uprobes.o
99obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o 100obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
100obj-$(CONFIG_STACKTRACE) += stacktrace.o 101obj-$(CONFIG_STACKTRACE) += stacktrace.o
101obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o 102obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e8995727b1c1..7523539cfe9f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -206,6 +206,7 @@ int main(void)
206 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); 206 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
207 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); 207 DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
208 DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost)); 208 DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
209 DEFINE(PACA_SPRG3, offsetof(struct paca_struct, sprg3));
209#endif /* CONFIG_PPC64 */ 210#endif /* CONFIG_PPC64 */
210 211
211 /* RTAS */ 212 /* RTAS */
@@ -534,7 +535,6 @@ int main(void)
534 HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); 535 HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
535 HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); 536 HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
536 HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); 537 HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
537 HSTATE_FIELD(HSTATE_SPRG3, sprg3);
538 HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); 538 HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
539 HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); 539 HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
540 HSTATE_FIELD(HSTATE_NAPPING, napping); 540 HSTATE_FIELD(HSTATE_NAPPING, napping);
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 69fdd2322a66..dcd881937f7a 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -16,6 +16,8 @@
16#include <asm/processor.h> 16#include <asm/processor.h>
17#include <asm/cputable.h> 17#include <asm/cputable.h>
18#include <asm/ppc_asm.h> 18#include <asm/ppc_asm.h>
19#include <asm/mmu-book3e.h>
20#include <asm/asm-offsets.h>
19 21
20_GLOBAL(__e500_icache_setup) 22_GLOBAL(__e500_icache_setup)
21 mfspr r0, SPRN_L1CSR1 23 mfspr r0, SPRN_L1CSR1
@@ -73,27 +75,81 @@ _GLOBAL(__setup_cpu_e500v2)
73 mtlr r4 75 mtlr r4
74 blr 76 blr
75_GLOBAL(__setup_cpu_e500mc) 77_GLOBAL(__setup_cpu_e500mc)
76 mr r5, r4 78_GLOBAL(__setup_cpu_e5500)
77 mflr r4 79 mflr r5
78 bl __e500_icache_setup 80 bl __e500_icache_setup
79 bl __e500_dcache_setup 81 bl __e500_dcache_setup
80 bl __setup_e500mc_ivors 82 bl __setup_e500mc_ivors
81 mtlr r4 83 /*
84 * We only want to touch IVOR38-41 if we're running on hardware
85 * that supports category E.HV. The architectural way to determine
86 * this is MMUCFG[LPIDSIZE].
87 */
88 mfspr r3, SPRN_MMUCFG
89 rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE
90 beq 1f
91 bl __setup_ehv_ivors
92 b 2f
931:
94 lwz r3, CPU_SPEC_FEATURES(r4)
95 /* We need this check as cpu_setup is also called for
96 * the secondary cores. So, if we have already cleared
97 * the feature on the primary core, avoid doing it on the
98 * secondary core.
99 */
100 andis. r6, r3, CPU_FTR_EMB_HV@h
101 beq 2f
102 rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
103 stw r3, CPU_SPEC_FEATURES(r4)
1042:
105 mtlr r5
82 blr 106 blr
83#endif 107#endif
84/* Right now, restore and setup are the same thing */ 108
109#ifdef CONFIG_PPC_BOOK3E_64
85_GLOBAL(__restore_cpu_e5500) 110_GLOBAL(__restore_cpu_e5500)
86_GLOBAL(__setup_cpu_e5500)
87 mflr r4 111 mflr r4
88 bl __e500_icache_setup 112 bl __e500_icache_setup
89 bl __e500_dcache_setup 113 bl __e500_dcache_setup
90#ifdef CONFIG_PPC_BOOK3E_64
91 bl .__setup_base_ivors 114 bl .__setup_base_ivors
92 bl .setup_perfmon_ivor 115 bl .setup_perfmon_ivor
93 bl .setup_doorbell_ivors 116 bl .setup_doorbell_ivors
117 /*
118 * We only want to touch IVOR38-41 if we're running on hardware
119 * that supports category E.HV. The architectural way to determine
120 * this is MMUCFG[LPIDSIZE].
121 */
122 mfspr r10,SPRN_MMUCFG
123 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
124 beq 1f
94 bl .setup_ehv_ivors 125 bl .setup_ehv_ivors
95#else 1261:
96 bl __setup_e500mc_ivors
97#endif
98 mtlr r4 127 mtlr r4
99 blr 128 blr
129
130_GLOBAL(__setup_cpu_e5500)
131 mflr r5
132 bl __e500_icache_setup
133 bl __e500_dcache_setup
134 bl .__setup_base_ivors
135 bl .setup_perfmon_ivor
136 bl .setup_doorbell_ivors
137 /*
138 * We only want to touch IVOR38-41 if we're running on hardware
139 * that supports category E.HV. The architectural way to determine
140 * this is MMUCFG[LPIDSIZE].
141 */
142 mfspr r10,SPRN_MMUCFG
143 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
144 beq 1f
145 bl .setup_ehv_ivors
146 b 2f
1471:
148 ld r10,CPU_SPEC_FEATURES(r4)
149 LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
150 andc r10,r10,r9
151 std r10,CPU_SPEC_FEATURES(r4)
1522:
153 mtlr r5
154 blr
155#endif
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 455faa389876..0514c21f138b 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2016,7 +2016,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
2016 .oprofile_cpu_type = "ppc/e500mc", 2016 .oprofile_cpu_type = "ppc/e500mc",
2017 .oprofile_type = PPC_OPROFILE_FSL_EMB, 2017 .oprofile_type = PPC_OPROFILE_FSL_EMB,
2018 .cpu_setup = __setup_cpu_e5500, 2018 .cpu_setup = __setup_cpu_e5500,
2019#ifndef CONFIG_PPC32
2019 .cpu_restore = __restore_cpu_e5500, 2020 .cpu_restore = __restore_cpu_e5500,
2021#endif
2020 .machine_check = machine_check_e500mc, 2022 .machine_check = machine_check_e500mc,
2021 .platform = "ppce5500", 2023 .platform = "ppce5500",
2022 }, 2024 },
@@ -2034,7 +2036,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
2034 .oprofile_cpu_type = "ppc/e6500", 2036 .oprofile_cpu_type = "ppc/e6500",
2035 .oprofile_type = PPC_OPROFILE_FSL_EMB, 2037 .oprofile_type = PPC_OPROFILE_FSL_EMB,
2036 .cpu_setup = __setup_cpu_e5500, 2038 .cpu_setup = __setup_cpu_e5500,
2039#ifndef CONFIG_PPC32
2037 .cpu_restore = __restore_cpu_e5500, 2040 .cpu_restore = __restore_cpu_e5500,
2041#endif
2038 .machine_check = machine_check_e500mc, 2042 .machine_check = machine_check_e500mc,
2039 .platform = "ppce6500", 2043 .platform = "ppce6500",
2040 }, 2044 },
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 46943651da23..bd1a2aba599f 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -12,6 +12,7 @@
12 */ 12 */
13 13
14#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
15#include <linux/memblock.h>
15#include <linux/pfn.h> 16#include <linux/pfn.h>
16#include <linux/of_platform.h> 17#include <linux/of_platform.h>
17#include <linux/platform_device.h> 18#include <linux/platform_device.h>
@@ -20,7 +21,6 @@
20#include <asm/machdep.h> 21#include <asm/machdep.h>
21#include <asm/swiotlb.h> 22#include <asm/swiotlb.h>
22#include <asm/dma.h> 23#include <asm/dma.h>
23#include <asm/abs_addr.h>
24 24
25unsigned int ppc_swiotlb_enable; 25unsigned int ppc_swiotlb_enable;
26 26
@@ -105,3 +105,23 @@ int __init swiotlb_setup_bus_notifier(void)
105 &ppc_swiotlb_plat_bus_notifier); 105 &ppc_swiotlb_plat_bus_notifier);
106 return 0; 106 return 0;
107} 107}
108
109void swiotlb_detect_4g(void)
110{
111 if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
112 ppc_swiotlb_enable = 1;
113}
114
115static int __init swiotlb_late_init(void)
116{
117 if (ppc_swiotlb_enable) {
118 swiotlb_print_info();
119 set_pci_dma_ops(&swiotlb_dma_ops);
120 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
121 } else {
122 swiotlb_free();
123 }
124
125 return 0;
126}
127subsys_initcall(swiotlb_late_init);
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 355b9d84b0f8..8032b97ccdcb 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -14,7 +14,6 @@
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <asm/vio.h> 15#include <asm/vio.h>
16#include <asm/bug.h> 16#include <asm/bug.h>
17#include <asm/abs_addr.h>
18#include <asm/machdep.h> 17#include <asm/machdep.h>
19 18
20/* 19/*
@@ -50,7 +49,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
50 return NULL; 49 return NULL;
51 ret = page_address(page); 50 ret = page_address(page);
52 memset(ret, 0, size); 51 memset(ret, 0, size);
53 *dma_handle = virt_to_abs(ret) + get_dma_offset(dev); 52 *dma_handle = __pa(ret) + get_dma_offset(dev);
54 53
55 return ret; 54 return ret;
56#endif 55#endif
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ead5016b02d0..af37528da49f 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -831,19 +831,56 @@ restore_user:
831 bnel- load_dbcr0 831 bnel- load_dbcr0
832#endif 832#endif
833 833
834#ifdef CONFIG_PREEMPT
835 b restore 834 b restore
836 835
837/* N.B. the only way to get here is from the beq following ret_from_except. */ 836/* N.B. the only way to get here is from the beq following ret_from_except. */
838resume_kernel: 837resume_kernel:
839 /* check current_thread_info->preempt_count */ 838 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
840 CURRENT_THREAD_INFO(r9, r1) 839 CURRENT_THREAD_INFO(r9, r1)
840 lwz r8,TI_FLAGS(r9)
841 andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
842 beq+ 1f
843
844 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
845
846 lwz r3,GPR1(r1)
847 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
848 mr r4,r1 /* src: current exception frame */
849 mr r1,r3 /* Reroute the trampoline frame to r1 */
850
851 /* Copy from the original to the trampoline. */
852 li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
853 li r6,0 /* start offset: 0 */
854 mtctr r5
8552: lwzx r0,r6,r4
856 stwx r0,r6,r3
857 addi r6,r6,4
858 bdnz 2b
859
860 /* Do real store operation to complete stwu */
861 lwz r5,GPR1(r1)
862 stw r8,0(r5)
863
864 /* Clear _TIF_EMULATE_STACK_STORE flag */
865 lis r11,_TIF_EMULATE_STACK_STORE@h
866 addi r5,r9,TI_FLAGS
8670: lwarx r8,0,r5
868 andc r8,r8,r11
869#ifdef CONFIG_IBM405_ERR77
870 dcbt 0,r5
871#endif
872 stwcx. r8,0,r5
873 bne- 0b
8741:
875
876#ifdef CONFIG_PREEMPT
877 /* check current_thread_info->preempt_count */
841 lwz r0,TI_PREEMPT(r9) 878 lwz r0,TI_PREEMPT(r9)
842 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ 879 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
843 bne restore 880 bne restore
844 lwz r0,TI_FLAGS(r9) 881 andi. r8,r8,_TIF_NEED_RESCHED
845 andi. r0,r0,_TIF_NEED_RESCHED
846 beq+ restore 882 beq+ restore
883 lwz r3,_MSR(r1)
847 andi. r0,r3,MSR_EE /* interrupts off? */ 884 andi. r0,r3,MSR_EE /* interrupts off? */
848 beq restore /* don't schedule if so */ 885 beq restore /* don't schedule if so */
849#ifdef CONFIG_TRACE_IRQFLAGS 886#ifdef CONFIG_TRACE_IRQFLAGS
@@ -864,8 +901,6 @@ resume_kernel:
864 */ 901 */
865 bl trace_hardirqs_on 902 bl trace_hardirqs_on
866#endif 903#endif
867#else
868resume_kernel:
869#endif /* CONFIG_PREEMPT */ 904#endif /* CONFIG_PREEMPT */
870 905
871 /* interrupts are hard-disabled at this point */ 906 /* interrupts are hard-disabled at this point */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index b40e0b4815b3..0e931aaffca2 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -593,6 +593,41 @@ _GLOBAL(ret_from_except_lite)
593 b .ret_from_except 593 b .ret_from_except
594 594
595resume_kernel: 595resume_kernel:
596 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
597 CURRENT_THREAD_INFO(r9, r1)
598 ld r8,TI_FLAGS(r9)
599 andis. r8,r8,_TIF_EMULATE_STACK_STORE@h
600 beq+ 1f
601
602 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
603
604 lwz r3,GPR1(r1)
605 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
606 mr r4,r1 /* src: current exception frame */
607 mr r1,r3 /* Reroute the trampoline frame to r1 */
608
609 /* Copy from the original to the trampoline. */
610 li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
611 li r6,0 /* start offset: 0 */
612 mtctr r5
6132: ldx r0,r6,r4
614 stdx r0,r6,r3
615 addi r6,r6,8
616 bdnz 2b
617
618 /* Do real store operation to complete stwu */
619 lwz r5,GPR1(r1)
620 std r8,0(r5)
621
622 /* Clear _TIF_EMULATE_STACK_STORE flag */
623 lis r11,_TIF_EMULATE_STACK_STORE@h
624 addi r5,r9,TI_FLAGS
625 ldarx r4,0,r5
626 andc r4,r4,r11
627 stdcx. r4,0,r5
628 bne- 0b
6291:
630
596#ifdef CONFIG_PREEMPT 631#ifdef CONFIG_PREEMPT
597 /* Check if we need to preempt */ 632 /* Check if we need to preempt */
598 andi. r0,r4,_TIF_NEED_RESCHED 633 andi. r0,r4,_TIF_NEED_RESCHED
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 98be7f0cd227..4684e33a26c3 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -25,6 +25,8 @@
25#include <asm/ppc-opcode.h> 25#include <asm/ppc-opcode.h>
26#include <asm/mmu.h> 26#include <asm/mmu.h>
27#include <asm/hw_irq.h> 27#include <asm/hw_irq.h>
28#include <asm/kvm_asm.h>
29#include <asm/kvm_booke_hv_asm.h>
28 30
29/* XXX This will ultimately add space for a special exception save 31/* XXX This will ultimately add space for a special exception save
30 * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc... 32 * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -35,16 +37,18 @@
35#define SPECIAL_EXC_FRAME_SIZE INT_FRAME_SIZE 37#define SPECIAL_EXC_FRAME_SIZE INT_FRAME_SIZE
36 38
37/* Exception prolog code for all exceptions */ 39/* Exception prolog code for all exceptions */
38#define EXCEPTION_PROLOG(n, type, addition) \ 40#define EXCEPTION_PROLOG(n, intnum, type, addition) \
39 mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \ 41 mtspr SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */ \
40 mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \ 42 mfspr r13,SPRN_SPRG_PACA; /* get PACA */ \
41 std r10,PACA_EX##type+EX_R10(r13); \ 43 std r10,PACA_EX##type+EX_R10(r13); \
42 std r11,PACA_EX##type+EX_R11(r13); \ 44 std r11,PACA_EX##type+EX_R11(r13); \
45 PROLOG_STORE_RESTORE_SCRATCH_##type; \
43 mfcr r10; /* save CR */ \ 46 mfcr r10; /* save CR */ \
47 mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \
48 DO_KVM intnum,SPRN_##type##_SRR1; /* KVM hook */ \
49 stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
44 addition; /* additional code for that exc. */ \ 50 addition; /* additional code for that exc. */ \
45 std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \ 51 std r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */ \
46 stw r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
47 mfspr r11,SPRN_##type##_SRR1;/* what are we coming from */ \
48 type##_SET_KSTACK; /* get special stack if necessary */\ 52 type##_SET_KSTACK; /* get special stack if necessary */\
49 andi. r10,r11,MSR_PR; /* save stack pointer */ \ 53 andi. r10,r11,MSR_PR; /* save stack pointer */ \
50 beq 1f; /* branch around if supervisor */ \ 54 beq 1f; /* branch around if supervisor */ \
@@ -59,6 +63,10 @@
59#define SPRN_GEN_SRR0 SPRN_SRR0 63#define SPRN_GEN_SRR0 SPRN_SRR0
60#define SPRN_GEN_SRR1 SPRN_SRR1 64#define SPRN_GEN_SRR1 SPRN_SRR1
61 65
66#define GDBELL_SET_KSTACK GEN_SET_KSTACK
67#define SPRN_GDBELL_SRR0 SPRN_GSRR0
68#define SPRN_GDBELL_SRR1 SPRN_GSRR1
69
62#define CRIT_SET_KSTACK \ 70#define CRIT_SET_KSTACK \
63 ld r1,PACA_CRIT_STACK(r13); \ 71 ld r1,PACA_CRIT_STACK(r13); \
64 subi r1,r1,SPECIAL_EXC_FRAME_SIZE; 72 subi r1,r1,SPECIAL_EXC_FRAME_SIZE;
@@ -77,29 +85,46 @@
77#define SPRN_MC_SRR0 SPRN_MCSRR0 85#define SPRN_MC_SRR0 SPRN_MCSRR0
78#define SPRN_MC_SRR1 SPRN_MCSRR1 86#define SPRN_MC_SRR1 SPRN_MCSRR1
79 87
80#define NORMAL_EXCEPTION_PROLOG(n, addition) \ 88#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \
81 EXCEPTION_PROLOG(n, GEN, addition##_GEN(n)) 89 EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
90
91#define CRIT_EXCEPTION_PROLOG(n, intnum, addition) \
92 EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))
82 93
83#define CRIT_EXCEPTION_PROLOG(n, addition) \ 94#define DBG_EXCEPTION_PROLOG(n, intnum, addition) \
84 EXCEPTION_PROLOG(n, CRIT, addition##_CRIT(n)) 95 EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))
85 96
86#define DBG_EXCEPTION_PROLOG(n, addition) \ 97#define MC_EXCEPTION_PROLOG(n, intnum, addition) \
87 EXCEPTION_PROLOG(n, DBG, addition##_DBG(n)) 98 EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))
88 99
89#define MC_EXCEPTION_PROLOG(n, addition) \ 100#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition) \
90 EXCEPTION_PROLOG(n, MC, addition##_MC(n)) 101 EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))
91 102
103/*
104 * Store user-visible scratch in PACA exception slots and restore proper value
105 */
106#define PROLOG_STORE_RESTORE_SCRATCH_GEN
107#define PROLOG_STORE_RESTORE_SCRATCH_GDBELL
108#define PROLOG_STORE_RESTORE_SCRATCH_DBG
109#define PROLOG_STORE_RESTORE_SCRATCH_MC
110
111#define PROLOG_STORE_RESTORE_SCRATCH_CRIT \
112 mfspr r10,SPRN_SPRG_CRIT_SCRATCH; /* get r13 */ \
113 std r10,PACA_EXCRIT+EX_R13(r13); \
114 ld r11,PACA_SPRG3(r13); \
115 mtspr SPRN_SPRG_CRIT_SCRATCH,r11;
92 116
93/* Variants of the "addition" argument for the prolog 117/* Variants of the "addition" argument for the prolog
94 */ 118 */
95#define PROLOG_ADDITION_NONE_GEN(n) 119#define PROLOG_ADDITION_NONE_GEN(n)
120#define PROLOG_ADDITION_NONE_GDBELL(n)
96#define PROLOG_ADDITION_NONE_CRIT(n) 121#define PROLOG_ADDITION_NONE_CRIT(n)
97#define PROLOG_ADDITION_NONE_DBG(n) 122#define PROLOG_ADDITION_NONE_DBG(n)
98#define PROLOG_ADDITION_NONE_MC(n) 123#define PROLOG_ADDITION_NONE_MC(n)
99 124
100#define PROLOG_ADDITION_MASKABLE_GEN(n) \ 125#define PROLOG_ADDITION_MASKABLE_GEN(n) \
101 lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ 126 lbz r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
102 cmpwi cr0,r11,0; /* yes -> go out of line */ \ 127 cmpwi cr0,r10,0; /* yes -> go out of line */ \
103 beq masked_interrupt_book3e_##n 128 beq masked_interrupt_book3e_##n
104 129
105#define PROLOG_ADDITION_2REGS_GEN(n) \ 130#define PROLOG_ADDITION_2REGS_GEN(n) \
@@ -233,9 +258,9 @@ exc_##n##_bad_stack: \
2331: 2581:
234 259
235 260
236#define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \ 261#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack) \
237 START_EXCEPTION(label); \ 262 START_EXCEPTION(label); \
238 NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \ 263 NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
239 EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \ 264 EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \
240 ack(r8); \ 265 ack(r8); \
241 CHECK_NAPPING(); \ 266 CHECK_NAPPING(); \
@@ -286,7 +311,8 @@ interrupt_end_book3e:
286 311
287/* Critical Input Interrupt */ 312/* Critical Input Interrupt */
288 START_EXCEPTION(critical_input); 313 START_EXCEPTION(critical_input);
289 CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE) 314 CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
315 PROLOG_ADDITION_NONE)
290// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE) 316// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE)
291// bl special_reg_save_crit 317// bl special_reg_save_crit
292// CHECK_NAPPING(); 318// CHECK_NAPPING();
@@ -297,7 +323,8 @@ interrupt_end_book3e:
297 323
298/* Machine Check Interrupt */ 324/* Machine Check Interrupt */
299 START_EXCEPTION(machine_check); 325 START_EXCEPTION(machine_check);
300 CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE) 326 MC_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_MACHINE_CHECK,
327 PROLOG_ADDITION_NONE)
301// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE) 328// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE)
302// bl special_reg_save_mc 329// bl special_reg_save_mc
303// addi r3,r1,STACK_FRAME_OVERHEAD 330// addi r3,r1,STACK_FRAME_OVERHEAD
@@ -308,7 +335,8 @@ interrupt_end_book3e:
308 335
309/* Data Storage Interrupt */ 336/* Data Storage Interrupt */
310 START_EXCEPTION(data_storage) 337 START_EXCEPTION(data_storage)
311 NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS) 338 NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
339 PROLOG_ADDITION_2REGS)
312 mfspr r14,SPRN_DEAR 340 mfspr r14,SPRN_DEAR
313 mfspr r15,SPRN_ESR 341 mfspr r15,SPRN_ESR
314 EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE) 342 EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE)
@@ -316,18 +344,21 @@ interrupt_end_book3e:
316 344
317/* Instruction Storage Interrupt */ 345/* Instruction Storage Interrupt */
318 START_EXCEPTION(instruction_storage); 346 START_EXCEPTION(instruction_storage);
319 NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS) 347 NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
348 PROLOG_ADDITION_2REGS)
320 li r15,0 349 li r15,0
321 mr r14,r10 350 mr r14,r10
322 EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE) 351 EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE)
323 b storage_fault_common 352 b storage_fault_common
324 353
325/* External Input Interrupt */ 354/* External Input Interrupt */
326 MASKABLE_EXCEPTION(0x500, external_input, .do_IRQ, ACK_NONE) 355 MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
356 external_input, .do_IRQ, ACK_NONE)
327 357
328/* Alignment */ 358/* Alignment */
329 START_EXCEPTION(alignment); 359 START_EXCEPTION(alignment);
330 NORMAL_EXCEPTION_PROLOG(0x600, PROLOG_ADDITION_2REGS) 360 NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
361 PROLOG_ADDITION_2REGS)
331 mfspr r14,SPRN_DEAR 362 mfspr r14,SPRN_DEAR
332 mfspr r15,SPRN_ESR 363 mfspr r15,SPRN_ESR
333 EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP) 364 EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP)
@@ -335,7 +366,8 @@ interrupt_end_book3e:
335 366
336/* Program Interrupt */ 367/* Program Interrupt */
337 START_EXCEPTION(program); 368 START_EXCEPTION(program);
338 NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG) 369 NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
370 PROLOG_ADDITION_1REG)
339 mfspr r14,SPRN_ESR 371 mfspr r14,SPRN_ESR
340 EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE) 372 EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE)
341 std r14,_DSISR(r1) 373 std r14,_DSISR(r1)
@@ -347,7 +379,8 @@ interrupt_end_book3e:
347 379
348/* Floating Point Unavailable Interrupt */ 380/* Floating Point Unavailable Interrupt */
349 START_EXCEPTION(fp_unavailable); 381 START_EXCEPTION(fp_unavailable);
350 NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE) 382 NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
383 PROLOG_ADDITION_NONE)
351 /* we can probably do a shorter exception entry for that one... */ 384 /* we can probably do a shorter exception entry for that one... */
352 EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP) 385 EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP)
353 ld r12,_MSR(r1) 386 ld r12,_MSR(r1)
@@ -362,14 +395,17 @@ interrupt_end_book3e:
362 b .ret_from_except 395 b .ret_from_except
363 396
364/* Decrementer Interrupt */ 397/* Decrementer Interrupt */
365 MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC) 398 MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
399 decrementer, .timer_interrupt, ACK_DEC)
366 400
367/* Fixed Interval Timer Interrupt */ 401/* Fixed Interval Timer Interrupt */
368 MASKABLE_EXCEPTION(0x980, fixed_interval, .unknown_exception, ACK_FIT) 402 MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
403 fixed_interval, .unknown_exception, ACK_FIT)
369 404
370/* Watchdog Timer Interrupt */ 405/* Watchdog Timer Interrupt */
371 START_EXCEPTION(watchdog); 406 START_EXCEPTION(watchdog);
372 CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE) 407 CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
408 PROLOG_ADDITION_NONE)
373// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE) 409// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE)
374// bl special_reg_save_crit 410// bl special_reg_save_crit
375// CHECK_NAPPING(); 411// CHECK_NAPPING();
@@ -388,7 +424,8 @@ interrupt_end_book3e:
388 424
389/* Auxiliary Processor Unavailable Interrupt */ 425/* Auxiliary Processor Unavailable Interrupt */
390 START_EXCEPTION(ap_unavailable); 426 START_EXCEPTION(ap_unavailable);
391 NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) 427 NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
428 PROLOG_ADDITION_NONE)
392 EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE) 429 EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE)
393 bl .save_nvgprs 430 bl .save_nvgprs
394 addi r3,r1,STACK_FRAME_OVERHEAD 431 addi r3,r1,STACK_FRAME_OVERHEAD
@@ -397,7 +434,8 @@ interrupt_end_book3e:
397 434
398/* Debug exception as a critical interrupt*/ 435/* Debug exception as a critical interrupt*/
399 START_EXCEPTION(debug_crit); 436 START_EXCEPTION(debug_crit);
400 CRIT_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS) 437 CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
438 PROLOG_ADDITION_2REGS)
401 439
402 /* 440 /*
403 * If there is a single step or branch-taken exception in an 441 * If there is a single step or branch-taken exception in an
@@ -431,7 +469,7 @@ interrupt_end_book3e:
431 mtcr r10 469 mtcr r10
432 ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */ 470 ld r10,PACA_EXCRIT+EX_R10(r13) /* restore registers */
433 ld r11,PACA_EXCRIT+EX_R11(r13) 471 ld r11,PACA_EXCRIT+EX_R11(r13)
434 mfspr r13,SPRN_SPRG_CRIT_SCRATCH 472 ld r13,PACA_EXCRIT+EX_R13(r13)
435 rfci 473 rfci
436 474
437 /* Normal debug exception */ 475 /* Normal debug exception */
@@ -444,7 +482,7 @@ interrupt_end_book3e:
444 /* Now we mash up things to make it look like we are coming on a 482 /* Now we mash up things to make it look like we are coming on a
445 * normal exception 483 * normal exception
446 */ 484 */
447 mfspr r15,SPRN_SPRG_CRIT_SCRATCH 485 ld r15,PACA_EXCRIT+EX_R13(r13)
448 mtspr SPRN_SPRG_GEN_SCRATCH,r15 486 mtspr SPRN_SPRG_GEN_SCRATCH,r15
449 mfspr r14,SPRN_DBSR 487 mfspr r14,SPRN_DBSR
450 EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE) 488 EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE)
@@ -462,7 +500,8 @@ kernel_dbg_exc:
462 500
463/* Debug exception as a debug interrupt*/ 501/* Debug exception as a debug interrupt*/
464 START_EXCEPTION(debug_debug); 502 START_EXCEPTION(debug_debug);
465 DBG_EXCEPTION_PROLOG(0xd08, PROLOG_ADDITION_2REGS) 503 DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
504 PROLOG_ADDITION_2REGS)
466 505
467 /* 506 /*
468 * If there is a single step or branch-taken exception in an 507 * If there is a single step or branch-taken exception in an
@@ -523,18 +562,21 @@ kernel_dbg_exc:
523 b .ret_from_except 562 b .ret_from_except
524 563
525 START_EXCEPTION(perfmon); 564 START_EXCEPTION(perfmon);
526 NORMAL_EXCEPTION_PROLOG(0x260, PROLOG_ADDITION_NONE) 565 NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
566 PROLOG_ADDITION_NONE)
527 EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE) 567 EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE)
528 addi r3,r1,STACK_FRAME_OVERHEAD 568 addi r3,r1,STACK_FRAME_OVERHEAD
529 bl .performance_monitor_exception 569 bl .performance_monitor_exception
530 b .ret_from_except_lite 570 b .ret_from_except_lite
531 571
532/* Doorbell interrupt */ 572/* Doorbell interrupt */
533 MASKABLE_EXCEPTION(0x280, doorbell, .doorbell_exception, ACK_NONE) 573 MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
574 doorbell, .doorbell_exception, ACK_NONE)
534 575
535/* Doorbell critical Interrupt */ 576/* Doorbell critical Interrupt */
536 START_EXCEPTION(doorbell_crit); 577 START_EXCEPTION(doorbell_crit);
537 CRIT_EXCEPTION_PROLOG(0x2a0, PROLOG_ADDITION_NONE) 578 CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
579 PROLOG_ADDITION_NONE)
538// EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE) 580// EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE)
539// bl special_reg_save_crit 581// bl special_reg_save_crit
540// CHECK_NAPPING(); 582// CHECK_NAPPING();
@@ -543,12 +585,24 @@ kernel_dbg_exc:
543// b ret_from_crit_except 585// b ret_from_crit_except
544 b . 586 b .
545 587
546/* Guest Doorbell */ 588/*
547 MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE) 589 * Guest doorbell interrupt
590 * This general exception use GSRRx save/restore registers
591 */
592 START_EXCEPTION(guest_doorbell);
593 GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
594 PROLOG_ADDITION_NONE)
595 EXCEPTION_COMMON(0x2c0, PACA_EXGEN, INTS_KEEP)
596 addi r3,r1,STACK_FRAME_OVERHEAD
597 bl .save_nvgprs
598 INTS_RESTORE_HARD
599 bl .unknown_exception
600 b .ret_from_except
548 601
549/* Guest Doorbell critical Interrupt */ 602/* Guest Doorbell critical Interrupt */
550 START_EXCEPTION(guest_doorbell_crit); 603 START_EXCEPTION(guest_doorbell_crit);
551 CRIT_EXCEPTION_PROLOG(0x2e0, PROLOG_ADDITION_NONE) 604 CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
605 PROLOG_ADDITION_NONE)
552// EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE) 606// EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE)
553// bl special_reg_save_crit 607// bl special_reg_save_crit
554// CHECK_NAPPING(); 608// CHECK_NAPPING();
@@ -559,7 +613,8 @@ kernel_dbg_exc:
559 613
560/* Hypervisor call */ 614/* Hypervisor call */
561 START_EXCEPTION(hypercall); 615 START_EXCEPTION(hypercall);
562 NORMAL_EXCEPTION_PROLOG(0x310, PROLOG_ADDITION_NONE) 616 NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
617 PROLOG_ADDITION_NONE)
563 EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP) 618 EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP)
564 addi r3,r1,STACK_FRAME_OVERHEAD 619 addi r3,r1,STACK_FRAME_OVERHEAD
565 bl .save_nvgprs 620 bl .save_nvgprs
@@ -569,7 +624,8 @@ kernel_dbg_exc:
569 624
570/* Embedded Hypervisor priviledged */ 625/* Embedded Hypervisor priviledged */
571 START_EXCEPTION(ehpriv); 626 START_EXCEPTION(ehpriv);
572 NORMAL_EXCEPTION_PROLOG(0x320, PROLOG_ADDITION_NONE) 627 NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
628 PROLOG_ADDITION_NONE)
573 EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP) 629 EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP)
574 addi r3,r1,STACK_FRAME_OVERHEAD 630 addi r3,r1,STACK_FRAME_OVERHEAD
575 bl .save_nvgprs 631 bl .save_nvgprs
@@ -582,44 +638,42 @@ kernel_dbg_exc:
582 * accordingly and if the interrupt is level sensitive, we hard disable 638 * accordingly and if the interrupt is level sensitive, we hard disable
583 */ 639 */
584 640
641.macro masked_interrupt_book3e paca_irq full_mask
642 lbz r10,PACAIRQHAPPENED(r13)
643 ori r10,r10,\paca_irq
644 stb r10,PACAIRQHAPPENED(r13)
645
646 .if \full_mask == 1
647 rldicl r10,r11,48,1 /* clear MSR_EE */
648 rotldi r11,r10,16
649 mtspr SPRN_SRR1,r11
650 .endif
651
652 lwz r11,PACA_EXGEN+EX_CR(r13)
653 mtcr r11
654 ld r10,PACA_EXGEN+EX_R10(r13)
655 ld r11,PACA_EXGEN+EX_R11(r13)
656 mfspr r13,SPRN_SPRG_GEN_SCRATCH
657 rfi
658 b .
659.endm
660
585masked_interrupt_book3e_0x500: 661masked_interrupt_book3e_0x500:
586 /* XXX When adding support for EPR, use PACA_IRQ_EE_EDGE */ 662 // XXX When adding support for EPR, use PACA_IRQ_EE_EDGE
587 li r11,PACA_IRQ_EE 663 masked_interrupt_book3e PACA_IRQ_EE 1
588 b masked_interrupt_book3e_full_mask
589 664
590masked_interrupt_book3e_0x900: 665masked_interrupt_book3e_0x900:
591 ACK_DEC(r11); 666 ACK_DEC(r10);
592 li r11,PACA_IRQ_DEC 667 masked_interrupt_book3e PACA_IRQ_DEC 0
593 b masked_interrupt_book3e_no_mask 668
594masked_interrupt_book3e_0x980: 669masked_interrupt_book3e_0x980:
595 ACK_FIT(r11); 670 ACK_FIT(r10);
596 li r11,PACA_IRQ_DEC 671 masked_interrupt_book3e PACA_IRQ_DEC 0
597 b masked_interrupt_book3e_no_mask 672
598masked_interrupt_book3e_0x280: 673masked_interrupt_book3e_0x280:
599masked_interrupt_book3e_0x2c0: 674masked_interrupt_book3e_0x2c0:
600 li r11,PACA_IRQ_DBELL 675 masked_interrupt_book3e PACA_IRQ_DBELL 0
601 b masked_interrupt_book3e_no_mask
602 676
603masked_interrupt_book3e_no_mask:
604 mtcr r10
605 lbz r10,PACAIRQHAPPENED(r13)
606 or r10,r10,r11
607 stb r10,PACAIRQHAPPENED(r13)
608 b 1f
609masked_interrupt_book3e_full_mask:
610 mtcr r10
611 lbz r10,PACAIRQHAPPENED(r13)
612 or r10,r10,r11
613 stb r10,PACAIRQHAPPENED(r13)
614 mfspr r10,SPRN_SRR1
615 rldicl r11,r10,48,1 /* clear MSR_EE */
616 rotldi r10,r11,16
617 mtspr SPRN_SRR1,r10
6181: ld r10,PACA_EXGEN+EX_R10(r13);
619 ld r11,PACA_EXGEN+EX_R11(r13);
620 mfspr r13,SPRN_SPRG_GEN_SCRATCH;
621 rfi
622 b .
623/* 677/*
624 * Called from arch_local_irq_enable when an interrupt needs 678 * Called from arch_local_irq_enable when an interrupt needs
625 * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280 679 * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280
@@ -1302,25 +1356,11 @@ _GLOBAL(setup_perfmon_ivor)
1302_GLOBAL(setup_doorbell_ivors) 1356_GLOBAL(setup_doorbell_ivors)
1303 SET_IVOR(36, 0x280) /* Processor Doorbell */ 1357 SET_IVOR(36, 0x280) /* Processor Doorbell */
1304 SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */ 1358 SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
1305
1306 /* Check MMUCFG[LPIDSIZE] to determine if we have category E.HV */
1307 mfspr r10,SPRN_MMUCFG
1308 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
1309 beqlr
1310
1311 SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
1312 SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
1313 blr 1359 blr
1314 1360
1315_GLOBAL(setup_ehv_ivors) 1361_GLOBAL(setup_ehv_ivors)
1316 /*
1317 * We may be running as a guest and lack E.HV even on a chip
1318 * that normally has it.
1319 */
1320 mfspr r10,SPRN_MMUCFG
1321 rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
1322 beqlr
1323
1324 SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */ 1362 SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
1325 SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */ 1363 SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
1364 SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
1365 SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
1326 blr 1366 blr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 39aa97d3ff88..10b658ad65e1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -275,6 +275,31 @@ vsx_unavailable_pSeries_1:
275 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint) 275 STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
276 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300) 276 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
277 277
278 . = 0x1500
279 .global denorm_Hypervisor
280denorm_exception_hv:
281 HMT_MEDIUM
282 mtspr SPRN_SPRG_HSCRATCH0,r13
283 mfspr r13,SPRN_SPRG_HPACA
284 std r9,PACA_EXGEN+EX_R9(r13)
285 std r10,PACA_EXGEN+EX_R10(r13)
286 std r11,PACA_EXGEN+EX_R11(r13)
287 std r12,PACA_EXGEN+EX_R12(r13)
288 mfspr r9,SPRN_SPRG_HSCRATCH0
289 std r9,PACA_EXGEN+EX_R13(r13)
290 mfcr r9
291
292#ifdef CONFIG_PPC_DENORMALISATION
293 mfspr r10,SPRN_HSRR1
294 mfspr r11,SPRN_HSRR0 /* save HSRR0 */
295 andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
296 addi r11,r11,-4 /* HSRR0 is next instruction */
297 bne+ denorm_assist
298#endif
299
300 EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
301 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
302
278#ifdef CONFIG_CBE_RAS 303#ifdef CONFIG_CBE_RAS
279 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance) 304 STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
280 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602) 305 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
@@ -336,6 +361,103 @@ do_stab_bolted_pSeries:
336 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900) 361 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
337 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982) 362 KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
338 363
364#ifdef CONFIG_PPC_DENORMALISATION
365denorm_assist:
366BEGIN_FTR_SECTION
367/*
368 * To denormalise we need to move a copy of the register to itself.
369 * For POWER6 do that here for all FP regs.
370 */
371 mfmsr r10
372 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
373 xori r10,r10,(MSR_FE0|MSR_FE1)
374 mtmsrd r10
375 sync
376 fmr 0,0
377 fmr 1,1
378 fmr 2,2
379 fmr 3,3
380 fmr 4,4
381 fmr 5,5
382 fmr 6,6
383 fmr 7,7
384 fmr 8,8
385 fmr 9,9
386 fmr 10,10
387 fmr 11,11
388 fmr 12,12
389 fmr 13,13
390 fmr 14,14
391 fmr 15,15
392 fmr 16,16
393 fmr 17,17
394 fmr 18,18
395 fmr 19,19
396 fmr 20,20
397 fmr 21,21
398 fmr 22,22
399 fmr 23,23
400 fmr 24,24
401 fmr 25,25
402 fmr 26,26
403 fmr 27,27
404 fmr 28,28
405 fmr 29,29
406 fmr 30,30
407 fmr 31,31
408FTR_SECTION_ELSE
409/*
410 * To denormalise we need to move a copy of the register to itself.
411 * For POWER7 do that here for the first 32 VSX registers only.
412 */
413 mfmsr r10
414 oris r10,r10,MSR_VSX@h
415 mtmsrd r10
416 sync
417 XVCPSGNDP(0,0,0)
418 XVCPSGNDP(1,1,1)
419 XVCPSGNDP(2,2,2)
420 XVCPSGNDP(3,3,3)
421 XVCPSGNDP(4,4,4)
422 XVCPSGNDP(5,5,5)
423 XVCPSGNDP(6,6,6)
424 XVCPSGNDP(7,7,7)
425 XVCPSGNDP(8,8,8)
426 XVCPSGNDP(9,9,9)
427 XVCPSGNDP(10,10,10)
428 XVCPSGNDP(11,11,11)
429 XVCPSGNDP(12,12,12)
430 XVCPSGNDP(13,13,13)
431 XVCPSGNDP(14,14,14)
432 XVCPSGNDP(15,15,15)
433 XVCPSGNDP(16,16,16)
434 XVCPSGNDP(17,17,17)
435 XVCPSGNDP(18,18,18)
436 XVCPSGNDP(19,19,19)
437 XVCPSGNDP(20,20,20)
438 XVCPSGNDP(21,21,21)
439 XVCPSGNDP(22,22,22)
440 XVCPSGNDP(23,23,23)
441 XVCPSGNDP(24,24,24)
442 XVCPSGNDP(25,25,25)
443 XVCPSGNDP(26,26,26)
444 XVCPSGNDP(27,27,27)
445 XVCPSGNDP(28,28,28)
446 XVCPSGNDP(29,29,29)
447 XVCPSGNDP(30,30,30)
448 XVCPSGNDP(31,31,31)
449ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
450 mtspr SPRN_HSRR0,r11
451 mtcrf 0x80,r9
452 ld r9,PACA_EXGEN+EX_R9(r13)
453 ld r10,PACA_EXGEN+EX_R10(r13)
454 ld r11,PACA_EXGEN+EX_R11(r13)
455 ld r12,PACA_EXGEN+EX_R12(r13)
456 ld r13,PACA_EXGEN+EX_R13(r13)
457 HRFID
458 b .
459#endif
460
339 .align 7 461 .align 7
340 /* moved from 0xe00 */ 462 /* moved from 0xe00 */
341 STD_EXCEPTION_HV(., 0xe02, h_data_storage) 463 STD_EXCEPTION_HV(., 0xe02, h_data_storage)
@@ -495,6 +617,7 @@ machine_check_common:
495 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) 617 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
496 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception) 618 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
497 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) 619 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
620 STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception)
498#ifdef CONFIG_ALTIVEC 621#ifdef CONFIG_ALTIVEC
499 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) 622 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
500#else 623#else
@@ -960,7 +1083,9 @@ _GLOBAL(do_stab_bolted)
960 rldimi r10,r11,7,52 /* r10 = first ste of the group */ 1083 rldimi r10,r11,7,52 /* r10 = first ste of the group */
961 1084
962 /* Calculate VSID */ 1085 /* Calculate VSID */
963 /* This is a kernel address, so protovsid = ESID */ 1086 /* This is a kernel address, so protovsid = ESID | 1 << 37 */
1087 li r9,0x1
1088 rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
964 ASM_VSID_SCRAMBLE(r11, r9, 256M) 1089 ASM_VSID_SCRAMBLE(r11, r9, 256M)
965 rldic r9,r11,12,16 /* r9 = vsid << 12 */ 1090 rldic r9,r11,12,16 /* r9 = vsid << 12 */
966 1091
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 18bdf74fa164..06c8202a69cf 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -289,8 +289,7 @@ int __init fadump_reserve_mem(void)
289 else 289 else
290 memory_limit = memblock_end_of_DRAM(); 290 memory_limit = memblock_end_of_DRAM();
291 printk(KERN_INFO "Adjusted memory_limit for firmware-assisted" 291 printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
292 " dump, now %#016llx\n", 292 " dump, now %#016llx\n", memory_limit);
293 (unsigned long long)memory_limit);
294 } 293 }
295 if (memory_limit) 294 if (memory_limit)
296 memory_boundary = memory_limit; 295 memory_boundary = memory_limit;
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 0f59863c3ade..6f62a737f607 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -895,15 +895,11 @@ _GLOBAL(__setup_e500mc_ivors)
895 mtspr SPRN_IVOR36,r3 895 mtspr SPRN_IVOR36,r3
896 li r3,CriticalDoorbell@l 896 li r3,CriticalDoorbell@l
897 mtspr SPRN_IVOR37,r3 897 mtspr SPRN_IVOR37,r3
898 sync
899 blr
898 900
899 /* 901/* setup ehv ivors for */
900 * We only want to touch IVOR38-41 if we're running on hardware 902_GLOBAL(__setup_ehv_ivors)
901 * that supports category E.HV. The architectural way to determine
902 * this is MMUCFG[LPIDSIZE].
903 */
904 mfspr r3, SPRN_MMUCFG
905 andis. r3, r3, MMUCFG_LPIDSIZE@h
906 beq no_hv
907 li r3,GuestDoorbell@l 903 li r3,GuestDoorbell@l
908 mtspr SPRN_IVOR38,r3 904 mtspr SPRN_IVOR38,r3
909 li r3,CriticalGuestDoorbell@l 905 li r3,CriticalGuestDoorbell@l
@@ -912,14 +908,8 @@ _GLOBAL(__setup_e500mc_ivors)
912 mtspr SPRN_IVOR40,r3 908 mtspr SPRN_IVOR40,r3
913 li r3,Ehvpriv@l 909 li r3,Ehvpriv@l
914 mtspr SPRN_IVOR41,r3 910 mtspr SPRN_IVOR41,r3
915skip_hv_ivors:
916 sync 911 sync
917 blr 912 blr
918no_hv:
919 lwz r3, CPU_SPEC_FEATURES(r5)
920 rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
921 stw r3, CPU_SPEC_FEATURES(r5)
922 b skip_hv_ivors
923 913
924#ifdef CONFIG_SPE 914#ifdef CONFIG_SPE
925/* 915/*
@@ -1043,6 +1033,34 @@ _GLOBAL(flush_dcache_L1)
1043 1033
1044 blr 1034 blr
1045 1035
1036/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
1037_GLOBAL(__flush_disable_L1)
1038 mflr r10
1039 bl flush_dcache_L1 /* Flush L1 d-cache */
1040 mtlr r10
1041
1042 mfspr r4, SPRN_L1CSR0 /* Invalidate and disable d-cache */
1043 li r5, 2
1044 rlwimi r4, r5, 0, 3
1045
1046 msync
1047 isync
1048 mtspr SPRN_L1CSR0, r4
1049 isync
1050
10511: mfspr r4, SPRN_L1CSR0 /* Wait for the invalidate to finish */
1052 andi. r4, r4, 2
1053 bne 1b
1054
1055 mfspr r4, SPRN_L1CSR1 /* Invalidate and disable i-cache */
1056 li r5, 2
1057 rlwimi r4, r5, 0, 3
1058
1059 mtspr SPRN_L1CSR1, r4
1060 isync
1061
1062 blr
1063
1046#ifdef CONFIG_SMP 1064#ifdef CONFIG_SMP
1047/* When we get here, r24 needs to hold the CPU # */ 1065/* When we get here, r24 needs to hold the CPU # */
1048 .globl __secondary_start 1066 .globl __secondary_start
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 956a4c496de9..a89cae481b04 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -73,7 +73,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
73 * If so, DABR will be populated in single_step_dabr_instruction(). 73 * If so, DABR will be populated in single_step_dabr_instruction().
74 */ 74 */
75 if (current->thread.last_hit_ubp != bp) 75 if (current->thread.last_hit_ubp != bp)
76 set_dabr(info->address | info->type | DABR_TRANSLATION); 76 set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
77 77
78 return 0; 78 return 0;
79} 79}
@@ -97,7 +97,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
97 } 97 }
98 98
99 *slot = NULL; 99 *slot = NULL;
100 set_dabr(0); 100 set_dabr(0, 0);
101} 101}
102 102
103/* 103/*
@@ -170,6 +170,13 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
170 170
171 info->address = bp->attr.bp_addr; 171 info->address = bp->attr.bp_addr;
172 info->len = bp->attr.bp_len; 172 info->len = bp->attr.bp_len;
173 info->dabrx = DABRX_ALL;
174 if (bp->attr.exclude_user)
175 info->dabrx &= ~DABRX_USER;
176 if (bp->attr.exclude_kernel)
177 info->dabrx &= ~DABRX_KERNEL;
178 if (bp->attr.exclude_hv)
179 info->dabrx &= ~DABRX_HYP;
173 180
174 /* 181 /*
175 * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8) 182 * Since breakpoint length can be a maximum of HW_BREAKPOINT_LEN(8)
@@ -197,7 +204,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
197 204
198 info = counter_arch_bp(tsk->thread.last_hit_ubp); 205 info = counter_arch_bp(tsk->thread.last_hit_ubp);
199 regs->msr &= ~MSR_SE; 206 regs->msr &= ~MSR_SE;
200 set_dabr(info->address | info->type | DABR_TRANSLATION); 207 set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
201 tsk->thread.last_hit_ubp = NULL; 208 tsk->thread.last_hit_ubp = NULL;
202} 209}
203 210
@@ -215,7 +222,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
215 unsigned long dar = regs->dar; 222 unsigned long dar = regs->dar;
216 223
217 /* Disable breakpoints during exception handling */ 224 /* Disable breakpoints during exception handling */
218 set_dabr(0); 225 set_dabr(0, 0);
219 226
220 /* 227 /*
221 * The counter may be concurrently released but that can only 228 * The counter may be concurrently released but that can only
@@ -281,7 +288,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
281 if (!info->extraneous_interrupt) 288 if (!info->extraneous_interrupt)
282 perf_bp_event(bp, regs); 289 perf_bp_event(bp, regs);
283 290
284 set_dabr(info->address | info->type | DABR_TRANSLATION); 291 set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
285out: 292out:
286 rcu_read_unlock(); 293 rcu_read_unlock();
287 return rc; 294 return rc;
@@ -294,7 +301,7 @@ int __kprobes single_step_dabr_instruction(struct die_args *args)
294{ 301{
295 struct pt_regs *regs = args->regs; 302 struct pt_regs *regs = args->regs;
296 struct perf_event *bp = NULL; 303 struct perf_event *bp = NULL;
297 struct arch_hw_breakpoint *bp_info; 304 struct arch_hw_breakpoint *info;
298 305
299 bp = current->thread.last_hit_ubp; 306 bp = current->thread.last_hit_ubp;
300 /* 307 /*
@@ -304,16 +311,16 @@ int __kprobes single_step_dabr_instruction(struct die_args *args)
304 if (!bp) 311 if (!bp)
305 return NOTIFY_DONE; 312 return NOTIFY_DONE;
306 313
307 bp_info = counter_arch_bp(bp); 314 info = counter_arch_bp(bp);
308 315
309 /* 316 /*
310 * We shall invoke the user-defined callback function in the single 317 * We shall invoke the user-defined callback function in the single
311 * stepping handler to confirm to 'trigger-after-execute' semantics 318 * stepping handler to confirm to 'trigger-after-execute' semantics
312 */ 319 */
313 if (!bp_info->extraneous_interrupt) 320 if (!info->extraneous_interrupt)
314 perf_bp_event(bp, regs); 321 perf_bp_event(bp, regs);
315 322
316 set_dabr(bp_info->address | bp_info->type | DABR_TRANSLATION); 323 set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
317 current->thread.last_hit_ubp = NULL; 324 current->thread.last_hit_ubp = NULL;
318 325
319 /* 326 /*
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index b01d14eeca8d..8220baa46faf 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -47,7 +47,6 @@
47#include <linux/stat.h> 47#include <linux/stat.h>
48#include <linux/of_platform.h> 48#include <linux/of_platform.h>
49#include <asm/ibmebus.h> 49#include <asm/ibmebus.h>
50#include <asm/abs_addr.h>
51 50
52static struct device ibmebus_bus_device = { /* fake "parent" device */ 51static struct device ibmebus_bus_device = { /* fake "parent" device */
53 .init_name = "ibmebus", 52 .init_name = "ibmebus",
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ff5a6ce027b8..8226c6cb348a 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -215,7 +215,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
215 spin_lock_irqsave(&(pool->lock), flags); 215 spin_lock_irqsave(&(pool->lock), flags);
216 216
217again: 217again:
218 if ((pass == 0) && handle && *handle) 218 if ((pass == 0) && handle && *handle &&
219 (*handle >= pool->start) && (*handle < pool->end))
219 start = *handle; 220 start = *handle;
220 else 221 else
221 start = pool->hint; 222 start = pool->hint;
@@ -236,7 +237,9 @@ again:
236 * but on second pass, start at 0 in pool 0. 237 * but on second pass, start at 0 in pool 0.
237 */ 238 */
238 if ((start & mask) >= limit || pass > 0) { 239 if ((start & mask) >= limit || pass > 0) {
240 spin_unlock(&(pool->lock));
239 pool = &(tbl->pools[0]); 241 pool = &(tbl->pools[0]);
242 spin_lock(&(pool->lock));
240 start = pool->start; 243 start = pool->start;
241 } else { 244 } else {
242 start &= mask; 245 start &= mask;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 1f017bb7a7ce..71413f41278f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -489,10 +489,10 @@ void do_IRQ(struct pt_regs *regs)
489 struct pt_regs *old_regs = set_irq_regs(regs); 489 struct pt_regs *old_regs = set_irq_regs(regs);
490 unsigned int irq; 490 unsigned int irq;
491 491
492 trace_irq_entry(regs);
493
494 irq_enter(); 492 irq_enter();
495 493
494 trace_irq_entry(regs);
495
496 check_stack_overflow(); 496 check_stack_overflow();
497 497
498 /* 498 /*
@@ -511,10 +511,10 @@ void do_IRQ(struct pt_regs *regs)
511 else 511 else
512 __get_cpu_var(irq_stat).spurious_irqs++; 512 __get_cpu_var(irq_stat).spurious_irqs++;
513 513
514 trace_irq_exit(regs);
515
514 irq_exit(); 516 irq_exit();
515 set_irq_regs(old_regs); 517 set_irq_regs(old_regs);
516
517 trace_irq_exit(regs);
518} 518}
519 519
520void __init init_IRQ(void) 520void __init init_IRQ(void)
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 5df777794403..fa9f6c72f557 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -165,7 +165,7 @@ void __init reserve_crashkernel(void)
165 if (memory_limit && memory_limit <= crashk_res.end) { 165 if (memory_limit && memory_limit <= crashk_res.end) {
166 memory_limit = crashk_res.end + 1; 166 memory_limit = crashk_res.end + 1;
167 printk("Adjusted memory limit for crashkernel, now 0x%llx\n", 167 printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
168 (unsigned long long)memory_limit); 168 memory_limit);
169 } 169 }
170 170
171 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " 171 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
@@ -204,6 +204,12 @@ static struct property crashk_size_prop = {
204 .value = &crashk_size, 204 .value = &crashk_size,
205}; 205};
206 206
207static struct property memory_limit_prop = {
208 .name = "linux,memory-limit",
209 .length = sizeof(unsigned long long),
210 .value = &memory_limit,
211};
212
207static void __init export_crashk_values(struct device_node *node) 213static void __init export_crashk_values(struct device_node *node)
208{ 214{
209 struct property *prop; 215 struct property *prop;
@@ -223,6 +229,12 @@ static void __init export_crashk_values(struct device_node *node)
223 crashk_size = resource_size(&crashk_res); 229 crashk_size = resource_size(&crashk_res);
224 prom_add_property(node, &crashk_size_prop); 230 prom_add_property(node, &crashk_size_prop);
225 } 231 }
232
233 /*
234 * memory_limit is required by the kexec-tools to limit the
235 * crash regions to the actual memory used.
236 */
237 prom_update_property(node, &memory_limit_prop);
226} 238}
227 239
228static int __init kexec_setup(void) 240static int __init kexec_setup(void)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index fbe1a12dc7f1..cd6da855090c 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -142,6 +142,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
142 new_paca->hw_cpu_id = 0xffff; 142 new_paca->hw_cpu_id = 0xffff;
143 new_paca->kexec_state = KEXEC_STATE_NONE; 143 new_paca->kexec_state = KEXEC_STATE_NONE;
144 new_paca->__current = &init_task; 144 new_paca->__current = &init_task;
145 new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
145#ifdef CONFIG_PPC_STD_MMU_64 146#ifdef CONFIG_PPC_STD_MMU_64
146 new_paca->slb_shadow_ptr = &slb_shadow[cpu]; 147 new_paca->slb_shadow_ptr = &slb_shadow[cpu];
147#endif /* CONFIG_PPC_STD_MMU_64 */ 148#endif /* CONFIG_PPC_STD_MMU_64 */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 43fea543d686..7f94f760dd0c 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -980,13 +980,14 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
980 if (i >= 3 && bus->self->transparent) 980 if (i >= 3 && bus->self->transparent)
981 continue; 981 continue;
982 982
983 /* If we are going to re-assign everything, mark the resource 983 /* If we're going to reassign everything, we can
984 * as unset and move it down to 0 984 * shrink the P2P resource to have size as being
985 * of 0 in order to save space.
985 */ 986 */
986 if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) { 987 if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
987 res->flags |= IORESOURCE_UNSET; 988 res->flags |= IORESOURCE_UNSET;
988 res->end -= res->start;
989 res->start = 0; 989 res->start = 0;
990 res->end = -1;
990 continue; 991 continue;
991 } 992 }
992 993
@@ -1248,7 +1249,14 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
1248 pr_warning("PCI: Cannot allocate resource region " 1249 pr_warning("PCI: Cannot allocate resource region "
1249 "%d of PCI bridge %d, will remap\n", i, bus->number); 1250 "%d of PCI bridge %d, will remap\n", i, bus->number);
1250 clear_resource: 1251 clear_resource:
1251 res->start = res->end = 0; 1252 /* The resource might be figured out when doing
1253 * reassignment based on the resources required
1254 * by the downstream PCI devices. Here we set
1255 * the size of the resource to be 0 in order to
1256 * save more space.
1257 */
1258 res->start = 0;
1259 res->end = -1;
1252 res->flags = 0; 1260 res->flags = 0;
1253 } 1261 }
1254 1262
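Both pci-common.c hunks replace the old "shift the window down to 0" trick with start = 0, end = -1, so that the generic size helper reports the bridge window as empty until reassignment grows it from the downstream devices' needs. The trick relies on the stock helper from linux/ioport.h, which is essentially:

    static inline resource_size_t resource_size(const struct resource *res)
    {
            return res->end - res->start + 1;
    }

    /* with res->start = 0 and res->end = (resource_size_t)-1 (all ones),
     * the +1 wraps the result to 0: a zero-sized, IORESOURCE_UNSET window */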
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index e9cb51f5f801..d5ad666efd8b 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -258,6 +258,7 @@ void do_send_trap(struct pt_regs *regs, unsigned long address,
258{ 258{
259 siginfo_t info; 259 siginfo_t info;
260 260
261 current->thread.trap_nr = signal_code;
261 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 262 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
262 11, SIGSEGV) == NOTIFY_STOP) 263 11, SIGSEGV) == NOTIFY_STOP)
263 return; 264 return;
@@ -275,6 +276,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
275{ 276{
276 siginfo_t info; 277 siginfo_t info;
277 278
279 current->thread.trap_nr = TRAP_HWBKPT;
278 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, 280 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
279 11, SIGSEGV) == NOTIFY_STOP) 281 11, SIGSEGV) == NOTIFY_STOP)
280 return; 282 return;
@@ -283,7 +285,7 @@ void do_dabr(struct pt_regs *regs, unsigned long address,
283 return; 285 return;
284 286
285 /* Clear the DABR */ 287 /* Clear the DABR */
286 set_dabr(0); 288 set_dabr(0, 0);
287 289
288 /* Deliver the signal to userspace */ 290 /* Deliver the signal to userspace */
289 info.si_signo = SIGTRAP; 291 info.si_signo = SIGTRAP;
@@ -364,18 +366,19 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
364{ 366{
365 if (thread->dabr) { 367 if (thread->dabr) {
366 thread->dabr = 0; 368 thread->dabr = 0;
367 set_dabr(0); 369 thread->dabrx = 0;
370 set_dabr(0, 0);
368 } 371 }
369} 372}
370#endif /* !CONFIG_HAVE_HW_BREAKPOINT */ 373#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
371#endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 374#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
372 375
373int set_dabr(unsigned long dabr) 376int set_dabr(unsigned long dabr, unsigned long dabrx)
374{ 377{
375 __get_cpu_var(current_dabr) = dabr; 378 __get_cpu_var(current_dabr) = dabr;
376 379
377 if (ppc_md.set_dabr) 380 if (ppc_md.set_dabr)
378 return ppc_md.set_dabr(dabr); 381 return ppc_md.set_dabr(dabr, dabrx);
379 382
380 /* XXX should we have a CPU_FTR_HAS_DABR ? */ 383 /* XXX should we have a CPU_FTR_HAS_DABR ? */
381#ifdef CONFIG_PPC_ADV_DEBUG_REGS 384#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -385,9 +388,8 @@ int set_dabr(unsigned long dabr)
385#endif 388#endif
386#elif defined(CONFIG_PPC_BOOK3S) 389#elif defined(CONFIG_PPC_BOOK3S)
387 mtspr(SPRN_DABR, dabr); 390 mtspr(SPRN_DABR, dabr);
391 mtspr(SPRN_DABRX, dabrx);
388#endif 392#endif
389
390
391 return 0; 393 return 0;
392} 394}
393 395
@@ -480,7 +482,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
480 */ 482 */
481#ifndef CONFIG_HAVE_HW_BREAKPOINT 483#ifndef CONFIG_HAVE_HW_BREAKPOINT
482 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) 484 if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
483 set_dabr(new->thread.dabr); 485 set_dabr(new->thread.dabr, new->thread.dabrx);
484#endif /* CONFIG_HAVE_HW_BREAKPOINT */ 486#endif /* CONFIG_HAVE_HW_BREAKPOINT */
485#endif 487#endif
486 488
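set_dabr() now carries the DABRX (breakpoint extension) value alongside the DABR itself, through ppc_md.set_dabr() and down to the SPRN_DABR/SPRN_DABRX writes. A hedged usage sketch; DABR_TRANSLATION and DABR_DATA_WRITE are taken to be the usual reg.h flag bits, and the exact flag mix is illustrative only:

    /* arm a doubleword-aligned write watchpoint that matches in all
     * privilege states, then clear it again as do_dabr() does */
    static void demo_hw_watchpoint(unsigned long addr)
    {
            unsigned long dabr = (addr & ~0x7UL) | DABR_TRANSLATION | DABR_DATA_WRITE;

            set_dabr(dabr, DABRX_ALL);
            /* ... wait for the match ... */
            set_dabr(0, 0);
    }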
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index f191bf02943a..37725e86651e 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -78,7 +78,7 @@ static int __init early_parse_mem(char *p)
78 return 1; 78 return 1;
79 79
80 memory_limit = PAGE_ALIGN(memparse(p, &p)); 80 memory_limit = PAGE_ALIGN(memparse(p, &p));
81 DBG("memory limit = 0x%llx\n", (unsigned long long)memory_limit); 81 DBG("memory limit = 0x%llx\n", memory_limit);
82 82
83 return 0; 83 return 0;
84} 84}
@@ -661,7 +661,7 @@ void __init early_init_devtree(void *params)
661 661
662 /* make sure we've parsed cmdline for mem= before this */ 662 /* make sure we've parsed cmdline for mem= before this */
663 if (memory_limit) 663 if (memory_limit)
664 first_memblock_size = min(first_memblock_size, memory_limit); 664 first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
665 setup_initial_memory_limit(memstart_addr, first_memblock_size); 665 setup_initial_memory_limit(memstart_addr, first_memblock_size);
666 /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */ 666 /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
667 memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); 667 memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
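The two prom.c tweaks go together: the explicit cast at the DBG() site goes away and min() becomes min_t(u64, ...), both consistent with memory_limit presumably now being a plain unsigned long long rather than phys_addr_t. min_t() just forces both operands to one named type before comparing; its shape is roughly:

    /* simplified; the real kernel.h macro uses temporaries so each
     * argument is evaluated only once */
    #define min_t(type, x, y)       min((type)(x), (type)(y))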
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 47834a3f4938..cb6c123722a2 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1748,7 +1748,7 @@ static void __init prom_initialize_tce_table(void)
1748 * else will impact performance, so we always allocate 8MB. 1748 * else will impact performance, so we always allocate 8MB.
1749 * Anton 1749 * Anton
1750 */ 1750 */
1751 if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p)) 1751 if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p))
1752 minsize = 8UL << 20; 1752 minsize = 8UL << 20;
1753 else 1753 else
1754 minsize = 4UL << 20; 1754 minsize = 4UL << 20;
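PV_POWER4/PV_POWER4p give way to PVR_* names and a pvr_version_is() helper that compares the PVR version field. Only the name and constants appear in the hunk; the definition below is an assumption about its shape:

    /* assumed: match on the version bits of the PVR only */
    #define pvr_version_is(id)      (PVR_VER(mfspr(SPRN_PVR)) == (id))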
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index c10fc28b9092..79d8e56470df 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -960,6 +960,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
960 thread->ptrace_bps[0] = bp; 960 thread->ptrace_bps[0] = bp;
961 ptrace_put_breakpoints(task); 961 ptrace_put_breakpoints(task);
962 thread->dabr = data; 962 thread->dabr = data;
963 thread->dabrx = DABRX_ALL;
963 return 0; 964 return 0;
964 } 965 }
965 966
@@ -983,6 +984,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
983 984
984 /* Move contents to the DABR register */ 985 /* Move contents to the DABR register */
985 task->thread.dabr = data; 986 task->thread.dabr = data;
987 task->thread.dabrx = DABRX_ALL;
986#else /* CONFIG_PPC_ADV_DEBUG_REGS */ 988#else /* CONFIG_PPC_ADV_DEBUG_REGS */
987 /* As described above, it was assumed 3 bits were passed with the data 989 /* As described above, it was assumed 3 bits were passed with the data
988 * address, but we will assume only the mode bits will be passed 990 * address, but we will assume only the mode bits will be passed
@@ -1397,6 +1399,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
1397 dabr |= DABR_DATA_WRITE; 1399 dabr |= DABR_DATA_WRITE;
1398 1400
1399 child->thread.dabr = dabr; 1401 child->thread.dabr = dabr;
1402 child->thread.dabrx = DABRX_ALL;
1400 1403
1401 return 1; 1404 return 1;
1402#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ 1405#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 2c0ee6405633..20b0120db0c3 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -21,7 +21,6 @@
21#include <asm/delay.h> 21#include <asm/delay.h>
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23#include <asm/rtas.h> 23#include <asm/rtas.h>
24#include <asm/abs_addr.h>
25 24
26#define MODULE_VERS "1.0" 25#define MODULE_VERS "1.0"
27#define MODULE_NAME "rtas_flash" 26#define MODULE_NAME "rtas_flash"
@@ -582,7 +581,7 @@ static void rtas_flash_firmware(int reboot_type)
582 flist = (struct flash_block_list *)&rtas_data_buf[0]; 581 flist = (struct flash_block_list *)&rtas_data_buf[0];
583 flist->num_blocks = 0; 582 flist->num_blocks = 0;
584 flist->next = rtas_firmware_flash_list; 583 flist->next = rtas_firmware_flash_list;
585 rtas_block_list = virt_to_abs(flist); 584 rtas_block_list = __pa(flist);
586 if (rtas_block_list >= 4UL*1024*1024*1024) { 585 if (rtas_block_list >= 4UL*1024*1024*1024) {
587 printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n"); 586 printk(KERN_ALERT "FLASH: kernel bug...flash list header addr above 4GB\n");
588 spin_unlock(&rtas_data_buf_lock); 587 spin_unlock(&rtas_data_buf_lock);
@@ -596,13 +595,13 @@ static void rtas_flash_firmware(int reboot_type)
596 for (f = flist; f; f = next) { 595 for (f = flist; f; f = next) {
597 /* Translate data addrs to absolute */ 596 /* Translate data addrs to absolute */
598 for (i = 0; i < f->num_blocks; i++) { 597 for (i = 0; i < f->num_blocks; i++) {
599 f->blocks[i].data = (char *)virt_to_abs(f->blocks[i].data); 598 f->blocks[i].data = (char *)__pa(f->blocks[i].data);
600 image_size += f->blocks[i].length; 599 image_size += f->blocks[i].length;
601 } 600 }
602 next = f->next; 601 next = f->next;
603 /* Don't translate NULL pointer for last entry */ 602 /* Don't translate NULL pointer for last entry */
604 if (f->next) 603 if (f->next)
605 f->next = (struct flash_block_list *)virt_to_abs(f->next); 604 f->next = (struct flash_block_list *)__pa(f->next);
606 else 605 else
607 f->next = NULL; 606 f->next = NULL;
608 /* make num_blocks into the version/length field */ 607 /* make num_blocks into the version/length field */
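With the old "absolute address" layer long reduced to a no-op, callers switch from virt_to_abs() to __pa() directly and <asm/abs_addr.h> can be dropped (the vio.c hunk at the end of this diff does the same). The conversion assumes the removed wrapper was by this point nothing more than:

    #define virt_to_abs(va)         __pa(va)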
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 179af906dcda..6de63e3250bb 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -81,7 +81,7 @@ int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
81 return PCIBIOS_DEVICE_NOT_FOUND; 81 return PCIBIOS_DEVICE_NOT_FOUND;
82 82
83 if (returnval == EEH_IO_ERROR_VALUE(size) && 83 if (returnval == EEH_IO_ERROR_VALUE(size) &&
84 eeh_dn_check_failure (pdn->node, NULL)) 84 eeh_dev_check_failure(of_node_to_eeh_dev(pdn->node)))
85 return PCIBIOS_DEVICE_NOT_FOUND; 85 return PCIBIOS_DEVICE_NOT_FOUND;
86 86
87 return PCIBIOS_SUCCESSFUL; 87 return PCIBIOS_SUCCESSFUL;
@@ -275,9 +275,6 @@ void __init find_and_init_phbs(void)
275 of_node_put(root); 275 of_node_put(root);
276 pci_devs_phb_init(); 276 pci_devs_phb_init();
277 277
278 /* Create EEH devices for all PHBs */
279 eeh_dev_phb_init();
280
281 /* 278 /*
282 * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties 279 * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
283 * in chosen. 280 * in chosen.
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 389bd4f0cdb1..efb6a41b3131 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -208,6 +208,8 @@ void __init early_setup(unsigned long dt_ptr)
208 208
209 /* Fix up paca fields required for the boot cpu */ 209 /* Fix up paca fields required for the boot cpu */
210 get_paca()->cpu_start = 1; 210 get_paca()->cpu_start = 1;
211 /* Allow percpu accesses to "work" until we setup percpu data */
212 get_paca()->data_offset = 0;
211 213
212 /* Probe the machine type */ 214 /* Probe the machine type */
213 probe_machine(); 215 probe_machine();
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 5c023c9cf16e..a2dc75793bd5 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/tracehook.h> 12#include <linux/tracehook.h>
13#include <linux/signal.h> 13#include <linux/signal.h>
14#include <linux/uprobes.h>
14#include <linux/key.h> 15#include <linux/key.h>
15#include <asm/hw_breakpoint.h> 16#include <asm/hw_breakpoint.h>
16#include <asm/uaccess.h> 17#include <asm/uaccess.h>
@@ -130,7 +131,7 @@ static int do_signal(struct pt_regs *regs)
130 * triggered inside the kernel. 131 * triggered inside the kernel.
131 */ 132 */
132 if (current->thread.dabr) 133 if (current->thread.dabr)
133 set_dabr(current->thread.dabr); 134 set_dabr(current->thread.dabr, current->thread.dabrx);
134#endif 135#endif
135 /* Re-enable the breakpoints for the signal stack */ 136 /* Re-enable the breakpoints for the signal stack */
136 thread_change_pc(current, regs); 137 thread_change_pc(current, regs);
@@ -157,6 +158,11 @@ static int do_signal(struct pt_regs *regs)
157 158
158void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) 159void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
159{ 160{
161 if (thread_info_flags & _TIF_UPROBE) {
162 clear_thread_flag(TIF_UPROBE);
163 uprobe_notify_resume(regs);
164 }
165
160 if (thread_info_flags & _TIF_SIGPENDING) 166 if (thread_info_flags & _TIF_SIGPENDING)
161 do_signal(regs); 167 do_signal(regs);
162 168
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8d4214afc21d..2b952b5386fd 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -102,7 +102,7 @@ int __devinit smp_generic_kick_cpu(int nr)
102 * Ok it's not there, so it might be soft-unplugged, let's 102 * Ok it's not there, so it might be soft-unplugged, let's
103 * try to bring it back 103 * try to bring it back
104 */ 104 */
105 per_cpu(cpu_state, nr) = CPU_UP_PREPARE; 105 generic_set_cpu_up(nr);
106 smp_wmb(); 106 smp_wmb();
107 smp_send_reschedule(nr); 107 smp_send_reschedule(nr);
108#endif /* CONFIG_HOTPLUG_CPU */ 108#endif /* CONFIG_HOTPLUG_CPU */
@@ -171,7 +171,7 @@ int smp_request_message_ipi(int virq, int msg)
171 } 171 }
172#endif 172#endif
173 err = request_irq(virq, smp_ipi_action[msg], 173 err = request_irq(virq, smp_ipi_action[msg],
174 IRQF_PERCPU | IRQF_NO_THREAD, 174 IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
175 smp_ipi_name[msg], 0); 175 smp_ipi_name[msg], 0);
176 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n", 176 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
177 virq, smp_ipi_name[msg], err); 177 virq, smp_ipi_name[msg], err);
@@ -413,6 +413,16 @@ void generic_set_cpu_dead(unsigned int cpu)
413 per_cpu(cpu_state, cpu) = CPU_DEAD; 413 per_cpu(cpu_state, cpu) = CPU_DEAD;
414} 414}
415 415
416/*
417 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
418 * the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
419 * which makes the delay in generic_cpu_die() not happen.
420 */
421void generic_set_cpu_up(unsigned int cpu)
422{
423 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
424}
425
416int generic_check_cpu_restart(unsigned int cpu) 426int generic_check_cpu_restart(unsigned int cpu)
417{ 427{
418 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; 428 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
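Two smp.c changes: the IPI handlers are registered with IRQF_NO_SUSPEND so they stay usable while device interrupts are suspended, and generic_set_cpu_up() gives kick_cpu() implementations a sanctioned way to reset cpu_state, as the first hunk of this file already does for smp_generic_kick_cpu(). The state machine the new comment describes, as a sketch:

    /*
     *   offline:   generic_set_cpu_dead(cpu)   -> cpu_state = CPU_DEAD
     *   online:    kick_cpu(nr) calls
     *              generic_set_cpu_up(nr)      -> cpu_state = CPU_UP_PREPARE
     *
     * Without the reset, cpu_state would stay CPU_DEAD forever once a CPU
     * had been offlined, so generic_cpu_die() would see "dead" immediately
     * on the next offline and skip its wait for the CPU to really stop.
     */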
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index eaa9d0e6abca..c9986fd400d8 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -508,8 +508,6 @@ void timer_interrupt(struct pt_regs * regs)
508 */ 508 */
509 may_hard_irq_enable(); 509 may_hard_irq_enable();
510 510
511 trace_timer_interrupt_entry(regs);
512
513 __get_cpu_var(irq_stat).timer_irqs++; 511 __get_cpu_var(irq_stat).timer_irqs++;
514 512
515#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) 513#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC)
@@ -520,6 +518,8 @@ void timer_interrupt(struct pt_regs * regs)
520 old_regs = set_irq_regs(regs); 518 old_regs = set_irq_regs(regs);
521 irq_enter(); 519 irq_enter();
522 520
521 trace_timer_interrupt_entry(regs);
522
523 if (test_irq_work_pending()) { 523 if (test_irq_work_pending()) {
524 clear_irq_work_pending(); 524 clear_irq_work_pending();
525 irq_work_run(); 525 irq_work_run();
@@ -544,10 +544,10 @@ void timer_interrupt(struct pt_regs * regs)
544 } 544 }
545#endif 545#endif
546 546
547 trace_timer_interrupt_exit(regs);
548
547 irq_exit(); 549 irq_exit();
548 set_irq_regs(old_regs); 550 set_irq_regs(old_regs);
549
550 trace_timer_interrupt_exit(regs);
551} 551}
552 552
553/* 553/*
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index ae0843fa7a61..32518401af68 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -251,6 +251,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
251 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) 251 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
252 local_irq_enable(); 252 local_irq_enable();
253 253
254 current->thread.trap_nr = code;
254 memset(&info, 0, sizeof(info)); 255 memset(&info, 0, sizeof(info));
255 info.si_signo = signr; 256 info.si_signo = signr;
256 info.si_code = code; 257 info.si_code = code;
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
new file mode 100644
index 000000000000..d2d46d1014f8
--- /dev/null
+++ b/arch/powerpc/kernel/uprobes.c
@@ -0,0 +1,184 @@
1/*
2 * User-space Probes (UProbes) for powerpc
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2007-2012
19 *
20 * Adapted from the x86 port by Ananth N Mavinakayanahalli <ananth@in.ibm.com>
21 */
22#include <linux/kernel.h>
23#include <linux/sched.h>
24#include <linux/ptrace.h>
25#include <linux/uprobes.h>
26#include <linux/uaccess.h>
27#include <linux/kdebug.h>
28
29#include <asm/sstep.h>
30
31#define UPROBE_TRAP_NR UINT_MAX
32
33/**
34 * arch_uprobe_analyze_insn
35 * @mm: the probed address space.
36 * @arch_uprobe: the probepoint information.
37 * @addr: vaddr to probe.
38 * Return 0 on success or a -ve number on error.
39 */
40int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
41 struct mm_struct *mm, unsigned long addr)
42{
43 if (addr & 0x03)
44 return -EINVAL;
45
46 /*
47 * We currently don't support a uprobe on an already
48 * existing breakpoint instruction underneath
49 */
50 if (is_trap(auprobe->ainsn))
51 return -ENOTSUPP;
52 return 0;
53}
54
55/*
56 * arch_uprobe_pre_xol - prepare to execute out of line.
57 * @auprobe: the probepoint information.
58 * @regs: reflects the saved user state of current task.
59 */
60int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
61{
62 struct arch_uprobe_task *autask = &current->utask->autask;
63
64 autask->saved_trap_nr = current->thread.trap_nr;
65 current->thread.trap_nr = UPROBE_TRAP_NR;
66 regs->nip = current->utask->xol_vaddr;
67 return 0;
68}
69
70/**
71 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
72 * @regs: Reflects the saved state of the task after it has hit a breakpoint
73 * instruction.
74 * Return the address of the breakpoint instruction.
75 */
76unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
77{
78 return instruction_pointer(regs);
79}
80
81/*
82 * If xol insn itself traps and generates a signal (SIGILL/SIGSEGV/etc),
83 * then detect the case where a singlestepped instruction jumps back to its
84 * own address. It is assumed that anything like do_page_fault/do_trap/etc
85 * sets thread.trap_nr != UINT_MAX.
86 *
87 * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
88 * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to
89 * UPROBE_TRAP_NR == UINT_MAX set by arch_uprobe_pre_xol().
90 */
91bool arch_uprobe_xol_was_trapped(struct task_struct *t)
92{
93 if (t->thread.trap_nr != UPROBE_TRAP_NR)
94 return true;
95
96 return false;
97}
98
99/*
100 * Called after single-stepping. To avoid the SMP problems that can
101 * occur when we temporarily put back the original opcode to
102 * single-step, we single-stepped a copy of the instruction.
103 *
104 * This function prepares to resume execution after the single-step.
105 */
106int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
107{
108 struct uprobe_task *utask = current->utask;
109
110 WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
111
112 current->thread.trap_nr = utask->autask.saved_trap_nr;
113
114 /*
115 * On powerpc, except for loads and stores, most instructions
116 * including ones that alter code flow (branches, calls, returns)
117 * are emulated in the kernel. We get here only if the emulation
118 * support doesn't exist and have to fix-up the next instruction
119 * to be executed.
120 */
121 regs->nip = utask->vaddr + MAX_UINSN_BYTES;
122 return 0;
123}
124
125/* callback routine for handling exceptions. */
126int arch_uprobe_exception_notify(struct notifier_block *self,
127 unsigned long val, void *data)
128{
129 struct die_args *args = data;
130 struct pt_regs *regs = args->regs;
131
132 /* regs == NULL is a kernel bug */
133 if (WARN_ON(!regs))
134 return NOTIFY_DONE;
135
136 /* We are only interested in userspace traps */
137 if (!user_mode(regs))
138 return NOTIFY_DONE;
139
140 switch (val) {
141 case DIE_BPT:
142 if (uprobe_pre_sstep_notifier(regs))
143 return NOTIFY_STOP;
144 break;
145 case DIE_SSTEP:
146 if (uprobe_post_sstep_notifier(regs))
147 return NOTIFY_STOP;
148 default:
149 break;
150 }
151 return NOTIFY_DONE;
152}
153
154/*
155 * This function gets called when XOL instruction either gets trapped or
156 * the thread has a fatal signal, so reset the instruction pointer to its
157 * probed address.
158 */
159void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
160{
161 struct uprobe_task *utask = current->utask;
162
163 current->thread.trap_nr = utask->autask.saved_trap_nr;
164 instruction_pointer_set(regs, utask->vaddr);
165}
166
167/*
168 * See if the instruction can be emulated.
169 * Returns true if instruction was emulated, false otherwise.
170 */
171bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
172{
173 int ret;
174
175 /*
176 * emulate_step() returns 1 if the insn was successfully emulated.
177 * For all other cases, we need to single-step in hardware.
178 */
179 ret = emulate_step(regs, auprobe->ainsn);
180 if (ret > 0)
181 return true;
182
183 return false;
184}
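The new file plugs powerpc into the generic uprobes engine: most probed instructions are emulated in place through emulate_step(), and only the remainder take the execute-out-of-line (XOL) single-step path. A rough lifecycle sketch tying the hooks above together; the call ordering is inferred from the generic layer and should be read as an assumption, while the per-hook behaviour is taken from the code itself:

    /*
     * arch_uprobe_analyze_insn()  - at register time: reject unaligned
     *                               addresses and existing trap opcodes
     * arch_uprobe_skip_sstep()    - on a breakpoint hit: try emulate_step();
     *                               if it emulates, the probe is finished
     * arch_uprobe_pre_xol()       - otherwise save thread.trap_nr and point
     *                               NIP at the XOL slot copy
     *   ... the task single-steps the copied instruction in userspace ...
     * arch_uprobe_post_xol()      - restore trap_nr, NIP = vaddr +
     *                               MAX_UINSN_BYTES (one 4-byte instruction)
     * arch_uprobe_abort_xol()     - on a trap or fatal signal, rewind NIP
     *                               back to the probed address instead
     */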
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b67db22e102d..1b2076f049ce 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -723,9 +723,7 @@ int __cpuinit vdso_getcpu_init(void)
723 723
724 val = (cpu & 0xfff) | ((node & 0xffff) << 16); 724 val = (cpu & 0xfff) | ((node & 0xffff) << 16);
725 mtspr(SPRN_SPRG3, val); 725 mtspr(SPRN_SPRG3, val);
726#ifdef CONFIG_KVM_BOOK3S_HANDLER 726 get_paca()->sprg3 = val;
727 get_paca()->kvm_hstate.sprg3 = val;
728#endif
729 727
730 put_cpu(); 728 put_cpu();
731 729
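The VDSO getcpu value now lives in its own sprg3 paca field instead of inside the KVM host state, so it is kept regardless of whether the Book3S KVM handlers are built, presumably so any path that scratches SPRG3 can restore it from the paca. The encoding is visible above; a hypothetical userspace-side decode of the same packing (the helper name is made up here):

    /* mirrors val = (cpu & 0xfff) | ((node & 0xffff) << 16) */
    static inline void sprg3_decode(unsigned long val,
                                    unsigned int *cpu, unsigned int *node)
    {
            *cpu  = val & 0xfff;
            *node = (val >> 16) & 0xffff;
    }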
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 02b32216bbc3..201ba59738be 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -33,7 +33,6 @@
33#include <asm/prom.h> 33#include <asm/prom.h>
34#include <asm/firmware.h> 34#include <asm/firmware.h>
35#include <asm/tce.h> 35#include <asm/tce.h>
36#include <asm/abs_addr.h>
37#include <asm/page.h> 36#include <asm/page.h>
38#include <asm/hvcall.h> 37#include <asm/hvcall.h>
39 38