author    | Ingo Molnar <mingo@elte.hu> | 2009-06-17 07:06:17 -0400
committer | Ingo Molnar <mingo@elte.hu> | 2009-06-17 07:06:17 -0400
commit    | a3d06cc6aa3e765dc2bf98626f87272dcf641dca (patch)
tree      | aa3e49b58f08d6c0ea55cdca4fb5e6c8ba6ae333 /arch/powerpc/kernel
parent    | 0990b1c65729012a63e0eeca93aaaafea4e9a064 (diff)
parent    | 65795efbd380a832ae508b04dba8f8e53f0b84d9 (diff)
Merge branch 'linus' into perfcounters/core
Conflicts:
arch/x86/include/asm/kmap_types.h
include/linux/mm.h
include/asm-generic/kmap_types.h
Merge reason: We crossed changes with kmap_types.h cleanups in mainline.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc/kernel')
30 files changed, 1745 insertions, 1469 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a2c683403c2b..612b0c4dc26d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -36,7 +36,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
36 | firmware.o nvram_64.o | 36 | firmware.o nvram_64.o |
37 | obj64-$(CONFIG_RELOCATABLE) += reloc_64.o | 37 | obj64-$(CONFIG_RELOCATABLE) += reloc_64.o |
38 | obj-$(CONFIG_PPC64) += vdso64/ | 38 | obj-$(CONFIG_PPC64) += vdso64/ |
39 | obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o | 39 | obj-$(CONFIG_ALTIVEC) += vecemu.o |
40 | obj-$(CONFIG_PPC_970_NAP) += idle_power4.o | 40 | obj-$(CONFIG_PPC_970_NAP) += idle_power4.o |
41 | obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o | 41 | obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o |
42 | obj-$(CONFIG_PPC_CLOCK) += clock.o | 42 | obj-$(CONFIG_PPC_CLOCK) += clock.o |
@@ -82,6 +82,7 @@ obj-$(CONFIG_SMP) += smp.o
82 | obj-$(CONFIG_KPROBES) += kprobes.o | 82 | obj-$(CONFIG_KPROBES) += kprobes.o |
83 | obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o | 83 | obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o |
84 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 84 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
85 | obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o | ||
85 | 86 | ||
86 | pci64-$(CONFIG_PPC64) += pci_dn.o isa-bridge.o | 87 | pci64-$(CONFIG_PPC64) += pci_dn.o isa-bridge.o |
87 | obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \ | 88 | obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \ |
@@ -111,6 +112,7 @@ obj-y += ppc_save_regs.o
111 | endif | 112 | endif |
112 | 113 | ||
113 | extra-$(CONFIG_PPC_FPU) += fpu.o | 114 | extra-$(CONFIG_PPC_FPU) += fpu.o |
115 | extra-$(CONFIG_ALTIVEC) += vector.o | ||
114 | extra-$(CONFIG_PPC64) += entry_64.o | 116 | extra-$(CONFIG_PPC64) += entry_64.o |
115 | 117 | ||
116 | extra-y += systbl_chk.i | 118 | extra-y += systbl_chk.i |
@@ -123,6 +125,7 @@ PHONY += systbl_chk
123 | systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i | 125 | systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i |
124 | $(call cmd,systbl_chk) | 126 | $(call cmd,systbl_chk) |
125 | 127 | ||
128 | ifeq ($(CONFIG_PPC_OF_BOOT_TRAMPOLINE),y) | ||
126 | $(obj)/built-in.o: prom_init_check | 129 | $(obj)/built-in.o: prom_init_check |
127 | 130 | ||
128 | quiet_cmd_prom_init_check = CALL $< | 131 | quiet_cmd_prom_init_check = CALL $< |
@@ -131,5 +134,6 @@ quiet_cmd_prom_init_check = CALL $<
131 | PHONY += prom_init_check | 134 | PHONY += prom_init_check |
132 | prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o | 135 | prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o |
133 | $(call cmd,prom_init_check) | 136 | $(call cmd,prom_init_check) |
137 | endif | ||
134 | 138 | ||
135 | clean-files := vmlinux.lds | 139 | clean-files := vmlinux.lds |
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 5ffcfaa77d6a..a5b632e52fae 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -24,6 +24,7 @@
24 | #include <asm/system.h> | 24 | #include <asm/system.h> |
25 | #include <asm/cache.h> | 25 | #include <asm/cache.h> |
26 | #include <asm/cputable.h> | 26 | #include <asm/cputable.h> |
27 | #include <asm/emulated_ops.h> | ||
27 | 28 | ||
28 | struct aligninfo { | 29 | struct aligninfo { |
29 | unsigned char len; | 30 | unsigned char len; |
@@ -730,8 +731,10 @@ int fix_alignment(struct pt_regs *regs)
730 | areg = dsisr & 0x1f; /* register to update */ | 731 | areg = dsisr & 0x1f; /* register to update */ |
731 | 732 | ||
732 | #ifdef CONFIG_SPE | 733 | #ifdef CONFIG_SPE |
733 | if ((instr >> 26) == 0x4) | 734 | if ((instr >> 26) == 0x4) { |
735 | PPC_WARN_EMULATED(spe); | ||
734 | return emulate_spe(regs, reg, instr); | 736 | return emulate_spe(regs, reg, instr); |
737 | } | ||
735 | #endif | 738 | #endif |
736 | 739 | ||
737 | instr = (dsisr >> 10) & 0x7f; | 740 | instr = (dsisr >> 10) & 0x7f; |
@@ -783,23 +786,28 @@ int fix_alignment(struct pt_regs *regs)
783 | flags |= SPLT; | 786 | flags |= SPLT; |
784 | nb = 8; | 787 | nb = 8; |
785 | } | 788 | } |
789 | PPC_WARN_EMULATED(vsx); | ||
786 | return emulate_vsx(addr, reg, areg, regs, flags, nb); | 790 | return emulate_vsx(addr, reg, areg, regs, flags, nb); |
787 | } | 791 | } |
788 | #endif | 792 | #endif |
789 | /* A size of 0 indicates an instruction we don't support, with | 793 | /* A size of 0 indicates an instruction we don't support, with |
790 | * the exception of DCBZ which is handled as a special case here | 794 | * the exception of DCBZ which is handled as a special case here |
791 | */ | 795 | */ |
792 | if (instr == DCBZ) | 796 | if (instr == DCBZ) { |
797 | PPC_WARN_EMULATED(dcbz); | ||
793 | return emulate_dcbz(regs, addr); | 798 | return emulate_dcbz(regs, addr); |
799 | } | ||
794 | if (unlikely(nb == 0)) | 800 | if (unlikely(nb == 0)) |
795 | return 0; | 801 | return 0; |
796 | 802 | ||
797 | /* Load/Store Multiple instructions are handled in their own | 803 | /* Load/Store Multiple instructions are handled in their own |
798 | * function | 804 | * function |
799 | */ | 805 | */ |
800 | if (flags & M) | 806 | if (flags & M) { |
807 | PPC_WARN_EMULATED(multiple); | ||
801 | return emulate_multiple(regs, addr, reg, nb, | 808 | return emulate_multiple(regs, addr, reg, nb, |
802 | flags, instr, swiz); | 809 | flags, instr, swiz); |
810 | } | ||
803 | 811 | ||
804 | /* Verify the address of the operand */ | 812 | /* Verify the address of the operand */ |
805 | if (unlikely(user_mode(regs) && | 813 | if (unlikely(user_mode(regs) && |
@@ -816,8 +824,12 @@ int fix_alignment(struct pt_regs *regs)
816 | } | 824 | } |
817 | 825 | ||
818 | /* Special case for 16-byte FP loads and stores */ | 826 | /* Special case for 16-byte FP loads and stores */ |
819 | if (nb == 16) | 827 | if (nb == 16) { |
828 | PPC_WARN_EMULATED(fp_pair); | ||
820 | return emulate_fp_pair(addr, reg, flags); | 829 | return emulate_fp_pair(addr, reg, flags); |
830 | } | ||
831 | |||
832 | PPC_WARN_EMULATED(unaligned); | ||
821 | 833 | ||
822 | /* If we are loading, get the data from user space, else | 834 | /* If we are loading, get the data from user space, else |
823 | * get it from register values | 835 | * get it from register values |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e981d1ce1914..561b64652311 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -122,8 +122,6 @@ int main(void)
122 | DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack)); | 122 | DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack)); |
123 | DEFINE(PACACURRENT, offsetof(struct paca_struct, __current)); | 123 | DEFINE(PACACURRENT, offsetof(struct paca_struct, __current)); |
124 | DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr)); | 124 | DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr)); |
125 | DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real)); | ||
126 | DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr)); | ||
127 | DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); | 125 | DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr)); |
128 | DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); | 126 | DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1)); |
129 | DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); | 127 | DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc)); |
@@ -132,35 +130,30 @@ int main(void)
132 | DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); | 130 | DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); |
133 | DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); | 131 | DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); |
134 | DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending)); | 132 | DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending)); |
135 | DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); | ||
136 | DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); | ||
137 | DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); | 133 | DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); |
138 | DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp)); | ||
139 | #ifdef CONFIG_PPC_MM_SLICES | 134 | #ifdef CONFIG_PPC_MM_SLICES |
140 | DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, | 135 | DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, |
141 | context.low_slices_psize)); | 136 | context.low_slices_psize)); |
142 | DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct, | 137 | DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct, |
143 | context.high_slices_psize)); | 138 | context.high_slices_psize)); |
144 | DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def)); | 139 | DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def)); |
140 | #endif /* CONFIG_PPC_MM_SLICES */ | ||
141 | #ifdef CONFIG_PPC_STD_MMU_64 | ||
142 | DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real)); | ||
143 | DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr)); | ||
144 | DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache)); | ||
145 | DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr)); | ||
146 | DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp)); | ||
147 | #ifdef CONFIG_PPC_MM_SLICES | ||
145 | DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp)); | 148 | DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp)); |
146 | #else | 149 | #else |
147 | DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp)); | 150 | DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp)); |
148 | |||
149 | #endif /* CONFIG_PPC_MM_SLICES */ | 151 | #endif /* CONFIG_PPC_MM_SLICES */ |
150 | DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); | 152 | DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); |
151 | DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); | 153 | DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); |
152 | DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); | 154 | DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); |
153 | DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); | ||
154 | DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); | 155 | DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr)); |
155 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); | ||
156 | DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); | ||
157 | DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr)); | ||
158 | DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); | ||
159 | DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); | ||
160 | DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr)); | 156 | DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr)); |
161 | DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset)); | ||
162 | DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); | ||
163 | |||
164 | DEFINE(SLBSHADOW_STACKVSID, | 157 | DEFINE(SLBSHADOW_STACKVSID, |
165 | offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); | 158 | offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); |
166 | DEFINE(SLBSHADOW_STACKESID, | 159 | DEFINE(SLBSHADOW_STACKESID, |
@@ -170,6 +163,15 @@ int main(void)
170 | DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); | 163 | DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); |
171 | DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); | 164 | DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); |
172 | DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); | 165 | DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); |
166 | #endif /* CONFIG_PPC_STD_MMU_64 */ | ||
167 | DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); | ||
168 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); | ||
169 | DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); | ||
170 | DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr)); | ||
171 | DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); | ||
172 | DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); | ||
173 | DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset)); | ||
174 | DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); | ||
173 | #endif /* CONFIG_PPC64 */ | 175 | #endif /* CONFIG_PPC64 */ |
174 | 176 | ||
175 | /* RTAS */ | 177 | /* RTAS */ |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 3e33fb933d99..4a24a2fc4574 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -427,7 +427,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
427 | .cpu_name = "POWER7 (architected)", | 427 | .cpu_name = "POWER7 (architected)", |
428 | .cpu_features = CPU_FTRS_POWER7, | 428 | .cpu_features = CPU_FTRS_POWER7, |
429 | .cpu_user_features = COMMON_USER_POWER7, | 429 | .cpu_user_features = COMMON_USER_POWER7, |
430 | .mmu_features = MMU_FTR_HPTE_TABLE, | 430 | .mmu_features = MMU_FTR_HPTE_TABLE | |
431 | MMU_FTR_TLBIE_206, | ||
431 | .icache_bsize = 128, | 432 | .icache_bsize = 128, |
432 | .dcache_bsize = 128, | 433 | .dcache_bsize = 128, |
433 | .machine_check = machine_check_generic, | 434 | .machine_check = machine_check_generic, |
@@ -441,7 +442,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
441 | .cpu_name = "POWER7 (raw)", | 442 | .cpu_name = "POWER7 (raw)", |
442 | .cpu_features = CPU_FTRS_POWER7, | 443 | .cpu_features = CPU_FTRS_POWER7, |
443 | .cpu_user_features = COMMON_USER_POWER7, | 444 | .cpu_user_features = COMMON_USER_POWER7, |
444 | .mmu_features = MMU_FTR_HPTE_TABLE, | 445 | .mmu_features = MMU_FTR_HPTE_TABLE | |
446 | MMU_FTR_TLBIE_206, | ||
445 | .icache_bsize = 128, | 447 | .icache_bsize = 128, |
446 | .dcache_bsize = 128, | 448 | .dcache_bsize = 128, |
447 | .num_pmcs = 6, | 449 | .num_pmcs = 6, |
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
new file mode 100644
index 000000000000..68ccf11e4f19
--- /dev/null
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -0,0 +1,163 @@
1 | /* | ||
2 | * Contains routines needed to support swiotlb for ppc. | ||
3 | * | ||
4 | * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/dma-mapping.h> | ||
14 | #include <linux/pfn.h> | ||
15 | #include <linux/of_platform.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/pci.h> | ||
18 | |||
19 | #include <asm/machdep.h> | ||
20 | #include <asm/swiotlb.h> | ||
21 | #include <asm/dma.h> | ||
22 | #include <asm/abs_addr.h> | ||
23 | |||
24 | int swiotlb __read_mostly; | ||
25 | unsigned int ppc_swiotlb_enable; | ||
26 | |||
27 | void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr) | ||
28 | { | ||
29 | unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr)); | ||
30 | void *pageaddr = page_address(pfn_to_page(pfn)); | ||
31 | |||
32 | if (pageaddr != NULL) | ||
33 | return pageaddr + (addr % PAGE_SIZE); | ||
34 | return NULL; | ||
35 | } | ||
36 | |||
37 | dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr) | ||
38 | { | ||
39 | return paddr + get_dma_direct_offset(hwdev); | ||
40 | } | ||
41 | |||
42 | phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr) | ||
43 | |||
44 | { | ||
45 | return baddr - get_dma_direct_offset(hwdev); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * Determine if an address needs bounce buffering via swiotlb. | ||
50 | * Going forward I expect the swiotlb code to generalize on using | ||
51 | * a dma_ops->addr_needs_map, and this function will move from here to the | ||
52 | * generic swiotlb code. | ||
53 | */ | ||
54 | int | ||
55 | swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr, | ||
56 | size_t size) | ||
57 | { | ||
58 | struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev); | ||
59 | |||
60 | BUG_ON(!dma_ops); | ||
61 | return dma_ops->addr_needs_map(hwdev, addr, size); | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * Determine if an address is reachable by a pci device, or if we must bounce. | ||
66 | */ | ||
67 | static int | ||
68 | swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | ||
69 | { | ||
70 | u64 mask = dma_get_mask(hwdev); | ||
71 | dma_addr_t max; | ||
72 | struct pci_controller *hose; | ||
73 | struct pci_dev *pdev = to_pci_dev(hwdev); | ||
74 | |||
75 | hose = pci_bus_to_host(pdev->bus); | ||
76 | max = hose->dma_window_base_cur + hose->dma_window_size; | ||
77 | |||
78 | /* check that we're within mapped pci window space */ | ||
79 | if ((addr + size > max) | (addr < hose->dma_window_base_cur)) | ||
80 | return 1; | ||
81 | |||
82 | return !is_buffer_dma_capable(mask, addr, size); | ||
83 | } | ||
84 | |||
85 | static int | ||
86 | swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size) | ||
87 | { | ||
88 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); | ||
89 | } | ||
90 | |||
91 | |||
92 | /* | ||
93 | * At the moment, all platforms that use this code only require | ||
94 | * swiotlb to be used if we're operating on HIGHMEM. Since | ||
95 | * we don't ever call anything other than map_sg, unmap_sg, | ||
96 | * map_page, and unmap_page on highmem, use normal dma_ops | ||
97 | * for everything else. | ||
98 | */ | ||
99 | struct dma_mapping_ops swiotlb_dma_ops = { | ||
100 | .alloc_coherent = dma_direct_alloc_coherent, | ||
101 | .free_coherent = dma_direct_free_coherent, | ||
102 | .map_sg = swiotlb_map_sg_attrs, | ||
103 | .unmap_sg = swiotlb_unmap_sg_attrs, | ||
104 | .dma_supported = swiotlb_dma_supported, | ||
105 | .map_page = swiotlb_map_page, | ||
106 | .unmap_page = swiotlb_unmap_page, | ||
107 | .addr_needs_map = swiotlb_addr_needs_map, | ||
108 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, | ||
109 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, | ||
110 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | ||
111 | .sync_sg_for_device = swiotlb_sync_sg_for_device | ||
112 | }; | ||
113 | |||
114 | struct dma_mapping_ops swiotlb_pci_dma_ops = { | ||
115 | .alloc_coherent = dma_direct_alloc_coherent, | ||
116 | .free_coherent = dma_direct_free_coherent, | ||
117 | .map_sg = swiotlb_map_sg_attrs, | ||
118 | .unmap_sg = swiotlb_unmap_sg_attrs, | ||
119 | .dma_supported = swiotlb_dma_supported, | ||
120 | .map_page = swiotlb_map_page, | ||
121 | .unmap_page = swiotlb_unmap_page, | ||
122 | .addr_needs_map = swiotlb_pci_addr_needs_map, | ||
123 | .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu, | ||
124 | .sync_single_range_for_device = swiotlb_sync_single_range_for_device, | ||
125 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, | ||
126 | .sync_sg_for_device = swiotlb_sync_sg_for_device | ||
127 | }; | ||
128 | |||
129 | static int ppc_swiotlb_bus_notify(struct notifier_block *nb, | ||
130 | unsigned long action, void *data) | ||
131 | { | ||
132 | struct device *dev = data; | ||
133 | |||
134 | /* We are only interested in device addition */ | ||
135 | if (action != BUS_NOTIFY_ADD_DEVICE) | ||
136 | return 0; | ||
137 | |||
138 | /* May need to bounce if the device can't address all of DRAM */ | ||
139 | if (dma_get_mask(dev) < lmb_end_of_DRAM()) | ||
140 | set_dma_ops(dev, &swiotlb_dma_ops); | ||
141 | |||
142 | return NOTIFY_DONE; | ||
143 | } | ||
144 | |||
145 | static struct notifier_block ppc_swiotlb_plat_bus_notifier = { | ||
146 | .notifier_call = ppc_swiotlb_bus_notify, | ||
147 | .priority = 0, | ||
148 | }; | ||
149 | |||
150 | static struct notifier_block ppc_swiotlb_of_bus_notifier = { | ||
151 | .notifier_call = ppc_swiotlb_bus_notify, | ||
152 | .priority = 0, | ||
153 | }; | ||
154 | |||
155 | int __init swiotlb_setup_bus_notifier(void) | ||
156 | { | ||
157 | bus_register_notifier(&platform_bus_type, | ||
158 | &ppc_swiotlb_plat_bus_notifier); | ||
159 | bus_register_notifier(&of_platform_bus_type, | ||
160 | &ppc_swiotlb_of_bus_notifier); | ||
161 | |||
162 | return 0; | ||
163 | } | ||
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 6b02793dc75b..20a60d661ba8 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -19,7 +19,7 @@
19 | * default the offset is PCI_DRAM_OFFSET. | 19 | * default the offset is PCI_DRAM_OFFSET. |
20 | */ | 20 | */ |
21 | 21 | ||
22 | static unsigned long get_dma_direct_offset(struct device *dev) | 22 | unsigned long get_dma_direct_offset(struct device *dev) |
23 | { | 23 | { |
24 | if (dev) | 24 | if (dev) |
25 | return (unsigned long)dev->archdata.dma_data; | 25 | return (unsigned long)dev->archdata.dma_data; |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
new file mode 100644
index 000000000000..eb898112e577
--- /dev/null
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -0,0 +1,978 @@
1 | /* | ||
2 | * This file contains the 64-bit "server" PowerPC variant | ||
3 | * of the low level exception handling including exception | ||
4 | * vectors, exception return, part of the slb and stab | ||
5 | * handling and other fixed offset specific things. | ||
6 | * | ||
7 | * This file is meant to be #included from head_64.S due to | ||
8 | * position dependant assembly. | ||
9 | * | ||
10 | * Most of this originates from head_64.S and thus has the same | ||
11 | * copyright history. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | * We layout physical memory as follows: | ||
17 | * 0x0000 - 0x00ff : Secondary processor spin code | ||
18 | * 0x0100 - 0x2fff : pSeries Interrupt prologs | ||
19 | * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs | ||
20 | * 0x6000 - 0x6fff : Initial (CPU0) segment table | ||
21 | * 0x7000 - 0x7fff : FWNMI data area | ||
22 | * 0x8000 - : Early init and support code | ||
23 | */ | ||
24 | |||
25 | |||
26 | /* | ||
27 | * SPRG Usage | ||
28 | * | ||
29 | * Register Definition | ||
30 | * | ||
31 | * SPRG0 reserved for hypervisor | ||
32 | * SPRG1 temp - used to save gpr | ||
33 | * SPRG2 temp - used to save gpr | ||
34 | * SPRG3 virt addr of paca | ||
35 | */ | ||
36 | |||
37 | /* | ||
38 | * This is the start of the interrupt handlers for pSeries | ||
39 | * This code runs with relocation off. | ||
40 | * Code from here to __end_interrupts gets copied down to real | ||
41 | * address 0x100 when we are running a relocatable kernel. | ||
42 | * Therefore any relative branches in this section must only | ||
43 | * branch to labels in this section. | ||
44 | */ | ||
45 | . = 0x100 | ||
46 | .globl __start_interrupts | ||
47 | __start_interrupts: | ||
48 | |||
49 | STD_EXCEPTION_PSERIES(0x100, system_reset) | ||
50 | |||
51 | . = 0x200 | ||
52 | _machine_check_pSeries: | ||
53 | HMT_MEDIUM | ||
54 | mtspr SPRN_SPRG1,r13 /* save r13 */ | ||
55 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | ||
56 | |||
57 | . = 0x300 | ||
58 | .globl data_access_pSeries | ||
59 | data_access_pSeries: | ||
60 | HMT_MEDIUM | ||
61 | mtspr SPRN_SPRG1,r13 | ||
62 | BEGIN_FTR_SECTION | ||
63 | mtspr SPRN_SPRG2,r12 | ||
64 | mfspr r13,SPRN_DAR | ||
65 | mfspr r12,SPRN_DSISR | ||
66 | srdi r13,r13,60 | ||
67 | rlwimi r13,r12,16,0x20 | ||
68 | mfcr r12 | ||
69 | cmpwi r13,0x2c | ||
70 | beq do_stab_bolted_pSeries | ||
71 | mtcrf 0x80,r12 | ||
72 | mfspr r12,SPRN_SPRG2 | ||
73 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
74 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) | ||
75 | |||
76 | . = 0x380 | ||
77 | .globl data_access_slb_pSeries | ||
78 | data_access_slb_pSeries: | ||
79 | HMT_MEDIUM | ||
80 | mtspr SPRN_SPRG1,r13 | ||
81 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ | ||
82 | std r3,PACA_EXSLB+EX_R3(r13) | ||
83 | mfspr r3,SPRN_DAR | ||
84 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | ||
85 | mfcr r9 | ||
86 | #ifdef __DISABLED__ | ||
87 | /* Keep that around for when we re-implement dynamic VSIDs */ | ||
88 | cmpdi r3,0 | ||
89 | bge slb_miss_user_pseries | ||
90 | #endif /* __DISABLED__ */ | ||
91 | std r10,PACA_EXSLB+EX_R10(r13) | ||
92 | std r11,PACA_EXSLB+EX_R11(r13) | ||
93 | std r12,PACA_EXSLB+EX_R12(r13) | ||
94 | mfspr r10,SPRN_SPRG1 | ||
95 | std r10,PACA_EXSLB+EX_R13(r13) | ||
96 | mfspr r12,SPRN_SRR1 /* and SRR1 */ | ||
97 | #ifndef CONFIG_RELOCATABLE | ||
98 | b .slb_miss_realmode | ||
99 | #else | ||
100 | /* | ||
101 | * We can't just use a direct branch to .slb_miss_realmode | ||
102 | * because the distance from here to there depends on where | ||
103 | * the kernel ends up being put. | ||
104 | */ | ||
105 | mfctr r11 | ||
106 | ld r10,PACAKBASE(r13) | ||
107 | LOAD_HANDLER(r10, .slb_miss_realmode) | ||
108 | mtctr r10 | ||
109 | bctr | ||
110 | #endif | ||
111 | |||
112 | STD_EXCEPTION_PSERIES(0x400, instruction_access) | ||
113 | |||
114 | . = 0x480 | ||
115 | .globl instruction_access_slb_pSeries | ||
116 | instruction_access_slb_pSeries: | ||
117 | HMT_MEDIUM | ||
118 | mtspr SPRN_SPRG1,r13 | ||
119 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ | ||
120 | std r3,PACA_EXSLB+EX_R3(r13) | ||
121 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ | ||
122 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | ||
123 | mfcr r9 | ||
124 | #ifdef __DISABLED__ | ||
125 | /* Keep that around for when we re-implement dynamic VSIDs */ | ||
126 | cmpdi r3,0 | ||
127 | bge slb_miss_user_pseries | ||
128 | #endif /* __DISABLED__ */ | ||
129 | std r10,PACA_EXSLB+EX_R10(r13) | ||
130 | std r11,PACA_EXSLB+EX_R11(r13) | ||
131 | std r12,PACA_EXSLB+EX_R12(r13) | ||
132 | mfspr r10,SPRN_SPRG1 | ||
133 | std r10,PACA_EXSLB+EX_R13(r13) | ||
134 | mfspr r12,SPRN_SRR1 /* and SRR1 */ | ||
135 | #ifndef CONFIG_RELOCATABLE | ||
136 | b .slb_miss_realmode | ||
137 | #else | ||
138 | mfctr r11 | ||
139 | ld r10,PACAKBASE(r13) | ||
140 | LOAD_HANDLER(r10, .slb_miss_realmode) | ||
141 | mtctr r10 | ||
142 | bctr | ||
143 | #endif | ||
144 | |||
145 | MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) | ||
146 | STD_EXCEPTION_PSERIES(0x600, alignment) | ||
147 | STD_EXCEPTION_PSERIES(0x700, program_check) | ||
148 | STD_EXCEPTION_PSERIES(0x800, fp_unavailable) | ||
149 | MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) | ||
150 | STD_EXCEPTION_PSERIES(0xa00, trap_0a) | ||
151 | STD_EXCEPTION_PSERIES(0xb00, trap_0b) | ||
152 | |||
153 | . = 0xc00 | ||
154 | .globl system_call_pSeries | ||
155 | system_call_pSeries: | ||
156 | HMT_MEDIUM | ||
157 | BEGIN_FTR_SECTION | ||
158 | cmpdi r0,0x1ebe | ||
159 | beq- 1f | ||
160 | END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | ||
161 | mr r9,r13 | ||
162 | mfspr r13,SPRN_SPRG3 | ||
163 | mfspr r11,SPRN_SRR0 | ||
164 | ld r12,PACAKBASE(r13) | ||
165 | ld r10,PACAKMSR(r13) | ||
166 | LOAD_HANDLER(r12, system_call_entry) | ||
167 | mtspr SPRN_SRR0,r12 | ||
168 | mfspr r12,SPRN_SRR1 | ||
169 | mtspr SPRN_SRR1,r10 | ||
170 | rfid | ||
171 | b . /* prevent speculative execution */ | ||
172 | |||
173 | /* Fast LE/BE switch system call */ | ||
174 | 1: mfspr r12,SPRN_SRR1 | ||
175 | xori r12,r12,MSR_LE | ||
176 | mtspr SPRN_SRR1,r12 | ||
177 | rfid /* return to userspace */ | ||
178 | b . | ||
179 | |||
180 | STD_EXCEPTION_PSERIES(0xd00, single_step) | ||
181 | STD_EXCEPTION_PSERIES(0xe00, trap_0e) | ||
182 | |||
183 | /* We need to deal with the Altivec unavailable exception | ||
184 | * here which is at 0xf20, thus in the middle of the | ||
185 | * prolog code of the PerformanceMonitor one. A little | ||
186 | * trickery is thus necessary | ||
187 | */ | ||
188 | . = 0xf00 | ||
189 | b performance_monitor_pSeries | ||
190 | |||
191 | . = 0xf20 | ||
192 | b altivec_unavailable_pSeries | ||
193 | |||
194 | . = 0xf40 | ||
195 | b vsx_unavailable_pSeries | ||
196 | |||
197 | #ifdef CONFIG_CBE_RAS | ||
198 | HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) | ||
199 | #endif /* CONFIG_CBE_RAS */ | ||
200 | STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) | ||
201 | #ifdef CONFIG_CBE_RAS | ||
202 | HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance) | ||
203 | #endif /* CONFIG_CBE_RAS */ | ||
204 | STD_EXCEPTION_PSERIES(0x1700, altivec_assist) | ||
205 | #ifdef CONFIG_CBE_RAS | ||
206 | HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal) | ||
207 | #endif /* CONFIG_CBE_RAS */ | ||
208 | |||
209 | . = 0x3000 | ||
210 | |||
211 | /*** pSeries interrupt support ***/ | ||
212 | |||
213 | /* moved from 0xf00 */ | ||
214 | STD_EXCEPTION_PSERIES(., performance_monitor) | ||
215 | STD_EXCEPTION_PSERIES(., altivec_unavailable) | ||
216 | STD_EXCEPTION_PSERIES(., vsx_unavailable) | ||
217 | |||
218 | /* | ||
219 | * An interrupt came in while soft-disabled; clear EE in SRR1, | ||
220 | * clear paca->hard_enabled and return. | ||
221 | */ | ||
222 | masked_interrupt: | ||
223 | stb r10,PACAHARDIRQEN(r13) | ||
224 | mtcrf 0x80,r9 | ||
225 | ld r9,PACA_EXGEN+EX_R9(r13) | ||
226 | mfspr r10,SPRN_SRR1 | ||
227 | rldicl r10,r10,48,1 /* clear MSR_EE */ | ||
228 | rotldi r10,r10,16 | ||
229 | mtspr SPRN_SRR1,r10 | ||
230 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
231 | mfspr r13,SPRN_SPRG1 | ||
232 | rfid | ||
233 | b . | ||
234 | |||
235 | .align 7 | ||
236 | do_stab_bolted_pSeries: | ||
237 | mtcrf 0x80,r12 | ||
238 | mfspr r12,SPRN_SPRG2 | ||
239 | EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) | ||
240 | |||
241 | #ifdef CONFIG_PPC_PSERIES | ||
242 | /* | ||
243 | * Vectors for the FWNMI option. Share common code. | ||
244 | */ | ||
245 | .globl system_reset_fwnmi | ||
246 | .align 7 | ||
247 | system_reset_fwnmi: | ||
248 | HMT_MEDIUM | ||
249 | mtspr SPRN_SPRG1,r13 /* save r13 */ | ||
250 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) | ||
251 | |||
252 | .globl machine_check_fwnmi | ||
253 | .align 7 | ||
254 | machine_check_fwnmi: | ||
255 | HMT_MEDIUM | ||
256 | mtspr SPRN_SPRG1,r13 /* save r13 */ | ||
257 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | ||
258 | |||
259 | #endif /* CONFIG_PPC_PSERIES */ | ||
260 | |||
261 | #ifdef __DISABLED__ | ||
262 | /* | ||
263 | * This is used for when the SLB miss handler has to go virtual, | ||
264 | * which doesn't happen for now anymore but will once we re-implement | ||
265 | * dynamic VSIDs for shared page tables | ||
266 | */ | ||
267 | slb_miss_user_pseries: | ||
268 | std r10,PACA_EXGEN+EX_R10(r13) | ||
269 | std r11,PACA_EXGEN+EX_R11(r13) | ||
270 | std r12,PACA_EXGEN+EX_R12(r13) | ||
271 | mfspr r10,SPRG1 | ||
272 | ld r11,PACA_EXSLB+EX_R9(r13) | ||
273 | ld r12,PACA_EXSLB+EX_R3(r13) | ||
274 | std r10,PACA_EXGEN+EX_R13(r13) | ||
275 | std r11,PACA_EXGEN+EX_R9(r13) | ||
276 | std r12,PACA_EXGEN+EX_R3(r13) | ||
277 | clrrdi r12,r13,32 | ||
278 | mfmsr r10 | ||
279 | mfspr r11,SRR0 /* save SRR0 */ | ||
280 | ori r12,r12,slb_miss_user_common@l /* virt addr of handler */ | ||
281 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI | ||
282 | mtspr SRR0,r12 | ||
283 | mfspr r12,SRR1 /* and SRR1 */ | ||
284 | mtspr SRR1,r10 | ||
285 | rfid | ||
286 | b . /* prevent spec. execution */ | ||
287 | #endif /* __DISABLED__ */ | ||
288 | |||
289 | .align 7 | ||
290 | .globl __end_interrupts | ||
291 | __end_interrupts: | ||
292 | |||
293 | /* | ||
294 | * Code from here down to __end_handlers is invoked from the | ||
295 | * exception prologs above. Because the prologs assemble the | ||
296 | * addresses of these handlers using the LOAD_HANDLER macro, | ||
297 | * which uses an addi instruction, these handlers must be in | ||
298 | * the first 32k of the kernel image. | ||
299 | */ | ||
300 | |||
301 | /*** Common interrupt handlers ***/ | ||
302 | |||
303 | STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) | ||
304 | |||
305 | /* | ||
306 | * Machine check is different because we use a different | ||
307 | * save area: PACA_EXMC instead of PACA_EXGEN. | ||
308 | */ | ||
309 | .align 7 | ||
310 | .globl machine_check_common | ||
311 | machine_check_common: | ||
312 | EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) | ||
313 | FINISH_NAP | ||
314 | DISABLE_INTS | ||
315 | bl .save_nvgprs | ||
316 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
317 | bl .machine_check_exception | ||
318 | b .ret_from_except | ||
319 | |||
320 | STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) | ||
321 | STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) | ||
322 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | ||
323 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | ||
324 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | ||
325 | STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) | ||
326 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) | ||
327 | #ifdef CONFIG_ALTIVEC | ||
328 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) | ||
329 | #else | ||
330 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) | ||
331 | #endif | ||
332 | #ifdef CONFIG_CBE_RAS | ||
333 | STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) | ||
334 | STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception) | ||
335 | STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) | ||
336 | #endif /* CONFIG_CBE_RAS */ | ||
337 | |||
338 | .align 7 | ||
339 | system_call_entry: | ||
340 | b system_call_common | ||
341 | |||
342 | /* | ||
343 | * Here we have detected that the kernel stack pointer is bad. | ||
344 | * R9 contains the saved CR, r13 points to the paca, | ||
345 | * r10 contains the (bad) kernel stack pointer, | ||
346 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
347 | * We switch to using an emergency stack, save the registers there, | ||
348 | * and call kernel_bad_stack(), which panics. | ||
349 | */ | ||
350 | bad_stack: | ||
351 | ld r1,PACAEMERGSP(r13) | ||
352 | subi r1,r1,64+INT_FRAME_SIZE | ||
353 | std r9,_CCR(r1) | ||
354 | std r10,GPR1(r1) | ||
355 | std r11,_NIP(r1) | ||
356 | std r12,_MSR(r1) | ||
357 | mfspr r11,SPRN_DAR | ||
358 | mfspr r12,SPRN_DSISR | ||
359 | std r11,_DAR(r1) | ||
360 | std r12,_DSISR(r1) | ||
361 | mflr r10 | ||
362 | mfctr r11 | ||
363 | mfxer r12 | ||
364 | std r10,_LINK(r1) | ||
365 | std r11,_CTR(r1) | ||
366 | std r12,_XER(r1) | ||
367 | SAVE_GPR(0,r1) | ||
368 | SAVE_GPR(2,r1) | ||
369 | SAVE_4GPRS(3,r1) | ||
370 | SAVE_2GPRS(7,r1) | ||
371 | SAVE_10GPRS(12,r1) | ||
372 | SAVE_10GPRS(22,r1) | ||
373 | lhz r12,PACA_TRAP_SAVE(r13) | ||
374 | std r12,_TRAP(r1) | ||
375 | addi r11,r1,INT_FRAME_SIZE | ||
376 | std r11,0(r1) | ||
377 | li r12,0 | ||
378 | std r12,0(r11) | ||
379 | ld r2,PACATOC(r13) | ||
380 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
381 | bl .kernel_bad_stack | ||
382 | b 1b | ||
383 | |||
384 | /* | ||
385 | * Here r13 points to the paca, r9 contains the saved CR, | ||
386 | * SRR0 and SRR1 are saved in r11 and r12, | ||
387 | * r9 - r13 are saved in paca->exgen. | ||
388 | */ | ||
389 | .align 7 | ||
390 | .globl data_access_common | ||
391 | data_access_common: | ||
392 | mfspr r10,SPRN_DAR | ||
393 | std r10,PACA_EXGEN+EX_DAR(r13) | ||
394 | mfspr r10,SPRN_DSISR | ||
395 | stw r10,PACA_EXGEN+EX_DSISR(r13) | ||
396 | EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) | ||
397 | ld r3,PACA_EXGEN+EX_DAR(r13) | ||
398 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | ||
399 | li r5,0x300 | ||
400 | b .do_hash_page /* Try to handle as hpte fault */ | ||
401 | |||
402 | .align 7 | ||
403 | .globl instruction_access_common | ||
404 | instruction_access_common: | ||
405 | EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) | ||
406 | ld r3,_NIP(r1) | ||
407 | andis. r4,r12,0x5820 | ||
408 | li r5,0x400 | ||
409 | b .do_hash_page /* Try to handle as hpte fault */ | ||
410 | |||
411 | /* | ||
412 | * Here is the common SLB miss user that is used when going to virtual | ||
413 | * mode for SLB misses, that is currently not used | ||
414 | */ | ||
415 | #ifdef __DISABLED__ | ||
416 | .align 7 | ||
417 | .globl slb_miss_user_common | ||
418 | slb_miss_user_common: | ||
419 | mflr r10 | ||
420 | std r3,PACA_EXGEN+EX_DAR(r13) | ||
421 | stw r9,PACA_EXGEN+EX_CCR(r13) | ||
422 | std r10,PACA_EXGEN+EX_LR(r13) | ||
423 | std r11,PACA_EXGEN+EX_SRR0(r13) | ||
424 | bl .slb_allocate_user | ||
425 | |||
426 | ld r10,PACA_EXGEN+EX_LR(r13) | ||
427 | ld r3,PACA_EXGEN+EX_R3(r13) | ||
428 | lwz r9,PACA_EXGEN+EX_CCR(r13) | ||
429 | ld r11,PACA_EXGEN+EX_SRR0(r13) | ||
430 | mtlr r10 | ||
431 | beq- slb_miss_fault | ||
432 | |||
433 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
434 | beq- unrecov_user_slb | ||
435 | mfmsr r10 | ||
436 | |||
437 | .machine push | ||
438 | .machine "power4" | ||
439 | mtcrf 0x80,r9 | ||
440 | .machine pop | ||
441 | |||
442 | clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */ | ||
443 | mtmsrd r10,1 | ||
444 | |||
445 | mtspr SRR0,r11 | ||
446 | mtspr SRR1,r12 | ||
447 | |||
448 | ld r9,PACA_EXGEN+EX_R9(r13) | ||
449 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
450 | ld r11,PACA_EXGEN+EX_R11(r13) | ||
451 | ld r12,PACA_EXGEN+EX_R12(r13) | ||
452 | ld r13,PACA_EXGEN+EX_R13(r13) | ||
453 | rfid | ||
454 | b . | ||
455 | |||
456 | slb_miss_fault: | ||
457 | EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN) | ||
458 | ld r4,PACA_EXGEN+EX_DAR(r13) | ||
459 | li r5,0 | ||
460 | std r4,_DAR(r1) | ||
461 | std r5,_DSISR(r1) | ||
462 | b handle_page_fault | ||
463 | |||
464 | unrecov_user_slb: | ||
465 | EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) | ||
466 | DISABLE_INTS | ||
467 | bl .save_nvgprs | ||
468 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
469 | bl .unrecoverable_exception | ||
470 | b 1b | ||
471 | |||
472 | #endif /* __DISABLED__ */ | ||
473 | |||
474 | |||
475 | /* | ||
476 | * r13 points to the PACA, r9 contains the saved CR, | ||
477 | * r12 contain the saved SRR1, SRR0 is still ready for return | ||
478 | * r3 has the faulting address | ||
479 | * r9 - r13 are saved in paca->exslb. | ||
480 | * r3 is saved in paca->slb_r3 | ||
481 | * We assume we aren't going to take any exceptions during this procedure. | ||
482 | */ | ||
483 | _GLOBAL(slb_miss_realmode) | ||
484 | mflr r10 | ||
485 | #ifdef CONFIG_RELOCATABLE | ||
486 | mtctr r11 | ||
487 | #endif | ||
488 | |||
489 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
490 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | ||
491 | |||
492 | bl .slb_allocate_realmode | ||
493 | |||
494 | /* All done -- return from exception. */ | ||
495 | |||
496 | ld r10,PACA_EXSLB+EX_LR(r13) | ||
497 | ld r3,PACA_EXSLB+EX_R3(r13) | ||
498 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
499 | #ifdef CONFIG_PPC_ISERIES | ||
500 | BEGIN_FW_FTR_SECTION | ||
501 | ld r11,PACALPPACAPTR(r13) | ||
502 | ld r11,LPPACASRR0(r11) /* get SRR0 value */ | ||
503 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | ||
504 | #endif /* CONFIG_PPC_ISERIES */ | ||
505 | |||
506 | mtlr r10 | ||
507 | |||
508 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
509 | beq- 2f | ||
510 | |||
511 | .machine push | ||
512 | .machine "power4" | ||
513 | mtcrf 0x80,r9 | ||
514 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | ||
515 | .machine pop | ||
516 | |||
517 | #ifdef CONFIG_PPC_ISERIES | ||
518 | BEGIN_FW_FTR_SECTION | ||
519 | mtspr SPRN_SRR0,r11 | ||
520 | mtspr SPRN_SRR1,r12 | ||
521 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | ||
522 | #endif /* CONFIG_PPC_ISERIES */ | ||
523 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
524 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
525 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
526 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
527 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
528 | rfid | ||
529 | b . /* prevent speculative execution */ | ||
530 | |||
531 | 2: | ||
532 | #ifdef CONFIG_PPC_ISERIES | ||
533 | BEGIN_FW_FTR_SECTION | ||
534 | b unrecov_slb | ||
535 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | ||
536 | #endif /* CONFIG_PPC_ISERIES */ | ||
537 | mfspr r11,SPRN_SRR0 | ||
538 | ld r10,PACAKBASE(r13) | ||
539 | LOAD_HANDLER(r10,unrecov_slb) | ||
540 | mtspr SPRN_SRR0,r10 | ||
541 | ld r10,PACAKMSR(r13) | ||
542 | mtspr SPRN_SRR1,r10 | ||
543 | rfid | ||
544 | b . | ||
545 | |||
546 | unrecov_slb: | ||
547 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | ||
548 | DISABLE_INTS | ||
549 | bl .save_nvgprs | ||
550 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
551 | bl .unrecoverable_exception | ||
552 | b 1b | ||
553 | |||
554 | .align 7 | ||
555 | .globl hardware_interrupt_common | ||
556 | .globl hardware_interrupt_entry | ||
557 | hardware_interrupt_common: | ||
558 | EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) | ||
559 | FINISH_NAP | ||
560 | hardware_interrupt_entry: | ||
561 | DISABLE_INTS | ||
562 | BEGIN_FTR_SECTION | ||
563 | bl .ppc64_runlatch_on | ||
564 | END_FTR_SECTION_IFSET(CPU_FTR_CTRL) | ||
565 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
566 | bl .do_IRQ | ||
567 | b .ret_from_except_lite | ||
568 | |||
569 | #ifdef CONFIG_PPC_970_NAP | ||
570 | power4_fixup_nap: | ||
571 | andc r9,r9,r10 | ||
572 | std r9,TI_LOCAL_FLAGS(r11) | ||
573 | ld r10,_LINK(r1) /* make idle task do the */ | ||
574 | std r10,_NIP(r1) /* equivalent of a blr */ | ||
575 | blr | ||
576 | #endif | ||
577 | |||
578 | .align 7 | ||
579 | .globl alignment_common | ||
580 | alignment_common: | ||
581 | mfspr r10,SPRN_DAR | ||
582 | std r10,PACA_EXGEN+EX_DAR(r13) | ||
583 | mfspr r10,SPRN_DSISR | ||
584 | stw r10,PACA_EXGEN+EX_DSISR(r13) | ||
585 | EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) | ||
586 | ld r3,PACA_EXGEN+EX_DAR(r13) | ||
587 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | ||
588 | std r3,_DAR(r1) | ||
589 | std r4,_DSISR(r1) | ||
590 | bl .save_nvgprs | ||
591 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
592 | ENABLE_INTS | ||
593 | bl .alignment_exception | ||
594 | b .ret_from_except | ||
595 | |||
596 | .align 7 | ||
597 | .globl program_check_common | ||
598 | program_check_common: | ||
599 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | ||
600 | bl .save_nvgprs | ||
601 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
602 | ENABLE_INTS | ||
603 | bl .program_check_exception | ||
604 | b .ret_from_except | ||
605 | |||
606 | .align 7 | ||
607 | .globl fp_unavailable_common | ||
608 | fp_unavailable_common: | ||
609 | EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) | ||
610 | bne 1f /* if from user, just load it up */ | ||
611 | bl .save_nvgprs | ||
612 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
613 | ENABLE_INTS | ||
614 | bl .kernel_fp_unavailable_exception | ||
615 | BUG_OPCODE | ||
616 | 1: bl .load_up_fpu | ||
617 | b fast_exception_return | ||
618 | |||
619 | .align 7 | ||
620 | .globl altivec_unavailable_common | ||
621 | altivec_unavailable_common: | ||
622 | EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) | ||
623 | #ifdef CONFIG_ALTIVEC | ||
624 | BEGIN_FTR_SECTION | ||
625 | beq 1f | ||
626 | bl .load_up_altivec | ||
627 | b fast_exception_return | ||
628 | 1: | ||
629 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
630 | #endif | ||
631 | bl .save_nvgprs | ||
632 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
633 | ENABLE_INTS | ||
634 | bl .altivec_unavailable_exception | ||
635 | b .ret_from_except | ||
636 | |||
637 | .align 7 | ||
638 | .globl vsx_unavailable_common | ||
639 | vsx_unavailable_common: | ||
640 | EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN) | ||
641 | #ifdef CONFIG_VSX | ||
642 | BEGIN_FTR_SECTION | ||
643 | bne .load_up_vsx | ||
644 | 1: | ||
645 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | ||
646 | #endif | ||
647 | bl .save_nvgprs | ||
648 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
649 | ENABLE_INTS | ||
650 | bl .vsx_unavailable_exception | ||
651 | b .ret_from_except | ||
652 | |||
653 | .align 7 | ||
654 | .globl __end_handlers | ||
655 | __end_handlers: | ||
656 | |||
657 | /* | ||
658 | * Return from an exception with minimal checks. | ||
659 | * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. | ||
660 | * If interrupts have been enabled, or anything has been | ||
661 | * done that might have changed the scheduling status of | ||
662 | * any task or sent any task a signal, you should use | ||
663 | * ret_from_except or ret_from_except_lite instead of this. | ||
664 | */ | ||
665 | fast_exc_return_irq: /* restores irq state too */ | ||
666 | ld r3,SOFTE(r1) | ||
667 | TRACE_AND_RESTORE_IRQ(r3); | ||
668 | ld r12,_MSR(r1) | ||
669 | rldicl r4,r12,49,63 /* get MSR_EE to LSB */ | ||
670 | stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */ | ||
671 | b 1f | ||
672 | |||
673 | .globl fast_exception_return | ||
674 | fast_exception_return: | ||
675 | ld r12,_MSR(r1) | ||
676 | 1: ld r11,_NIP(r1) | ||
677 | andi. r3,r12,MSR_RI /* check if RI is set */ | ||
678 | beq- unrecov_fer | ||
679 | |||
680 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
681 | andi. r3,r12,MSR_PR | ||
682 | beq 2f | ||
683 | ACCOUNT_CPU_USER_EXIT(r3, r4) | ||
684 | 2: | ||
685 | #endif | ||
686 | |||
687 | ld r3,_CCR(r1) | ||
688 | ld r4,_LINK(r1) | ||
689 | ld r5,_CTR(r1) | ||
690 | ld r6,_XER(r1) | ||
691 | mtcr r3 | ||
692 | mtlr r4 | ||
693 | mtctr r5 | ||
694 | mtxer r6 | ||
695 | REST_GPR(0, r1) | ||
696 | REST_8GPRS(2, r1) | ||
697 | |||
698 | mfmsr r10 | ||
699 | rldicl r10,r10,48,1 /* clear EE */ | ||
700 | rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */ | ||
701 | mtmsrd r10,1 | ||
702 | |||
703 | mtspr SPRN_SRR1,r12 | ||
704 | mtspr SPRN_SRR0,r11 | ||
705 | REST_4GPRS(10, r1) | ||
706 | ld r1,GPR1(r1) | ||
707 | rfid | ||
708 | b . /* prevent speculative execution */ | ||
709 | |||
710 | unrecov_fer: | ||
711 | bl .save_nvgprs | ||
712 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
713 | bl .unrecoverable_exception | ||
714 | b 1b | ||
715 | |||
716 | |||
717 | /* | ||
718 | * Hash table stuff | ||
719 | */ | ||
720 | .align 7 | ||
721 | _STATIC(do_hash_page) | ||
722 | std r3,_DAR(r1) | ||
723 | std r4,_DSISR(r1) | ||
724 | |||
725 | andis. r0,r4,0xa450 /* weird error? */ | ||
726 | bne- handle_page_fault /* if not, try to insert a HPTE */ | ||
727 | BEGIN_FTR_SECTION | ||
728 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ | ||
729 | bne- do_ste_alloc /* If so handle it */ | ||
730 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
731 | |||
732 | /* | ||
733 | * On iSeries, we soft-disable interrupts here, then | ||
734 | * hard-enable interrupts so that the hash_page code can spin on | ||
735 | * the hash_table_lock without problems on a shared processor. | ||
736 | */ | ||
737 | DISABLE_INTS | ||
738 | |||
739 | /* | ||
740 | * Currently, trace_hardirqs_off() will be called by DISABLE_INTS | ||
741 | * and will clobber volatile registers when irq tracing is enabled | ||
742 | * so we need to reload them. It may be possible to be smarter here | ||
743 | * and move the irq tracing elsewhere but let's keep it simple for | ||
744 | * now | ||
745 | */ | ||
746 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
747 | ld r3,_DAR(r1) | ||
748 | ld r4,_DSISR(r1) | ||
749 | ld r5,_TRAP(r1) | ||
750 | ld r12,_MSR(r1) | ||
751 | clrrdi r5,r5,4 | ||
752 | #endif /* CONFIG_TRACE_IRQFLAGS */ | ||
753 | /* | ||
754 | * We need to set the _PAGE_USER bit if MSR_PR is set or if we are | ||
755 | * accessing a userspace segment (even from the kernel). We assume | ||
756 | * kernel addresses always have the high bit set. | ||
757 | */ | ||
758 | rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ | ||
759 | rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ | ||
760 | orc r0,r12,r0 /* MSR_PR | ~high_bit */ | ||
761 | rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ | ||
762 | ori r4,r4,1 /* add _PAGE_PRESENT */ | ||
763 | rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ | ||
764 | |||
765 | /* | ||
766 | * r3 contains the faulting address | ||
767 | * r4 contains the required access permissions | ||
768 | * r5 contains the trap number | ||
769 | * | ||
770 | * at return r3 = 0 for success | ||
771 | */ | ||
772 | bl .hash_page /* build HPTE if possible */ | ||
773 | cmpdi r3,0 /* see if hash_page succeeded */ | ||
774 | |||
775 | BEGIN_FW_FTR_SECTION | ||
776 | /* | ||
777 | * If we had interrupts soft-enabled at the point where the | ||
778 | * DSI/ISI occurred, and an interrupt came in during hash_page, | ||
779 | * handle it now. | ||
780 | * We jump to ret_from_except_lite rather than fast_exception_return | ||
781 | * because ret_from_except_lite will check for and handle pending | ||
782 | * interrupts if necessary. | ||
783 | */ | ||
784 | beq 13f | ||
785 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | ||
786 | |||
787 | BEGIN_FW_FTR_SECTION | ||
788 | /* | ||
789 | * Here we have interrupts hard-disabled, so it is sufficient | ||
790 | * to restore paca->{soft,hard}_enable and get out. | ||
791 | */ | ||
792 | beq fast_exc_return_irq /* Return from exception on success */ | ||
793 | END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) | ||
794 | |||
795 | /* For a hash failure, we don't bother re-enabling interrupts */ | ||
796 | ble- 12f | ||
797 | |||
798 | /* | ||
799 | * hash_page couldn't handle it, set soft interrupt enable back | ||
800 | * to what it was before the trap. Note that .raw_local_irq_restore | ||
801 | * handles any interrupts pending at this point. | ||
802 | */ | ||
803 | ld r3,SOFTE(r1) | ||
804 | TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) | ||
805 | bl .raw_local_irq_restore | ||
806 | b 11f | ||
807 | |||
808 | /* Here we have a page fault that hash_page can't handle. */ | ||
809 | handle_page_fault: | ||
810 | ENABLE_INTS | ||
811 | 11: ld r4,_DAR(r1) | ||
812 | ld r5,_DSISR(r1) | ||
813 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
814 | bl .do_page_fault | ||
815 | cmpdi r3,0 | ||
816 | beq+ 13f | ||
817 | bl .save_nvgprs | ||
818 | mr r5,r3 | ||
819 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
820 | lwz r4,_DAR(r1) | ||
821 | bl .bad_page_fault | ||
822 | b .ret_from_except | ||
823 | |||
824 | 13: b .ret_from_except_lite | ||
825 | |||
826 | /* We have a page fault that hash_page could handle but HV refused | ||
827 | * the PTE insertion | ||
828 | */ | ||
829 | 12: bl .save_nvgprs | ||
830 | mr r5,r3 | ||
831 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
832 | ld r4,_DAR(r1) | ||
833 | bl .low_hash_fault | ||
834 | b .ret_from_except | ||
835 | |||
836 | /* here we have a segment miss */ | ||
837 | do_ste_alloc: | ||
838 | bl .ste_allocate /* try to insert stab entry */ | ||
839 | cmpdi r3,0 | ||
840 | bne- handle_page_fault | ||
841 | b fast_exception_return | ||
842 | |||
843 | /* | ||
844 | * r13 points to the PACA, r9 contains the saved CR, | ||
845 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
846 | * r9 - r13 are saved in paca->exslb. | ||
847 | * We assume we aren't going to take any exceptions during this procedure. | ||
848 | * We assume (DAR >> 60) == 0xc. | ||
849 | */ | ||
850 | .align 7 | ||
851 | _GLOBAL(do_stab_bolted) | ||
852 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
853 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ | ||
854 | |||
855 | /* Hash to the primary group */ | ||
856 | ld r10,PACASTABVIRT(r13) | ||
857 | mfspr r11,SPRN_DAR | ||
858 | srdi r11,r11,28 | ||
859 | rldimi r10,r11,7,52 /* r10 = first ste of the group */ | ||
860 | |||
861 | /* Calculate VSID */ | ||
862 | /* This is a kernel address, so protovsid = ESID */ | ||
863 | ASM_VSID_SCRAMBLE(r11, r9, 256M) | ||
864 | rldic r9,r11,12,16 /* r9 = vsid << 12 */ | ||
865 | |||
866 | /* Search the primary group for a free entry */ | ||
867 | 1: ld r11,0(r10) /* Test valid bit of the current ste */ | ||
868 | andi. r11,r11,0x80 | ||
869 | beq 2f | ||
870 | addi r10,r10,16 | ||
871 | andi. r11,r10,0x70 | ||
872 | bne 1b | ||
873 | |||
874 | /* Stick for only searching the primary group for now. */ | ||
875 | /* At least for now, we use a very simple random castout scheme */ | ||
876 | /* Use the TB as a random number ; OR in 1 to avoid entry 0 */ | ||
877 | mftb r11 | ||
878 | rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ | ||
879 | ori r11,r11,0x10 | ||
880 | |||
881 | /* r10 currently points to an ste one past the group of interest */ | ||
882 | /* make it point to the randomly selected entry */ | ||
883 | subi r10,r10,128 | ||
884 | or r10,r10,r11 /* r10 is the entry to invalidate */ | ||
885 | |||
886 | isync /* mark the entry invalid */ | ||
887 | ld r11,0(r10) | ||
888 | rldicl r11,r11,56,1 /* clear the valid bit */ | ||
889 | rotldi r11,r11,8 | ||
890 | std r11,0(r10) | ||
891 | sync | ||
892 | |||
893 | clrrdi r11,r11,28 /* Get the esid part of the ste */ | ||
894 | slbie r11 | ||
895 | |||
896 | 2: std r9,8(r10) /* Store the vsid part of the ste */ | ||
897 | eieio | ||
898 | |||
899 | mfspr r11,SPRN_DAR /* Get the new esid */ | ||
900 | clrrdi r11,r11,28 /* Permits a full 32b of ESID */ | ||
901 | ori r11,r11,0x90 /* Turn on valid and kp */ | ||
902 | std r11,0(r10) /* Put new entry back into the stab */ | ||
903 | |||
904 | sync | ||
905 | |||
906 | /* All done -- return from exception. */ | ||
907 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
908 | ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ | ||
909 | |||
910 | andi. r10,r12,MSR_RI | ||
911 | beq- unrecov_slb | ||
912 | |||
913 | mtcrf 0x80,r9 /* restore CR */ | ||
914 | |||
915 | mfmsr r10 | ||
916 | clrrdi r10,r10,2 | ||
917 | mtmsrd r10,1 | ||
918 | |||
919 | mtspr SPRN_SRR0,r11 | ||
920 | mtspr SPRN_SRR1,r12 | ||
921 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
922 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
923 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
924 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
925 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
926 | rfid | ||
927 | b . /* prevent speculative execution */ | ||
928 | |||
929 | /* | ||
930 | * Space for CPU0's segment table. | ||
931 | * | ||
932 | * On iSeries, the hypervisor must fill in at least one entry before | ||
933 | * we get control (with relocate on). The address is given to the hv | ||
934 | * as a page number (see xLparMap below), so this must be at a | ||
935 | * fixed address (the linker can't compute (u64)&initial_stab >> | ||
936 | * PAGE_SHIFT). | ||
937 | */ | ||
938 | . = STAB0_OFFSET /* 0x6000 */ | ||
939 | .globl initial_stab | ||
940 | initial_stab: | ||
941 | .space 4096 | ||
942 | |||
943 | #ifdef CONFIG_PPC_PSERIES | ||
944 | /* | ||
945 | * Data area reserved for FWNMI option. | ||
946 | * This address (0x7000) is fixed by the RPA. | ||
947 | */ | ||
948 | .= 0x7000 | ||
949 | .globl fwnmi_data_area | ||
950 | fwnmi_data_area: | ||
951 | #endif /* CONFIG_PPC_PSERIES */ | ||
952 | |||
953 | /* iSeries does not use the FWNMI stuff, so it is safe to put | ||
954 | * this here, even if we later allow kernels that will boot on | ||
955 | * both pSeries and iSeries */ | ||
956 | #ifdef CONFIG_PPC_ISERIES | ||
957 | . = LPARMAP_PHYS | ||
958 | .globl xLparMap | ||
959 | xLparMap: | ||
960 | .quad HvEsidsToMap /* xNumberEsids */ | ||
961 | .quad HvRangesToMap /* xNumberRanges */ | ||
962 | .quad STAB0_PAGE /* xSegmentTableOffs */ | ||
963 | .zero 40 /* xRsvd */ | ||
964 | /* xEsids (HvEsidsToMap entries of 2 quads) */ | ||
965 | .quad PAGE_OFFSET_ESID /* xKernelEsid */ | ||
966 | .quad PAGE_OFFSET_VSID /* xKernelVsid */ | ||
967 | .quad VMALLOC_START_ESID /* xKernelEsid */ | ||
968 | .quad VMALLOC_START_VSID /* xKernelVsid */ | ||
969 | /* xRanges (HvRangesToMap entries of 3 quads) */ | ||
970 | .quad HvPagesToMap /* xPages */ | ||
971 | .quad 0 /* xOffset */ | ||
972 | .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */ | ||
973 | |||
974 | #endif /* CONFIG_PPC_ISERIES */ | ||
975 | |||
976 | #ifdef CONFIG_PPC_PSERIES | ||
977 | . = 0x8000 | ||
978 | #endif /* CONFIG_PPC_PSERIES */ | ||
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 2d182f119d1d..1b12696cca06 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -23,25 +23,14 @@ | |||
23 | #include <asm/code-patching.h> | 23 | #include <asm/code-patching.h> |
24 | #include <asm/ftrace.h> | 24 | #include <asm/ftrace.h> |
25 | 25 | ||
26 | #ifdef CONFIG_PPC32 | ||
27 | # define GET_ADDR(addr) addr | ||
28 | #else | ||
29 | /* PowerPC64's functions are data that points to the functions */ | ||
30 | # define GET_ADDR(addr) (*(unsigned long *)addr) | ||
31 | #endif | ||
32 | 26 | ||
33 | #ifdef CONFIG_DYNAMIC_FTRACE | 27 | #ifdef CONFIG_DYNAMIC_FTRACE |
34 | static unsigned int ftrace_nop_replace(void) | ||
35 | { | ||
36 | return PPC_INST_NOP; | ||
37 | } | ||
38 | |||
39 | static unsigned int | 28 | static unsigned int |
40 | ftrace_call_replace(unsigned long ip, unsigned long addr, int link) | 29 | ftrace_call_replace(unsigned long ip, unsigned long addr, int link) |
41 | { | 30 | { |
42 | unsigned int op; | 31 | unsigned int op; |
43 | 32 | ||
44 | addr = GET_ADDR(addr); | 33 | addr = ppc_function_entry((void *)addr); |
45 | 34 | ||
46 | /* if (link) set op to 'bl' else 'b' */ | 35 | /* if (link) set op to 'bl' else 'b' */ |
47 | op = create_branch((unsigned int *)ip, addr, link ? 1 : 0); | 36 | op = create_branch((unsigned int *)ip, addr, link ? 1 : 0); |
@@ -49,14 +38,6 @@ ftrace_call_replace(unsigned long ip, unsigned long addr, int link) | |||
49 | return op; | 38 | return op; |
50 | } | 39 | } |
51 | 40 | ||
52 | #ifdef CONFIG_PPC64 | ||
53 | # define _ASM_ALIGN " .align 3 " | ||
54 | # define _ASM_PTR " .llong " | ||
55 | #else | ||
56 | # define _ASM_ALIGN " .align 2 " | ||
57 | # define _ASM_PTR " .long " | ||
58 | #endif | ||
59 | |||
60 | static int | 41 | static int |
61 | ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) | 42 | ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) |
62 | { | 43 | { |
@@ -197,7 +178,7 @@ __ftrace_make_nop(struct module *mod, | |||
197 | ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; | 178 | ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; |
198 | 179 | ||
199 | /* This should match what was called */ | 180 | /* This should match what was called */ |
200 | if (ptr != GET_ADDR(addr)) { | 181 | if (ptr != ppc_function_entry((void *)addr)) { |
201 | printk(KERN_ERR "addr does not match %lx\n", ptr); | 182 | printk(KERN_ERR "addr does not match %lx\n", ptr); |
202 | return -EINVAL; | 183 | return -EINVAL; |
203 | } | 184 | } |
@@ -328,7 +309,7 @@ int ftrace_make_nop(struct module *mod, | |||
328 | if (test_24bit_addr(ip, addr)) { | 309 | if (test_24bit_addr(ip, addr)) { |
329 | /* within range */ | 310 | /* within range */ |
330 | old = ftrace_call_replace(ip, addr, 1); | 311 | old = ftrace_call_replace(ip, addr, 1); |
331 | new = ftrace_nop_replace(); | 312 | new = PPC_INST_NOP; |
332 | return ftrace_modify_code(ip, old, new); | 313 | return ftrace_modify_code(ip, old, new); |
333 | } | 314 | } |
334 | 315 | ||
@@ -466,7 +447,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
466 | */ | 447 | */ |
467 | if (test_24bit_addr(ip, addr)) { | 448 | if (test_24bit_addr(ip, addr)) { |
468 | /* within range */ | 449 | /* within range */ |
469 | old = ftrace_nop_replace(); | 450 | old = PPC_INST_NOP; |
470 | new = ftrace_call_replace(ip, addr, 1); | 451 | new = ftrace_call_replace(ip, addr, 1); |
471 | return ftrace_modify_code(ip, old, new); | 452 | return ftrace_modify_code(ip, old, new); |
472 | } | 453 | } |
@@ -570,7 +551,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
570 | return_hooker = (unsigned long)&mod_return_to_handler; | 551 | return_hooker = (unsigned long)&mod_return_to_handler; |
571 | #endif | 552 | #endif |
572 | 553 | ||
573 | return_hooker = GET_ADDR(return_hooker); | 554 | return_hooker = ppc_function_entry((void *)return_hooker); |
574 | 555 | ||
575 | /* | 556 | /* |
576 | * Protect against fault, even if it shouldn't | 557 | * Protect against fault, even if it shouldn't |
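
For context on the ftrace changes above: the removed GET_ADDR() macro and its replacement ppc_function_entry() both exist to look through the 64-bit function descriptor, since with the 64-bit ELFv1 ABI a function symbol points at a descriptor whose first doubleword is the real code address, while on 32-bit the symbol already is the code address. Below is a minimal sketch of that lookup under the ELFv1 descriptor assumption; the helper name is made up, and the kernel's ppc_function_entry() (from the code-patching support) may differ in detail.

/* Sketch only: resolve a function symbol to its code entry point,
 * assuming the 64-bit ELFv1 function-descriptor convention. */
static unsigned long function_entry_sketch(void *func)
{
#ifdef __powerpc64__
	/* The symbol names an OPD entry { entry, TOC, env }; the first
	 * doubleword holds the address of the actual instructions. */
	return ((unsigned long *)func)[0];
#else
	/* 32-bit: the symbol is the entry point itself. */
	return (unsigned long)func;
#endif
}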
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index c01467f952d3..48469463f89e 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S | |||
@@ -733,9 +733,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU) | |||
733 | AltiVecUnavailable: | 733 | AltiVecUnavailable: |
734 | EXCEPTION_PROLOG | 734 | EXCEPTION_PROLOG |
735 | #ifdef CONFIG_ALTIVEC | 735 | #ifdef CONFIG_ALTIVEC |
736 | bne load_up_altivec /* if from user, just load it up */ | 736 | beq 1f |
737 | bl load_up_altivec /* if from user, just load it up */ | ||
738 | b fast_exception_return | ||
737 | #endif /* CONFIG_ALTIVEC */ | 739 | #endif /* CONFIG_ALTIVEC */ |
738 | addi r3,r1,STACK_FRAME_OVERHEAD | 740 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
739 | EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception) | 741 | EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception) |
740 | 742 | ||
741 | PerformanceMonitor: | 743 | PerformanceMonitor: |
@@ -743,101 +745,6 @@ PerformanceMonitor: | |||
743 | addi r3,r1,STACK_FRAME_OVERHEAD | 745 | addi r3,r1,STACK_FRAME_OVERHEAD |
744 | EXC_XFER_STD(0xf00, performance_monitor_exception) | 746 | EXC_XFER_STD(0xf00, performance_monitor_exception) |
745 | 747 | ||
746 | #ifdef CONFIG_ALTIVEC | ||
747 | /* Note that the AltiVec support is closely modeled after the FP | ||
748 | * support. Changes to one are likely to be applicable to the | ||
749 | * other! */ | ||
750 | load_up_altivec: | ||
751 | /* | ||
752 | * Disable AltiVec for the task which had AltiVec previously, | ||
753 | * and save its AltiVec registers in its thread_struct. | ||
754 | * Enables AltiVec for use in the kernel on return. | ||
755 | * On SMP we know the AltiVec units are free, since we give it up every | ||
756 | * switch. -- Kumar | ||
757 | */ | ||
758 | mfmsr r5 | ||
759 | oris r5,r5,MSR_VEC@h | ||
760 | MTMSRD(r5) /* enable use of AltiVec now */ | ||
761 | isync | ||
762 | /* | ||
763 | * For SMP, we don't do lazy AltiVec switching because it just gets too | ||
764 | * horrendously complex, especially when a task switches from one CPU | ||
765 | * to another. Instead we call giveup_altivec in switch_to. | ||
766 | */ | ||
767 | #ifndef CONFIG_SMP | ||
768 | tophys(r6,0) | ||
769 | addis r3,r6,last_task_used_altivec@ha | ||
770 | lwz r4,last_task_used_altivec@l(r3) | ||
771 | cmpwi 0,r4,0 | ||
772 | beq 1f | ||
773 | add r4,r4,r6 | ||
774 | addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */ | ||
775 | SAVE_32VRS(0,r10,r4) | ||
776 | mfvscr vr0 | ||
777 | li r10,THREAD_VSCR | ||
778 | stvx vr0,r10,r4 | ||
779 | lwz r5,PT_REGS(r4) | ||
780 | add r5,r5,r6 | ||
781 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
782 | lis r10,MSR_VEC@h | ||
783 | andc r4,r4,r10 /* disable altivec for previous task */ | ||
784 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
785 | 1: | ||
786 | #endif /* CONFIG_SMP */ | ||
787 | /* enable use of AltiVec after return */ | ||
788 | oris r9,r9,MSR_VEC@h | ||
789 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | ||
790 | li r4,1 | ||
791 | li r10,THREAD_VSCR | ||
792 | stw r4,THREAD_USED_VR(r5) | ||
793 | lvx vr0,r10,r5 | ||
794 | mtvscr vr0 | ||
795 | REST_32VRS(0,r10,r5) | ||
796 | #ifndef CONFIG_SMP | ||
797 | subi r4,r5,THREAD | ||
798 | sub r4,r4,r6 | ||
799 | stw r4,last_task_used_altivec@l(r3) | ||
800 | #endif /* CONFIG_SMP */ | ||
801 | /* restore registers and return */ | ||
802 | /* we haven't used ctr or xer or lr */ | ||
803 | b fast_exception_return | ||
804 | |||
805 | /* | ||
806 | * giveup_altivec(tsk) | ||
807 | * Disable AltiVec for the task given as the argument, | ||
808 | * and save the AltiVec registers in its thread_struct. | ||
809 | * Enables AltiVec for use in the kernel on return. | ||
810 | */ | ||
811 | |||
812 | .globl giveup_altivec | ||
813 | giveup_altivec: | ||
814 | mfmsr r5 | ||
815 | oris r5,r5,MSR_VEC@h | ||
816 | SYNC | ||
817 | MTMSRD(r5) /* enable use of AltiVec now */ | ||
818 | isync | ||
819 | cmpwi 0,r3,0 | ||
820 | beqlr- /* if no previous owner, done */ | ||
821 | addi r3,r3,THREAD /* want THREAD of task */ | ||
822 | lwz r5,PT_REGS(r3) | ||
823 | cmpwi 0,r5,0 | ||
824 | SAVE_32VRS(0, r4, r3) | ||
825 | mfvscr vr0 | ||
826 | li r4,THREAD_VSCR | ||
827 | stvx vr0,r4,r3 | ||
828 | beq 1f | ||
829 | lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
830 | lis r3,MSR_VEC@h | ||
831 | andc r4,r4,r3 /* disable AltiVec for previous task */ | ||
832 | stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
833 | 1: | ||
834 | #ifndef CONFIG_SMP | ||
835 | li r5,0 | ||
836 | lis r4,last_task_used_altivec@ha | ||
837 | stw r5,last_task_used_altivec@l(r4) | ||
838 | #endif /* CONFIG_SMP */ | ||
839 | blr | ||
840 | #endif /* CONFIG_ALTIVEC */ | ||
841 | 748 | ||
842 | /* | 749 | /* |
843 | * This code is jumped to from the startup code to copy | 750 | * This code is jumped to from the startup code to copy |
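
The load_up_altivec/giveup_altivec code removed from head_32.S above implements lazy AltiVec switching on UP: the unit stays owned by the last task that used it, and only the next "AltiVec unavailable" exception saves the previous owner's registers and loads the new owner's. A loose, self-contained C sketch of that hand-off follows; all names and the MSR bit value are stand-ins chosen for the sketch, and on SMP the kernel instead gives the unit up on every context switch, as the removed comments note.

#define MSR_VEC_SKETCH	0x02000000UL	/* assumed MSR_VEC bit, for the sketch only */

struct vthread_sketch {
	unsigned long msr;		/* saved MSR of the interrupted context */
	unsigned long vr[32][2];	/* vector registers (stand-in layout)   */
	unsigned long vscr;
	int used_vr;
};

static struct vthread_sketch *last_task_used_altivec_sketch;

static void save_vrs(struct vthread_sketch *t)    { (void)t; /* SAVE_32VRS + mfvscr */ }
static void restore_vrs(struct vthread_sketch *t) { (void)t; /* REST_32VRS + mtvscr */ }

void altivec_unavailable_sketch(struct vthread_sketch *cur)
{
	if (last_task_used_altivec_sketch &&
	    last_task_used_altivec_sketch != cur) {
		/* Save the previous owner's VR0-VR31/VSCR and clear MSR_VEC
		 * in its saved MSR so it faults again on its next vector op. */
		save_vrs(last_task_used_altivec_sketch);
		last_task_used_altivec_sketch->msr &= ~MSR_VEC_SKETCH;
	}

	/* Load the new owner's state, remember that it has used VMX, and
	 * turn MSR_VEC back on in the MSR it will return with. */
	restore_vrs(cur);
	cur->used_vr = 1;
	cur->msr |= MSR_VEC_SKETCH;

	last_task_used_altivec_sketch = cur;
}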
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 50ef505b8fb6..012505ebd9f9 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -12,8 +12,9 @@ | |||
12 | * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and | 12 | * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and |
13 | * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com | 13 | * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com |
14 | * | 14 | * |
15 | * This file contains the low-level support and setup for the | 15 | * This file contains the entry point for the 64-bit kernel along |
16 | * PowerPC-64 platform, including trap and interrupt dispatch. | 16 | * with some early initialization code common to all 64-bit powerpc |
17 | * variants. | ||
17 | * | 18 | * |
18 | * This program is free software; you can redistribute it and/or | 19 | * This program is free software; you can redistribute it and/or |
19 | * modify it under the terms of the GNU General Public License | 20 | * modify it under the terms of the GNU General Public License |
@@ -38,36 +39,25 @@ | |||
38 | #include <asm/exception.h> | 39 | #include <asm/exception.h> |
39 | #include <asm/irqflags.h> | 40 | #include <asm/irqflags.h> |
40 | 41 | ||
41 | /* | 42 | /* The physical memory is laid out such that the secondary processor |
42 | * We layout physical memory as follows: | 43 | * spin code sits at 0x0000...0x00ff. On server, the vectors follow |
43 | * 0x0000 - 0x00ff : Secondary processor spin code | 44 | * using the layout described in exceptions-64s.S |
44 | * 0x0100 - 0x2fff : pSeries Interrupt prologs | ||
45 | * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs | ||
46 | * 0x6000 - 0x6fff : Initial (CPU0) segment table | ||
47 | * 0x7000 - 0x7fff : FWNMI data area | ||
48 | * 0x8000 - : Early init and support code | ||
49 | */ | ||
50 | |||
51 | /* | ||
52 | * SPRG Usage | ||
53 | * | ||
54 | * Register Definition | ||
55 | * | ||
56 | * SPRG0 reserved for hypervisor | ||
57 | * SPRG1 temp - used to save gpr | ||
58 | * SPRG2 temp - used to save gpr | ||
59 | * SPRG3 virt addr of paca | ||
60 | */ | 45 | */ |
61 | 46 | ||
62 | /* | 47 | /* |
63 | * Entering into this code we make the following assumptions: | 48 | * Entering into this code we make the following assumptions: |
64 | * For pSeries: | 49 | * |
50 | * For pSeries or server processors: | ||
65 | * 1. The MMU is off & open firmware is running in real mode. | 51 | * 1. The MMU is off & open firmware is running in real mode. |
66 | * 2. The kernel is entered at __start | 52 | * 2. The kernel is entered at __start |
67 | * | 53 | * |
68 | * For iSeries: | 54 | * For iSeries: |
69 | * 1. The MMU is on (as it always is for iSeries) | 55 | * 1. The MMU is on (as it always is for iSeries) |
70 | * 2. The kernel is entered at system_reset_iSeries | 56 | * 2. The kernel is entered at system_reset_iSeries |
57 | * | ||
58 | * For Book3E processors: | ||
59 | * 1. The MMU is on running in AS0 in a state defined in ePAPR | ||
60 | * 2. The kernel is entered at __start | ||
71 | */ | 61 | */ |
72 | 62 | ||
73 | .text | 63 | .text |
@@ -166,1065 +156,14 @@ exception_marker: | |||
166 | .text | 156 | .text |
167 | 157 | ||
168 | /* | 158 | /* |
169 | * This is the start of the interrupt handlers for pSeries | 159 | * On server, we include the exception vectors code here as it |
170 | * This code runs with relocation off. | 160 | * relies on absolute addressing which is only possible within |
171 | * Code from here to __end_interrupts gets copied down to real | 161 | * this compilation unit |
172 | * address 0x100 when we are running a relocatable kernel. | ||
173 | * Therefore any relative branches in this section must only | ||
174 | * branch to labels in this section. | ||
175 | */ | ||
176 | . = 0x100 | ||
177 | .globl __start_interrupts | ||
178 | __start_interrupts: | ||
179 | |||
180 | STD_EXCEPTION_PSERIES(0x100, system_reset) | ||
181 | |||
182 | . = 0x200 | ||
183 | _machine_check_pSeries: | ||
184 | HMT_MEDIUM | ||
185 | mtspr SPRN_SPRG1,r13 /* save r13 */ | ||
186 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | ||
187 | |||
188 | . = 0x300 | ||
189 | .globl data_access_pSeries | ||
190 | data_access_pSeries: | ||
191 | HMT_MEDIUM | ||
192 | mtspr SPRN_SPRG1,r13 | ||
193 | BEGIN_FTR_SECTION | ||
194 | mtspr SPRN_SPRG2,r12 | ||
195 | mfspr r13,SPRN_DAR | ||
196 | mfspr r12,SPRN_DSISR | ||
197 | srdi r13,r13,60 | ||
198 | rlwimi r13,r12,16,0x20 | ||
199 | mfcr r12 | ||
200 | cmpwi r13,0x2c | ||
201 | beq do_stab_bolted_pSeries | ||
202 | mtcrf 0x80,r12 | ||
203 | mfspr r12,SPRN_SPRG2 | ||
204 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
205 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) | ||
206 | |||
207 | . = 0x380 | ||
208 | .globl data_access_slb_pSeries | ||
209 | data_access_slb_pSeries: | ||
210 | HMT_MEDIUM | ||
211 | mtspr SPRN_SPRG1,r13 | ||
212 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ | ||
213 | std r3,PACA_EXSLB+EX_R3(r13) | ||
214 | mfspr r3,SPRN_DAR | ||
215 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | ||
216 | mfcr r9 | ||
217 | #ifdef __DISABLED__ | ||
218 | /* Keep that around for when we re-implement dynamic VSIDs */ | ||
219 | cmpdi r3,0 | ||
220 | bge slb_miss_user_pseries | ||
221 | #endif /* __DISABLED__ */ | ||
222 | std r10,PACA_EXSLB+EX_R10(r13) | ||
223 | std r11,PACA_EXSLB+EX_R11(r13) | ||
224 | std r12,PACA_EXSLB+EX_R12(r13) | ||
225 | mfspr r10,SPRN_SPRG1 | ||
226 | std r10,PACA_EXSLB+EX_R13(r13) | ||
227 | mfspr r12,SPRN_SRR1 /* and SRR1 */ | ||
228 | #ifndef CONFIG_RELOCATABLE | ||
229 | b .slb_miss_realmode | ||
230 | #else | ||
231 | /* | ||
232 | * We can't just use a direct branch to .slb_miss_realmode | ||
233 | * because the distance from here to there depends on where | ||
234 | * the kernel ends up being put. | ||
235 | */ | ||
236 | mfctr r11 | ||
237 | ld r10,PACAKBASE(r13) | ||
238 | LOAD_HANDLER(r10, .slb_miss_realmode) | ||
239 | mtctr r10 | ||
240 | bctr | ||
241 | #endif | ||
242 | |||
243 | STD_EXCEPTION_PSERIES(0x400, instruction_access) | ||
244 | |||
245 | . = 0x480 | ||
246 | .globl instruction_access_slb_pSeries | ||
247 | instruction_access_slb_pSeries: | ||
248 | HMT_MEDIUM | ||
249 | mtspr SPRN_SPRG1,r13 | ||
250 | mfspr r13,SPRN_SPRG3 /* get paca address into r13 */ | ||
251 | std r3,PACA_EXSLB+EX_R3(r13) | ||
252 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ | ||
253 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | ||
254 | mfcr r9 | ||
255 | #ifdef __DISABLED__ | ||
256 | /* Keep that around for when we re-implement dynamic VSIDs */ | ||
257 | cmpdi r3,0 | ||
258 | bge slb_miss_user_pseries | ||
259 | #endif /* __DISABLED__ */ | ||
260 | std r10,PACA_EXSLB+EX_R10(r13) | ||
261 | std r11,PACA_EXSLB+EX_R11(r13) | ||
262 | std r12,PACA_EXSLB+EX_R12(r13) | ||
263 | mfspr r10,SPRN_SPRG1 | ||
264 | std r10,PACA_EXSLB+EX_R13(r13) | ||
265 | mfspr r12,SPRN_SRR1 /* and SRR1 */ | ||
266 | #ifndef CONFIG_RELOCATABLE | ||
267 | b .slb_miss_realmode | ||
268 | #else | ||
269 | mfctr r11 | ||
270 | ld r10,PACAKBASE(r13) | ||
271 | LOAD_HANDLER(r10, .slb_miss_realmode) | ||
272 | mtctr r10 | ||
273 | bctr | ||
274 | #endif | ||
275 | |||
276 | MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) | ||
277 | STD_EXCEPTION_PSERIES(0x600, alignment) | ||
278 | STD_EXCEPTION_PSERIES(0x700, program_check) | ||
279 | STD_EXCEPTION_PSERIES(0x800, fp_unavailable) | ||
280 | MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) | ||
281 | STD_EXCEPTION_PSERIES(0xa00, trap_0a) | ||
282 | STD_EXCEPTION_PSERIES(0xb00, trap_0b) | ||
283 | |||
284 | . = 0xc00 | ||
285 | .globl system_call_pSeries | ||
286 | system_call_pSeries: | ||
287 | HMT_MEDIUM | ||
288 | BEGIN_FTR_SECTION | ||
289 | cmpdi r0,0x1ebe | ||
290 | beq- 1f | ||
291 | END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | ||
292 | mr r9,r13 | ||
293 | mfspr r13,SPRN_SPRG3 | ||
294 | mfspr r11,SPRN_SRR0 | ||
295 | ld r12,PACAKBASE(r13) | ||
296 | ld r10,PACAKMSR(r13) | ||
297 | LOAD_HANDLER(r12, system_call_entry) | ||
298 | mtspr SPRN_SRR0,r12 | ||
299 | mfspr r12,SPRN_SRR1 | ||
300 | mtspr SPRN_SRR1,r10 | ||
301 | rfid | ||
302 | b . /* prevent speculative execution */ | ||
303 | |||
304 | /* Fast LE/BE switch system call */ | ||
305 | 1: mfspr r12,SPRN_SRR1 | ||
306 | xori r12,r12,MSR_LE | ||
307 | mtspr SPRN_SRR1,r12 | ||
308 | rfid /* return to userspace */ | ||
309 | b . | ||
310 | |||
311 | STD_EXCEPTION_PSERIES(0xd00, single_step) | ||
312 | STD_EXCEPTION_PSERIES(0xe00, trap_0e) | ||
313 | |||
314 | /* We need to deal with the Altivec unavailable exception | ||
315 | * here which is at 0xf20, thus in the middle of the | ||
316 | * prolog code of the PerformanceMonitor one. A little | ||
317 | * trickery is thus necessary | ||
318 | */ | ||
319 | . = 0xf00 | ||
320 | b performance_monitor_pSeries | ||
321 | |||
322 | . = 0xf20 | ||
323 | b altivec_unavailable_pSeries | ||
324 | |||
325 | . = 0xf40 | ||
326 | b vsx_unavailable_pSeries | ||
327 | |||
328 | #ifdef CONFIG_CBE_RAS | ||
329 | HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) | ||
330 | #endif /* CONFIG_CBE_RAS */ | ||
331 | STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) | ||
332 | #ifdef CONFIG_CBE_RAS | ||
333 | HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance) | ||
334 | #endif /* CONFIG_CBE_RAS */ | ||
335 | STD_EXCEPTION_PSERIES(0x1700, altivec_assist) | ||
336 | #ifdef CONFIG_CBE_RAS | ||
337 | HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal) | ||
338 | #endif /* CONFIG_CBE_RAS */ | ||
339 | |||
340 | . = 0x3000 | ||
341 | |||
342 | /*** pSeries interrupt support ***/ | ||
343 | |||
344 | /* moved from 0xf00 */ | ||
345 | STD_EXCEPTION_PSERIES(., performance_monitor) | ||
346 | STD_EXCEPTION_PSERIES(., altivec_unavailable) | ||
347 | STD_EXCEPTION_PSERIES(., vsx_unavailable) | ||
348 | |||
349 | /* | ||
350 | * An interrupt came in while soft-disabled; clear EE in SRR1, | ||
351 | * clear paca->hard_enabled and return. | ||
352 | */ | ||
353 | masked_interrupt: | ||
354 | stb r10,PACAHARDIRQEN(r13) | ||
355 | mtcrf 0x80,r9 | ||
356 | ld r9,PACA_EXGEN+EX_R9(r13) | ||
357 | mfspr r10,SPRN_SRR1 | ||
358 | rldicl r10,r10,48,1 /* clear MSR_EE */ | ||
359 | rotldi r10,r10,16 | ||
360 | mtspr SPRN_SRR1,r10 | ||
361 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
362 | mfspr r13,SPRN_SPRG1 | ||
363 | rfid | ||
364 | b . | ||
365 | |||
366 | .align 7 | ||
367 | do_stab_bolted_pSeries: | ||
368 | mtcrf 0x80,r12 | ||
369 | mfspr r12,SPRN_SPRG2 | ||
370 | EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) | ||
371 | |||
372 | #ifdef CONFIG_PPC_PSERIES | ||
373 | /* | ||
374 | * Vectors for the FWNMI option. Share common code. | ||
375 | */ | ||
376 | .globl system_reset_fwnmi | ||
377 | .align 7 | ||
378 | system_reset_fwnmi: | ||
379 | HMT_MEDIUM | ||
380 | mtspr SPRN_SPRG1,r13 /* save r13 */ | ||
381 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) | ||
382 | |||
383 | .globl machine_check_fwnmi | ||
384 | .align 7 | ||
385 | machine_check_fwnmi: | ||
386 | HMT_MEDIUM | ||
387 | mtspr SPRN_SPRG1,r13 /* save r13 */ | ||
388 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | ||
389 | |||
390 | #endif /* CONFIG_PPC_PSERIES */ | ||
391 | |||
392 | #ifdef __DISABLED__ | ||
393 | /* | ||
394 | * This is used for when the SLB miss handler has to go virtual, | ||
395 | * which doesn't happen for now anymore but will once we re-implement | ||
396 | * dynamic VSIDs for shared page tables | ||
397 | */ | ||
398 | slb_miss_user_pseries: | ||
399 | std r10,PACA_EXGEN+EX_R10(r13) | ||
400 | std r11,PACA_EXGEN+EX_R11(r13) | ||
401 | std r12,PACA_EXGEN+EX_R12(r13) | ||
402 | mfspr r10,SPRG1 | ||
403 | ld r11,PACA_EXSLB+EX_R9(r13) | ||
404 | ld r12,PACA_EXSLB+EX_R3(r13) | ||
405 | std r10,PACA_EXGEN+EX_R13(r13) | ||
406 | std r11,PACA_EXGEN+EX_R9(r13) | ||
407 | std r12,PACA_EXGEN+EX_R3(r13) | ||
408 | clrrdi r12,r13,32 | ||
409 | mfmsr r10 | ||
410 | mfspr r11,SRR0 /* save SRR0 */ | ||
411 | ori r12,r12,slb_miss_user_common@l /* virt addr of handler */ | ||
412 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI | ||
413 | mtspr SRR0,r12 | ||
414 | mfspr r12,SRR1 /* and SRR1 */ | ||
415 | mtspr SRR1,r10 | ||
416 | rfid | ||
417 | b . /* prevent spec. execution */ | ||
418 | #endif /* __DISABLED__ */ | ||
419 | |||
420 | .align 7 | ||
421 | .globl __end_interrupts | ||
422 | __end_interrupts: | ||
423 | |||
424 | /* | ||
425 | * Code from here down to __end_handlers is invoked from the | ||
426 | * exception prologs above. Because the prologs assemble the | ||
427 | * addresses of these handlers using the LOAD_HANDLER macro, | ||
428 | * which uses an addi instruction, these handlers must be in | ||
429 | * the first 32k of the kernel image. | ||
430 | */ | ||
431 | |||
432 | /*** Common interrupt handlers ***/ | ||
433 | |||
434 | STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) | ||
435 | |||
436 | /* | ||
437 | * Machine check is different because we use a different | ||
438 | * save area: PACA_EXMC instead of PACA_EXGEN. | ||
439 | */ | ||
440 | .align 7 | ||
441 | .globl machine_check_common | ||
442 | machine_check_common: | ||
443 | EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) | ||
444 | FINISH_NAP | ||
445 | DISABLE_INTS | ||
446 | bl .save_nvgprs | ||
447 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
448 | bl .machine_check_exception | ||
449 | b .ret_from_except | ||
450 | |||
451 | STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) | ||
452 | STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) | ||
453 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | ||
454 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | ||
455 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | ||
456 | STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) | ||
457 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) | ||
458 | #ifdef CONFIG_ALTIVEC | ||
459 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) | ||
460 | #else | ||
461 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) | ||
462 | #endif | ||
463 | #ifdef CONFIG_CBE_RAS | ||
464 | STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) | ||
465 | STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception) | ||
466 | STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) | ||
467 | #endif /* CONFIG_CBE_RAS */ | ||
468 | |||
469 | .align 7 | ||
470 | system_call_entry: | ||
471 | b system_call_common | ||
472 | |||
473 | /* | ||
474 | * Here we have detected that the kernel stack pointer is bad. | ||
475 | * R9 contains the saved CR, r13 points to the paca, | ||
476 | * r10 contains the (bad) kernel stack pointer, | ||
477 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
478 | * We switch to using an emergency stack, save the registers there, | ||
479 | * and call kernel_bad_stack(), which panics. | ||
480 | */ | ||
481 | bad_stack: | ||
482 | ld r1,PACAEMERGSP(r13) | ||
483 | subi r1,r1,64+INT_FRAME_SIZE | ||
484 | std r9,_CCR(r1) | ||
485 | std r10,GPR1(r1) | ||
486 | std r11,_NIP(r1) | ||
487 | std r12,_MSR(r1) | ||
488 | mfspr r11,SPRN_DAR | ||
489 | mfspr r12,SPRN_DSISR | ||
490 | std r11,_DAR(r1) | ||
491 | std r12,_DSISR(r1) | ||
492 | mflr r10 | ||
493 | mfctr r11 | ||
494 | mfxer r12 | ||
495 | std r10,_LINK(r1) | ||
496 | std r11,_CTR(r1) | ||
497 | std r12,_XER(r1) | ||
498 | SAVE_GPR(0,r1) | ||
499 | SAVE_GPR(2,r1) | ||
500 | SAVE_4GPRS(3,r1) | ||
501 | SAVE_2GPRS(7,r1) | ||
502 | SAVE_10GPRS(12,r1) | ||
503 | SAVE_10GPRS(22,r1) | ||
504 | lhz r12,PACA_TRAP_SAVE(r13) | ||
505 | std r12,_TRAP(r1) | ||
506 | addi r11,r1,INT_FRAME_SIZE | ||
507 | std r11,0(r1) | ||
508 | li r12,0 | ||
509 | std r12,0(r11) | ||
510 | ld r2,PACATOC(r13) | ||
511 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
512 | bl .kernel_bad_stack | ||
513 | b 1b | ||
514 | |||
515 | /* | ||
516 | * Here r13 points to the paca, r9 contains the saved CR, | ||
517 | * SRR0 and SRR1 are saved in r11 and r12, | ||
518 | * r9 - r13 are saved in paca->exgen. | ||
519 | */ | ||
520 | .align 7 | ||
521 | .globl data_access_common | ||
522 | data_access_common: | ||
523 | mfspr r10,SPRN_DAR | ||
524 | std r10,PACA_EXGEN+EX_DAR(r13) | ||
525 | mfspr r10,SPRN_DSISR | ||
526 | stw r10,PACA_EXGEN+EX_DSISR(r13) | ||
527 | EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) | ||
528 | ld r3,PACA_EXGEN+EX_DAR(r13) | ||
529 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | ||
530 | li r5,0x300 | ||
531 | b .do_hash_page /* Try to handle as hpte fault */ | ||
532 | |||
533 | .align 7 | ||
534 | .globl instruction_access_common | ||
535 | instruction_access_common: | ||
536 | EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) | ||
537 | ld r3,_NIP(r1) | ||
538 | andis. r4,r12,0x5820 | ||
539 | li r5,0x400 | ||
540 | b .do_hash_page /* Try to handle as hpte fault */ | ||
541 | |||
542 | /* | ||
543 | * Here is the common SLB miss user that is used when going to virtual | ||
544 | * mode for SLB misses, that is currently not used | ||
545 | */ | ||
546 | #ifdef __DISABLED__ | ||
547 | .align 7 | ||
548 | .globl slb_miss_user_common | ||
549 | slb_miss_user_common: | ||
550 | mflr r10 | ||
551 | std r3,PACA_EXGEN+EX_DAR(r13) | ||
552 | stw r9,PACA_EXGEN+EX_CCR(r13) | ||
553 | std r10,PACA_EXGEN+EX_LR(r13) | ||
554 | std r11,PACA_EXGEN+EX_SRR0(r13) | ||
555 | bl .slb_allocate_user | ||
556 | |||
557 | ld r10,PACA_EXGEN+EX_LR(r13) | ||
558 | ld r3,PACA_EXGEN+EX_R3(r13) | ||
559 | lwz r9,PACA_EXGEN+EX_CCR(r13) | ||
560 | ld r11,PACA_EXGEN+EX_SRR0(r13) | ||
561 | mtlr r10 | ||
562 | beq- slb_miss_fault | ||
563 | |||
564 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
565 | beq- unrecov_user_slb | ||
566 | mfmsr r10 | ||
567 | |||
568 | .machine push | ||
569 | .machine "power4" | ||
570 | mtcrf 0x80,r9 | ||
571 | .machine pop | ||
572 | |||
573 | clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */ | ||
574 | mtmsrd r10,1 | ||
575 | |||
576 | mtspr SRR0,r11 | ||
577 | mtspr SRR1,r12 | ||
578 | |||
579 | ld r9,PACA_EXGEN+EX_R9(r13) | ||
580 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
581 | ld r11,PACA_EXGEN+EX_R11(r13) | ||
582 | ld r12,PACA_EXGEN+EX_R12(r13) | ||
583 | ld r13,PACA_EXGEN+EX_R13(r13) | ||
584 | rfid | ||
585 | b . | ||
586 | |||
587 | slb_miss_fault: | ||
588 | EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN) | ||
589 | ld r4,PACA_EXGEN+EX_DAR(r13) | ||
590 | li r5,0 | ||
591 | std r4,_DAR(r1) | ||
592 | std r5,_DSISR(r1) | ||
593 | b handle_page_fault | ||
594 | |||
595 | unrecov_user_slb: | ||
596 | EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) | ||
597 | DISABLE_INTS | ||
598 | bl .save_nvgprs | ||
599 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
600 | bl .unrecoverable_exception | ||
601 | b 1b | ||
602 | |||
603 | #endif /* __DISABLED__ */ | ||
604 | |||
605 | |||
606 | /* | ||
607 | * r13 points to the PACA, r9 contains the saved CR, | ||
608 | * r12 contain the saved SRR1, SRR0 is still ready for return | ||
609 | * r3 has the faulting address | ||
610 | * r9 - r13 are saved in paca->exslb. | ||
611 | * r3 is saved in paca->slb_r3 | ||
612 | * We assume we aren't going to take any exceptions during this procedure. | ||
613 | */ | ||
614 | _GLOBAL(slb_miss_realmode) | ||
615 | mflr r10 | ||
616 | #ifdef CONFIG_RELOCATABLE | ||
617 | mtctr r11 | ||
618 | #endif | ||
619 | |||
620 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
621 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | ||
622 | |||
623 | bl .slb_allocate_realmode | ||
624 | |||
625 | /* All done -- return from exception. */ | ||
626 | |||
627 | ld r10,PACA_EXSLB+EX_LR(r13) | ||
628 | ld r3,PACA_EXSLB+EX_R3(r13) | ||
629 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
630 | #ifdef CONFIG_PPC_ISERIES | ||
631 | BEGIN_FW_FTR_SECTION | ||
632 | ld r11,PACALPPACAPTR(r13) | ||
633 | ld r11,LPPACASRR0(r11) /* get SRR0 value */ | ||
634 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | ||
635 | #endif /* CONFIG_PPC_ISERIES */ | ||
636 | |||
637 | mtlr r10 | ||
638 | |||
639 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | ||
640 | beq- 2f | ||
641 | |||
642 | .machine push | ||
643 | .machine "power4" | ||
644 | mtcrf 0x80,r9 | ||
645 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | ||
646 | .machine pop | ||
647 | |||
648 | #ifdef CONFIG_PPC_ISERIES | ||
649 | BEGIN_FW_FTR_SECTION | ||
650 | mtspr SPRN_SRR0,r11 | ||
651 | mtspr SPRN_SRR1,r12 | ||
652 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | ||
653 | #endif /* CONFIG_PPC_ISERIES */ | ||
654 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
655 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
656 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
657 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
658 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
659 | rfid | ||
660 | b . /* prevent speculative execution */ | ||
661 | |||
662 | 2: | ||
663 | #ifdef CONFIG_PPC_ISERIES | ||
664 | BEGIN_FW_FTR_SECTION | ||
665 | b unrecov_slb | ||
666 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | ||
667 | #endif /* CONFIG_PPC_ISERIES */ | ||
668 | mfspr r11,SPRN_SRR0 | ||
669 | ld r10,PACAKBASE(r13) | ||
670 | LOAD_HANDLER(r10,unrecov_slb) | ||
671 | mtspr SPRN_SRR0,r10 | ||
672 | ld r10,PACAKMSR(r13) | ||
673 | mtspr SPRN_SRR1,r10 | ||
674 | rfid | ||
675 | b . | ||
676 | |||
677 | unrecov_slb: | ||
678 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | ||
679 | DISABLE_INTS | ||
680 | bl .save_nvgprs | ||
681 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
682 | bl .unrecoverable_exception | ||
683 | b 1b | ||
684 | |||
685 | .align 7 | ||
686 | .globl hardware_interrupt_common | ||
687 | .globl hardware_interrupt_entry | ||
688 | hardware_interrupt_common: | ||
689 | EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) | ||
690 | FINISH_NAP | ||
691 | hardware_interrupt_entry: | ||
692 | DISABLE_INTS | ||
693 | BEGIN_FTR_SECTION | ||
694 | bl .ppc64_runlatch_on | ||
695 | END_FTR_SECTION_IFSET(CPU_FTR_CTRL) | ||
696 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
697 | bl .do_IRQ | ||
698 | b .ret_from_except_lite | ||
699 | |||
700 | #ifdef CONFIG_PPC_970_NAP | ||
701 | power4_fixup_nap: | ||
702 | andc r9,r9,r10 | ||
703 | std r9,TI_LOCAL_FLAGS(r11) | ||
704 | ld r10,_LINK(r1) /* make idle task do the */ | ||
705 | std r10,_NIP(r1) /* equivalent of a blr */ | ||
706 | blr | ||
707 | #endif | ||
708 | |||
709 | .align 7 | ||
710 | .globl alignment_common | ||
711 | alignment_common: | ||
712 | mfspr r10,SPRN_DAR | ||
713 | std r10,PACA_EXGEN+EX_DAR(r13) | ||
714 | mfspr r10,SPRN_DSISR | ||
715 | stw r10,PACA_EXGEN+EX_DSISR(r13) | ||
716 | EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) | ||
717 | ld r3,PACA_EXGEN+EX_DAR(r13) | ||
718 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | ||
719 | std r3,_DAR(r1) | ||
720 | std r4,_DSISR(r1) | ||
721 | bl .save_nvgprs | ||
722 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
723 | ENABLE_INTS | ||
724 | bl .alignment_exception | ||
725 | b .ret_from_except | ||
726 | |||
727 | .align 7 | ||
728 | .globl program_check_common | ||
729 | program_check_common: | ||
730 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | ||
731 | bl .save_nvgprs | ||
732 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
733 | ENABLE_INTS | ||
734 | bl .program_check_exception | ||
735 | b .ret_from_except | ||
736 | |||
737 | .align 7 | ||
738 | .globl fp_unavailable_common | ||
739 | fp_unavailable_common: | ||
740 | EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) | ||
741 | bne 1f /* if from user, just load it up */ | ||
742 | bl .save_nvgprs | ||
743 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
744 | ENABLE_INTS | ||
745 | bl .kernel_fp_unavailable_exception | ||
746 | BUG_OPCODE | ||
747 | 1: bl .load_up_fpu | ||
748 | b fast_exception_return | ||
749 | |||
750 | .align 7 | ||
751 | .globl altivec_unavailable_common | ||
752 | altivec_unavailable_common: | ||
753 | EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) | ||
754 | #ifdef CONFIG_ALTIVEC | ||
755 | BEGIN_FTR_SECTION | ||
756 | beq 1f | ||
757 | bl .load_up_altivec | ||
758 | b fast_exception_return | ||
759 | 1: | ||
760 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
761 | #endif | ||
762 | bl .save_nvgprs | ||
763 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
764 | ENABLE_INTS | ||
765 | bl .altivec_unavailable_exception | ||
766 | b .ret_from_except | ||
767 | |||
768 | .align 7 | ||
769 | .globl vsx_unavailable_common | ||
770 | vsx_unavailable_common: | ||
771 | EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN) | ||
772 | #ifdef CONFIG_VSX | ||
773 | BEGIN_FTR_SECTION | ||
774 | bne .load_up_vsx | ||
775 | 1: | ||
776 | END_FTR_SECTION_IFSET(CPU_FTR_VSX) | ||
777 | #endif | ||
778 | bl .save_nvgprs | ||
779 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
780 | ENABLE_INTS | ||
781 | bl .vsx_unavailable_exception | ||
782 | b .ret_from_except | ||
783 | |||
784 | .align 7 | ||
785 | .globl __end_handlers | ||
786 | __end_handlers: | ||
787 | |||
788 | /* | ||
789 | * Return from an exception with minimal checks. | ||
790 | * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. | ||
791 | * If interrupts have been enabled, or anything has been | ||
792 | * done that might have changed the scheduling status of | ||
793 | * any task or sent any task a signal, you should use | ||
794 | * ret_from_except or ret_from_except_lite instead of this. | ||
795 | */ | 162 | */ |
796 | fast_exc_return_irq: /* restores irq state too */ | 163 | #ifdef CONFIG_PPC_BOOK3S |
797 | ld r3,SOFTE(r1) | 164 | #include "exceptions-64s.S" |
798 | TRACE_AND_RESTORE_IRQ(r3); | ||
799 | ld r12,_MSR(r1) | ||
800 | rldicl r4,r12,49,63 /* get MSR_EE to LSB */ | ||
801 | stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */ | ||
802 | b 1f | ||
803 | |||
804 | .globl fast_exception_return | ||
805 | fast_exception_return: | ||
806 | ld r12,_MSR(r1) | ||
807 | 1: ld r11,_NIP(r1) | ||
808 | andi. r3,r12,MSR_RI /* check if RI is set */ | ||
809 | beq- unrecov_fer | ||
810 | |||
811 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
812 | andi. r3,r12,MSR_PR | ||
813 | beq 2f | ||
814 | ACCOUNT_CPU_USER_EXIT(r3, r4) | ||
815 | 2: | ||
816 | #endif | 165 | #endif |
817 | 166 | ||
818 | ld r3,_CCR(r1) | ||
819 | ld r4,_LINK(r1) | ||
820 | ld r5,_CTR(r1) | ||
821 | ld r6,_XER(r1) | ||
822 | mtcr r3 | ||
823 | mtlr r4 | ||
824 | mtctr r5 | ||
825 | mtxer r6 | ||
826 | REST_GPR(0, r1) | ||
827 | REST_8GPRS(2, r1) | ||
828 | |||
829 | mfmsr r10 | ||
830 | rldicl r10,r10,48,1 /* clear EE */ | ||
831 | rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */ | ||
832 | mtmsrd r10,1 | ||
833 | |||
834 | mtspr SPRN_SRR1,r12 | ||
835 | mtspr SPRN_SRR0,r11 | ||
836 | REST_4GPRS(10, r1) | ||
837 | ld r1,GPR1(r1) | ||
838 | rfid | ||
839 | b . /* prevent speculative execution */ | ||
840 | |||
841 | unrecov_fer: | ||
842 | bl .save_nvgprs | ||
843 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | ||
844 | bl .unrecoverable_exception | ||
845 | b 1b | ||
846 | |||
847 | #ifdef CONFIG_ALTIVEC | ||
848 | /* | ||
849 | * load_up_altivec(unused, unused, tsk) | ||
850 | * Disable VMX for the task which had it previously, | ||
851 | * and save its vector registers in its thread_struct. | ||
852 | * Enables the VMX for use in the kernel on return. | ||
853 | * On SMP we know the VMX is free, since we give it up every | ||
854 | * switch (ie, no lazy save of the vector registers). | ||
855 | * On entry: r13 == 'current' && last_task_used_altivec != 'current' | ||
856 | */ | ||
857 | _STATIC(load_up_altivec) | ||
858 | mfmsr r5 /* grab the current MSR */ | ||
859 | oris r5,r5,MSR_VEC@h | ||
860 | mtmsrd r5 /* enable use of VMX now */ | ||
861 | isync | ||
862 | |||
863 | /* | ||
864 | * For SMP, we don't do lazy VMX switching because it just gets too | ||
865 | * horrendously complex, especially when a task switches from one CPU | ||
866 | * to another. Instead we call giveup_altivec in switch_to. | ||
867 | * VRSAVE isn't dealt with here, that is done in the normal context | ||
868 | * switch code. Note that we could rely on vrsave value to eventually | ||
869 | * avoid saving all of the VREGs here... | ||
870 | */ | ||
871 | #ifndef CONFIG_SMP | ||
872 | ld r3,last_task_used_altivec@got(r2) | ||
873 | ld r4,0(r3) | ||
874 | cmpdi 0,r4,0 | ||
875 | beq 1f | ||
876 | /* Save VMX state to last_task_used_altivec's THREAD struct */ | ||
877 | addi r4,r4,THREAD | ||
878 | SAVE_32VRS(0,r5,r4) | ||
879 | mfvscr vr0 | ||
880 | li r10,THREAD_VSCR | ||
881 | stvx vr0,r10,r4 | ||
882 | /* Disable VMX for last_task_used_altivec */ | ||
883 | ld r5,PT_REGS(r4) | ||
884 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
885 | lis r6,MSR_VEC@h | ||
886 | andc r4,r4,r6 | ||
887 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
888 | 1: | ||
889 | #endif /* CONFIG_SMP */ | ||
890 | /* Hack: if we get an altivec unavailable trap with VRSAVE | ||
891 | * set to all zeros, we assume this is a broken application | ||
892 | * that fails to set it properly, and thus we switch it to | ||
893 | * all 1's | ||
894 | */ | ||
895 | mfspr r4,SPRN_VRSAVE | ||
896 | cmpdi 0,r4,0 | ||
897 | bne+ 1f | ||
898 | li r4,-1 | ||
899 | mtspr SPRN_VRSAVE,r4 | ||
900 | 1: | ||
901 | /* enable use of VMX after return */ | ||
902 | ld r4,PACACURRENT(r13) | ||
903 | addi r5,r4,THREAD /* Get THREAD */ | ||
904 | oris r12,r12,MSR_VEC@h | ||
905 | std r12,_MSR(r1) | ||
906 | li r4,1 | ||
907 | li r10,THREAD_VSCR | ||
908 | stw r4,THREAD_USED_VR(r5) | ||
909 | lvx vr0,r10,r5 | ||
910 | mtvscr vr0 | ||
911 | REST_32VRS(0,r4,r5) | ||
912 | #ifndef CONFIG_SMP | ||
913 | /* Update last_task_used_altivec to 'current' */ | ||
914 | subi r4,r5,THREAD /* Back to 'current' */ | ||
915 | std r4,0(r3) | ||
916 | #endif /* CONFIG_SMP */ | ||
917 | /* restore registers and return */ | ||
918 | blr | ||
919 | #endif /* CONFIG_ALTIVEC */ | ||
920 | |||
921 | #ifdef CONFIG_VSX | ||
922 | /* | ||
923 | * load_up_vsx(unused, unused, tsk) | ||
924 | * Disable VSX for the task which had it previously, | ||
925 | * and save its vector registers in its thread_struct. | ||
926 | * Reuse the fp and vsx saves, but first check to see if they have | ||
927 | * been saved already. | ||
928 | * On entry: r13 == 'current' && last_task_used_vsx != 'current' | ||
929 | */ | ||
930 | _STATIC(load_up_vsx) | ||
931 | /* Load FP and VSX registers if they haven't been done yet */ | ||
932 | andi. r5,r12,MSR_FP | ||
933 | beql+ load_up_fpu /* skip if already loaded */ | ||
934 | andis. r5,r12,MSR_VEC@h | ||
935 | beql+ load_up_altivec /* skip if already loaded */ | ||
936 | |||
937 | #ifndef CONFIG_SMP | ||
938 | ld r3,last_task_used_vsx@got(r2) | ||
939 | ld r4,0(r3) | ||
940 | cmpdi 0,r4,0 | ||
941 | beq 1f | ||
942 | /* Disable VSX for last_task_used_vsx */ | ||
943 | addi r4,r4,THREAD | ||
944 | ld r5,PT_REGS(r4) | ||
945 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
946 | lis r6,MSR_VSX@h | ||
947 | andc r6,r4,r6 | ||
948 | std r6,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
949 | 1: | ||
950 | #endif /* CONFIG_SMP */ | ||
951 | ld r4,PACACURRENT(r13) | ||
952 | addi r4,r4,THREAD /* Get THREAD */ | ||
953 | li r6,1 | ||
954 | stw r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */ | ||
955 | /* enable use of VSX after return */ | ||
956 | oris r12,r12,MSR_VSX@h | ||
957 | std r12,_MSR(r1) | ||
958 | #ifndef CONFIG_SMP | ||
959 | /* Update last_task_used_vsx to 'current' */ | ||
960 | ld r4,PACACURRENT(r13) | ||
961 | std r4,0(r3) | ||
962 | #endif /* CONFIG_SMP */ | ||
963 | b fast_exception_return | ||
964 | #endif /* CONFIG_VSX */ | ||
965 | |||
966 | /* | ||
967 | * Hash table stuff | ||
968 | */ | ||
969 | .align 7 | ||
970 | _STATIC(do_hash_page) | ||
971 | std r3,_DAR(r1) | ||
972 | std r4,_DSISR(r1) | ||
973 | |||
974 | andis. r0,r4,0xa450 /* weird error? */ | ||
975 | bne- handle_page_fault /* if not, try to insert a HPTE */ | ||
976 | BEGIN_FTR_SECTION | ||
977 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ | ||
978 | bne- do_ste_alloc /* If so handle it */ | ||
979 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | ||
980 | |||
981 | /* | ||
982 | * On iSeries, we soft-disable interrupts here, then | ||
983 | * hard-enable interrupts so that the hash_page code can spin on | ||
984 | * the hash_table_lock without problems on a shared processor. | ||
985 | */ | ||
986 | DISABLE_INTS | ||
987 | |||
988 | /* | ||
989 | * Currently, trace_hardirqs_off() will be called by DISABLE_INTS | ||
990 | * and will clobber volatile registers when irq tracing is enabled | ||
991 | * so we need to reload them. It may be possible to be smarter here | ||
992 | * and move the irq tracing elsewhere but let's keep it simple for | ||
993 | * now | ||
994 | */ | ||
995 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
996 | ld r3,_DAR(r1) | ||
997 | ld r4,_DSISR(r1) | ||
998 | ld r5,_TRAP(r1) | ||
999 | ld r12,_MSR(r1) | ||
1000 | clrrdi r5,r5,4 | ||
1001 | #endif /* CONFIG_TRACE_IRQFLAGS */ | ||
1002 | /* | ||
1003 | * We need to set the _PAGE_USER bit if MSR_PR is set or if we are | ||
1004 | * accessing a userspace segment (even from the kernel). We assume | ||
1005 | * kernel addresses always have the high bit set. | ||
1006 | */ | ||
1007 | rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ | ||
1008 | rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ | ||
1009 | orc r0,r12,r0 /* MSR_PR | ~high_bit */ | ||
1010 | rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ | ||
1011 | ori r4,r4,1 /* add _PAGE_PRESENT */ | ||
1012 | rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ | ||
1013 | |||
1014 | /* | ||
1015 | * r3 contains the faulting address | ||
1016 | * r4 contains the required access permissions | ||
1017 | * r5 contains the trap number | ||
1018 | * | ||
1019 | * at return r3 = 0 for success | ||
1020 | */ | ||
1021 | bl .hash_page /* build HPTE if possible */ | ||
1022 | cmpdi r3,0 /* see if hash_page succeeded */ | ||
1023 | |||
1024 | BEGIN_FW_FTR_SECTION | ||
1025 | /* | ||
1026 | * If we had interrupts soft-enabled at the point where the | ||
1027 | * DSI/ISI occurred, and an interrupt came in during hash_page, | ||
1028 | * handle it now. | ||
1029 | * We jump to ret_from_except_lite rather than fast_exception_return | ||
1030 | * because ret_from_except_lite will check for and handle pending | ||
1031 | * interrupts if necessary. | ||
1032 | */ | ||
1033 | beq 13f | ||
1034 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) | ||
1035 | |||
1036 | BEGIN_FW_FTR_SECTION | ||
1037 | /* | ||
1038 | * Here we have interrupts hard-disabled, so it is sufficient | ||
1039 | * to restore paca->{soft,hard}_enable and get out. | ||
1040 | */ | ||
1041 | beq fast_exc_return_irq /* Return from exception on success */ | ||
1042 | END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) | ||
1043 | |||
1044 | /* For a hash failure, we don't bother re-enabling interrupts */ | ||
1045 | ble- 12f | ||
1046 | |||
1047 | /* | ||
1048 | * hash_page couldn't handle it, set soft interrupt enable back | ||
1049 | * to what it was before the trap. Note that .raw_local_irq_restore | ||
1050 | * handles any interrupts pending at this point. | ||
1051 | */ | ||
1052 | ld r3,SOFTE(r1) | ||
1053 | TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) | ||
1054 | bl .raw_local_irq_restore | ||
1055 | b 11f | ||
1056 | |||
1057 | /* Here we have a page fault that hash_page can't handle. */ | ||
1058 | handle_page_fault: | ||
1059 | ENABLE_INTS | ||
1060 | 11: ld r4,_DAR(r1) | ||
1061 | ld r5,_DSISR(r1) | ||
1062 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
1063 | bl .do_page_fault | ||
1064 | cmpdi r3,0 | ||
1065 | beq+ 13f | ||
1066 | bl .save_nvgprs | ||
1067 | mr r5,r3 | ||
1068 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
1069 | lwz r4,_DAR(r1) | ||
1070 | bl .bad_page_fault | ||
1071 | b .ret_from_except | ||
1072 | |||
1073 | 13: b .ret_from_except_lite | ||
1074 | |||
1075 | /* We have a page fault that hash_page could handle but HV refused | ||
1076 | * the PTE insertion | ||
1077 | */ | ||
1078 | 12: bl .save_nvgprs | ||
1079 | mr r5,r3 | ||
1080 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
1081 | ld r4,_DAR(r1) | ||
1082 | bl .low_hash_fault | ||
1083 | b .ret_from_except | ||
1084 | |||
1085 | /* here we have a segment miss */ | ||
1086 | do_ste_alloc: | ||
1087 | bl .ste_allocate /* try to insert stab entry */ | ||
1088 | cmpdi r3,0 | ||
1089 | bne- handle_page_fault | ||
1090 | b fast_exception_return | ||
1091 | |||
1092 | /* | ||
1093 | * r13 points to the PACA, r9 contains the saved CR, | ||
1094 | * r11 and r12 contain the saved SRR0 and SRR1. | ||
1095 | * r9 - r13 are saved in paca->exslb. | ||
1096 | * We assume we aren't going to take any exceptions during this procedure. | ||
1097 | * We assume (DAR >> 60) == 0xc. | ||
1098 | */ | ||
1099 | .align 7 | ||
1100 | _GLOBAL(do_stab_bolted) | ||
1101 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | ||
1102 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ | ||
1103 | |||
1104 | /* Hash to the primary group */ | ||
1105 | ld r10,PACASTABVIRT(r13) | ||
1106 | mfspr r11,SPRN_DAR | ||
1107 | srdi r11,r11,28 | ||
1108 | rldimi r10,r11,7,52 /* r10 = first ste of the group */ | ||
1109 | |||
1110 | /* Calculate VSID */ | ||
1111 | /* This is a kernel address, so protovsid = ESID */ | ||
1112 | ASM_VSID_SCRAMBLE(r11, r9, 256M) | ||
1113 | rldic r9,r11,12,16 /* r9 = vsid << 12 */ | ||
1114 | |||
1115 | /* Search the primary group for a free entry */ | ||
1116 | 1: ld r11,0(r10) /* Test valid bit of the current ste */ | ||
1117 | andi. r11,r11,0x80 | ||
1118 | beq 2f | ||
1119 | addi r10,r10,16 | ||
1120 | andi. r11,r10,0x70 | ||
1121 | bne 1b | ||
1122 | |||
1123 | /* Stick to searching only the primary group for now. */ | ||
1124 | /* At least for now, we use a very simple random castout scheme */ | ||
1125 | /* Use the TB as a random number ; OR in 1 to avoid entry 0 */ | ||
1126 | mftb r11 | ||
1127 | rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ | ||
1128 | ori r11,r11,0x10 | ||
1129 | |||
1130 | /* r10 currently points to an ste one past the group of interest */ | ||
1131 | /* make it point to the randomly selected entry */ | ||
1132 | subi r10,r10,128 | ||
1133 | or r10,r10,r11 /* r10 is the entry to invalidate */ | ||
1134 | |||
1135 | isync /* mark the entry invalid */ | ||
1136 | ld r11,0(r10) | ||
1137 | rldicl r11,r11,56,1 /* clear the valid bit */ | ||
1138 | rotldi r11,r11,8 | ||
1139 | std r11,0(r10) | ||
1140 | sync | ||
1141 | |||
1142 | clrrdi r11,r11,28 /* Get the esid part of the ste */ | ||
1143 | slbie r11 | ||
1144 | |||
1145 | 2: std r9,8(r10) /* Store the vsid part of the ste */ | ||
1146 | eieio | ||
1147 | |||
1148 | mfspr r11,SPRN_DAR /* Get the new esid */ | ||
1149 | clrrdi r11,r11,28 /* Permits a full 32b of ESID */ | ||
1150 | ori r11,r11,0x90 /* Turn on valid and kp */ | ||
1151 | std r11,0(r10) /* Put new entry back into the stab */ | ||
1152 | |||
1153 | sync | ||
1154 | |||
1155 | /* All done -- return from exception. */ | ||
1156 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | ||
1157 | ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ | ||
1158 | |||
1159 | andi. r10,r12,MSR_RI | ||
1160 | beq- unrecov_slb | ||
1161 | |||
1162 | mtcrf 0x80,r9 /* restore CR */ | ||
1163 | |||
1164 | mfmsr r10 | ||
1165 | clrrdi r10,r10,2 | ||
1166 | mtmsrd r10,1 | ||
1167 | |||
1168 | mtspr SPRN_SRR0,r11 | ||
1169 | mtspr SPRN_SRR1,r12 | ||
1170 | ld r9,PACA_EXSLB+EX_R9(r13) | ||
1171 | ld r10,PACA_EXSLB+EX_R10(r13) | ||
1172 | ld r11,PACA_EXSLB+EX_R11(r13) | ||
1173 | ld r12,PACA_EXSLB+EX_R12(r13) | ||
1174 | ld r13,PACA_EXSLB+EX_R13(r13) | ||
1175 | rfid | ||
1176 | b . /* prevent speculative execution */ | ||
1177 | |||
1178 | /* | ||
1179 | * Space for CPU0's segment table. | ||
1180 | * | ||
1181 | * On iSeries, the hypervisor must fill in at least one entry before | ||
1182 | * we get control (with relocate on). The address is given to the hv | ||
1183 | * as a page number (see xLparMap below), so this must be at a | ||
1184 | * fixed address (the linker can't compute (u64)&initial_stab >> | ||
1185 | * PAGE_SHIFT). | ||
1186 | */ | ||
1187 | . = STAB0_OFFSET /* 0x6000 */ | ||
1188 | .globl initial_stab | ||
1189 | initial_stab: | ||
1190 | .space 4096 | ||
1191 | |||
1192 | #ifdef CONFIG_PPC_PSERIES | ||
1193 | /* | ||
1194 | * Data area reserved for FWNMI option. | ||
1195 | * This address (0x7000) is fixed by the RPA. | ||
1196 | */ | ||
1197 | . = 0x7000 | ||
1198 | .globl fwnmi_data_area | ||
1199 | fwnmi_data_area: | ||
1200 | #endif /* CONFIG_PPC_PSERIES */ | ||
1201 | |||
1202 | /* iSeries does not use the FWNMI stuff, so it is safe to put | ||
1203 | * this here, even if we later allow kernels that will boot on | ||
1204 | * both pSeries and iSeries */ | ||
1205 | #ifdef CONFIG_PPC_ISERIES | ||
1206 | . = LPARMAP_PHYS | ||
1207 | .globl xLparMap | ||
1208 | xLparMap: | ||
1209 | .quad HvEsidsToMap /* xNumberEsids */ | ||
1210 | .quad HvRangesToMap /* xNumberRanges */ | ||
1211 | .quad STAB0_PAGE /* xSegmentTableOffs */ | ||
1212 | .zero 40 /* xRsvd */ | ||
1213 | /* xEsids (HvEsidsToMap entries of 2 quads) */ | ||
1214 | .quad PAGE_OFFSET_ESID /* xKernelEsid */ | ||
1215 | .quad PAGE_OFFSET_VSID /* xKernelVsid */ | ||
1216 | .quad VMALLOC_START_ESID /* xKernelEsid */ | ||
1217 | .quad VMALLOC_START_VSID /* xKernelVsid */ | ||
1218 | /* xRanges (HvRangesToMap entries of 3 quads) */ | ||
1219 | .quad HvPagesToMap /* xPages */ | ||
1220 | .quad 0 /* xOffset */ | ||
1221 | .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */ | ||
1222 | |||
1223 | #endif /* CONFIG_PPC_ISERIES */ | ||
1224 | |||
1225 | #ifdef CONFIG_PPC_PSERIES | ||
1226 | . = 0x8000 | ||
1227 | #endif /* CONFIG_PPC_PSERIES */ | ||
1228 | 167 | ||
1229 | /* | 168 | /* |
1230 | * On pSeries and most other platforms, secondary processors spin | 169 | * On pSeries and most other platforms, secondary processors spin |
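
The do_stab_bolted path shown above bolts a segment-table entry for a kernel address: it hashes the ESID to the primary STE group, scans the eight 16-byte entries for one with the valid bit clear, and, if the group is full, casts out a pseudo-random entry derived from the timebase (never entry 0). The sketch below models just that slot selection; the constants follow the assembly comments, and the type and function names are illustrative rather than kernel API.

#include <stdint.h>

/* Illustrative model of STE slot selection in do_stab_bolted above.
 * A group holds 8 entries of 16 bytes; 0x80 in the first doubleword
 * is the valid bit.  Names are stand-ins, not kernel identifiers. */
struct ste_sketch { uint64_t esid_v; uint64_t vsid; };

static uint64_t timebase_sketch(void)
{
	static uint64_t fake_tb;
	return ++fake_tb;		/* stand-in for mftb */
}

struct ste_sketch *pick_ste_slot(struct ste_sketch group[8])
{
	int i;

	/* Search the primary group for an entry whose valid bit is clear. */
	for (i = 0; i < 8; i++)
		if (!(group[i].esid_v & 0x80))
			return &group[i];

	/* Group full: cast out a pseudo-random entry chosen from the
	 * timebase; ORing in 1 keeps us off entry 0, as the assembly's
	 * "ori r11,r11,0x10" does. */
	i = (int)(timebase_sketch() & 0x7) | 0x1;
	return &group[i];
}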
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h index 95f39f1e68d4..5f9febc8d143 100644 --- a/arch/powerpc/kernel/head_booke.h +++ b/arch/powerpc/kernel/head_booke.h | |||
@@ -256,7 +256,7 @@ label: | |||
256 | * off DE in the DSRR1 value and clearing the debug status. \ | 256 | * off DE in the DSRR1 value and clearing the debug status. \ |
257 | */ \ | 257 | */ \ |
258 | mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \ | 258 | mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \ |
259 | andis. r10,r10,DBSR_IC@h; \ | 259 | andis. r10,r10,(DBSR_IC|DBSR_BT)@h; \ |
260 | beq+ 2f; \ | 260 | beq+ 2f; \ |
261 | \ | 261 | \ |
262 | lis r10,KERNELBASE@h; /* check if exception in vectors */ \ | 262 | lis r10,KERNELBASE@h; /* check if exception in vectors */ \ |
@@ -271,7 +271,7 @@ label: | |||
271 | \ | 271 | \ |
272 | /* here it looks like we got an inappropriate debug exception. */ \ | 272 | /* here it looks like we got an inappropriate debug exception. */ \ |
273 | 1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CDRR1 value */ \ | 273 | 1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CDRR1 value */ \ |
274 | lis r10,DBSR_IC@h; /* clear the IC event */ \ | 274 | lis r10,(DBSR_IC|DBSR_BT)@h; /* clear the IC event */ \ |
275 | mtspr SPRN_DBSR,r10; \ | 275 | mtspr SPRN_DBSR,r10; \ |
276 | /* restore state and get out */ \ | 276 | /* restore state and get out */ \ |
277 | lwz r10,_CCR(r11); \ | 277 | lwz r10,_CCR(r11); \ |
@@ -309,7 +309,7 @@ label: | |||
309 | * off DE in the CSRR1 value and clearing the debug status. \ | 309 | * off DE in the CSRR1 value and clearing the debug status. \ |
310 | */ \ | 310 | */ \ |
311 | mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \ | 311 | mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \ |
312 | andis. r10,r10,DBSR_IC@h; \ | 312 | andis. r10,r10,(DBSR_IC|DBSR_BT)@h; \ |
313 | beq+ 2f; \ | 313 | beq+ 2f; \ |
314 | \ | 314 | \ |
315 | lis r10,KERNELBASE@h; /* check if exception in vectors */ \ | 315 | lis r10,KERNELBASE@h; /* check if exception in vectors */ \ |
@@ -317,14 +317,14 @@ label: | |||
317 | cmplw r12,r10; \ | 317 | cmplw r12,r10; \ |
318 | blt+ 2f; /* addr below exception vectors */ \ | 318 | blt+ 2f; /* addr below exception vectors */ \ |
319 | \ | 319 | \ |
320 | lis r10,DebugCrit@h; \ | 320 | lis r10,DebugCrit@h; \ |
321 | ori r10,r10,DebugCrit@l; \ | 321 | ori r10,r10,DebugCrit@l; \ |
322 | cmplw r12,r10; \ | 322 | cmplw r12,r10; \ |
323 | bgt+ 2f; /* addr above exception vectors */ \ | 323 | bgt+ 2f; /* addr above exception vectors */ \ |
324 | \ | 324 | \ |
325 | /* here it looks like we got an inappropriate debug exception. */ \ | 325 | /* here it looks like we got an inappropriate debug exception. */ \ |
326 | 1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CSRR1 value */ \ | 326 | 1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CSRR1 value */ \ |
327 | lis r10,DBSR_IC@h; /* clear the IC event */ \ | 327 | lis r10,(DBSR_IC|DBSR_BT)@h; /* clear the IC event */ \ |
328 | mtspr SPRN_DBSR,r10; \ | 328 | mtspr SPRN_DBSR,r10; \ |
329 | /* restore state and get out */ \ | 329 | /* restore state and get out */ \ |
330 | lwz r10,_CCR(r11); \ | 330 | lwz r10,_CCR(r11); \ |
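
The head_booke.h hunks widen the debug-exception filter from single-step only (DBSR_IC) to single-step or branch-taken (DBSR_IC|DBSR_BT), both in the status test and in the write-back that acknowledges the event. Because andis./lis operate on the upper halfword, the combined mask has to sit entirely in the high 16 bits for the "@h" trick to keep working. The small C sketch below illustrates that; the bit values are placeholders chosen to sit in the upper halfword, not the kernel's DBSR definitions.

#include <stdint.h>

/* Placeholder bit values for the sketch only; the real layout is the
 * Book-E DBSR as defined by the architecture and reg_booke.h. */
#define DBSR_IC_SKETCH	0x08000000u	/* instruction completion (single step) */
#define DBSR_BT_SKETCH	0x04000000u	/* branch taken */

/* andis. rT,rS,(mask)@h tests only the upper 16 bits, so both status
 * bits must live there for one combined test to cover them. */
static int kernel_debug_event(uint32_t dbsr)
{
	uint32_t mask_hi = ((DBSR_IC_SKETCH | DBSR_BT_SKETCH) >> 16) << 16;

	return (dbsr & mask_hi) != 0;
}

/* Writing 1s back to DBSR clears the corresponding status bits; the
 * macros now acknowledge IC and BT together. */
static uint32_t dbsr_ack_value(void)
{
	return DBSR_IC_SKETCH | DBSR_BT_SKETCH;
}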
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c index 688b329800bd..ffc4253fef55 100644 --- a/arch/powerpc/kernel/init_task.c +++ b/arch/powerpc/kernel/init_task.c | |||
@@ -9,10 +9,6 @@ | |||
9 | 9 | ||
10 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); | 10 | static struct signal_struct init_signals = INIT_SIGNALS(init_signals); |
11 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); | 11 | static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); |
12 | struct mm_struct init_mm = INIT_MM(init_mm); | ||
13 | |||
14 | EXPORT_SYMBOL(init_mm); | ||
15 | |||
16 | /* | 12 | /* |
17 | * Initial thread structure. | 13 | * Initial thread structure. |
18 | * | 14 | * |
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 844d3f882a15..f7f376ea7b17 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -118,6 +118,7 @@ notrace void raw_local_irq_restore(unsigned long en) | |||
118 | if (!en) | 118 | if (!en) |
119 | return; | 119 | return; |
120 | 120 | ||
121 | #ifdef CONFIG_PPC_STD_MMU_64 | ||
121 | if (firmware_has_feature(FW_FEATURE_ISERIES)) { | 122 | if (firmware_has_feature(FW_FEATURE_ISERIES)) { |
122 | /* | 123 | /* |
123 | * Do we need to disable preemption here? Not really: in the | 124 | * Do we need to disable preemption here? Not really: in the |
@@ -135,6 +136,7 @@ notrace void raw_local_irq_restore(unsigned long en) | |||
135 | if (local_paca->lppaca_ptr->int_dword.any_int) | 136 | if (local_paca->lppaca_ptr->int_dword.any_int) |
136 | iseries_handle_interrupts(); | 137 | iseries_handle_interrupts(); |
137 | } | 138 | } |
139 | #endif /* CONFIG_PPC_STD_MMU_64 */ | ||
138 | 140 | ||
139 | if (test_perf_counter_pending()) { | 141 | if (test_perf_counter_pending()) { |
140 | clear_perf_counter_pending(); | 142 | clear_perf_counter_pending(); |
@@ -254,77 +256,84 @@ void fixup_irqs(cpumask_t map) | |||
254 | } | 256 | } |
255 | #endif | 257 | #endif |
256 | 258 | ||
257 | void do_IRQ(struct pt_regs *regs) | ||
258 | { | ||
259 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
260 | unsigned int irq; | ||
261 | #ifdef CONFIG_IRQSTACKS | 259 | #ifdef CONFIG_IRQSTACKS |
260 | static inline void handle_one_irq(unsigned int irq) | ||
261 | { | ||
262 | struct thread_info *curtp, *irqtp; | 262 | struct thread_info *curtp, *irqtp; |
263 | #endif | 263 | unsigned long saved_sp_limit; |
264 | struct irq_desc *desc; | ||
264 | 265 | ||
265 | irq_enter(); | 266 | /* Switch to the irq stack to handle this */ |
267 | curtp = current_thread_info(); | ||
268 | irqtp = hardirq_ctx[smp_processor_id()]; | ||
269 | |||
270 | if (curtp == irqtp) { | ||
271 | /* We're already on the irq stack, just handle it */ | ||
272 | generic_handle_irq(irq); | ||
273 | return; | ||
274 | } | ||
275 | |||
276 | desc = irq_desc + irq; | ||
277 | saved_sp_limit = current->thread.ksp_limit; | ||
278 | |||
279 | irqtp->task = curtp->task; | ||
280 | irqtp->flags = 0; | ||
281 | |||
282 | /* Copy the softirq bits in preempt_count so that the | ||
283 | * softirq checks work in the hardirq context. */ | ||
284 | irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) | | ||
285 | (curtp->preempt_count & SOFTIRQ_MASK); | ||
286 | |||
287 | current->thread.ksp_limit = (unsigned long)irqtp + | ||
288 | _ALIGN_UP(sizeof(struct thread_info), 16); | ||
289 | |||
290 | call_handle_irq(irq, desc, irqtp, desc->handle_irq); | ||
291 | current->thread.ksp_limit = saved_sp_limit; | ||
292 | irqtp->task = NULL; | ||
293 | |||
294 | /* Set any flag that may have been set on the | ||
295 | * alternate stack | ||
296 | */ | ||
297 | if (irqtp->flags) | ||
298 | set_bits(irqtp->flags, &curtp->flags); | ||
299 | } | ||
300 | #else | ||
301 | static inline void handle_one_irq(unsigned int irq) | ||
302 | { | ||
303 | generic_handle_irq(irq); | ||
304 | } | ||
305 | #endif | ||
266 | 306 | ||
307 | static inline void check_stack_overflow(void) | ||
308 | { | ||
267 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 309 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
268 | /* Debugging check for stack overflow: is there less than 2KB free? */ | 310 | long sp; |
269 | { | ||
270 | long sp; | ||
271 | 311 | ||
272 | sp = __get_SP() & (THREAD_SIZE-1); | 312 | sp = __get_SP() & (THREAD_SIZE-1); |
273 | 313 | ||
274 | if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { | 314 | /* check for stack overflow: is there less than 2KB free? */ |
275 | printk("do_IRQ: stack overflow: %ld\n", | 315 | if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { |
276 | sp - sizeof(struct thread_info)); | 316 | printk("do_IRQ: stack overflow: %ld\n", |
277 | dump_stack(); | 317 | sp - sizeof(struct thread_info)); |
278 | } | 318 | dump_stack(); |
279 | } | 319 | } |
280 | #endif | 320 | #endif |
321 | } | ||
281 | 322 | ||
282 | /* | 323 | void do_IRQ(struct pt_regs *regs) |
283 | * Every platform is required to implement ppc_md.get_irq. | 324 | { |
284 | * This function will either return an irq number or NO_IRQ to | 325 | struct pt_regs *old_regs = set_irq_regs(regs); |
285 | * indicate there are no more pending. | 326 | unsigned int irq; |
286 | * The value NO_IRQ_IGNORE is for buggy hardware and means that this | ||
287 | * IRQ has already been handled. -- Tom | ||
288 | */ | ||
289 | irq = ppc_md.get_irq(); | ||
290 | 327 | ||
291 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { | 328 | irq_enter(); |
292 | #ifdef CONFIG_IRQSTACKS | ||
293 | /* Switch to the irq stack to handle this */ | ||
294 | curtp = current_thread_info(); | ||
295 | irqtp = hardirq_ctx[smp_processor_id()]; | ||
296 | if (curtp != irqtp) { | ||
297 | struct irq_desc *desc = irq_desc + irq; | ||
298 | void *handler = desc->handle_irq; | ||
299 | unsigned long saved_sp_limit = current->thread.ksp_limit; | ||
300 | if (handler == NULL) | ||
301 | handler = &__do_IRQ; | ||
302 | irqtp->task = curtp->task; | ||
303 | irqtp->flags = 0; | ||
304 | |||
305 | /* Copy the softirq bits in preempt_count so that the | ||
306 | * softirq checks work in the hardirq context. | ||
307 | */ | ||
308 | irqtp->preempt_count = | ||
309 | (irqtp->preempt_count & ~SOFTIRQ_MASK) | | ||
310 | (curtp->preempt_count & SOFTIRQ_MASK); | ||
311 | 329 | ||
312 | current->thread.ksp_limit = (unsigned long)irqtp + | 330 | check_stack_overflow(); |
313 | _ALIGN_UP(sizeof(struct thread_info), 16); | ||
314 | call_handle_irq(irq, desc, irqtp, handler); | ||
315 | current->thread.ksp_limit = saved_sp_limit; | ||
316 | irqtp->task = NULL; | ||
317 | 331 | ||
332 | irq = ppc_md.get_irq(); | ||
318 | 333 | ||
319 | /* Set any flag that may have been set on the | 334 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) |
320 | * alternate stack | 335 | handle_one_irq(irq); |
321 | */ | 336 | else if (irq != NO_IRQ_IGNORE) |
322 | if (irqtp->flags) | ||
323 | set_bits(irqtp->flags, &curtp->flags); | ||
324 | } else | ||
325 | #endif | ||
326 | generic_handle_irq(irq); | ||
327 | } else if (irq != NO_IRQ_IGNORE) | ||
328 | /* That's not SMP safe ... but who cares ? */ | 337 | /* That's not SMP safe ... but who cares ? */ |
329 | ppc_spurious_interrupts++; | 338 | ppc_spurious_interrupts++; |
330 | 339 | ||
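Editor's note on the do_IRQ() rework above: the stack-overflow check and the switch to the per-CPU IRQ stack are split into helpers, and the subtle part is carrying the interrupted context's softirq bits into the IRQ stack's preempt_count so softirq accounting still works there. The stand-alone sketch below only illustrates that mask arithmetic; the SOFTIRQ bit layout used is the conventional one and is an assumption here, not something taken from this patch.

	#include <stdio.h>

	#define SOFTIRQ_SHIFT	8
	#define SOFTIRQ_MASK	(0xffU << SOFTIRQ_SHIFT)	/* assumed layout */

	int main(void)
	{
		unsigned int curtp = 0x00000100;	/* task context: one softirq level */
		unsigned int irqtp = 0x00010000;	/* irq stack: one hardirq level */

		/* keep the irq stack's own bits, import the task's softirq bits */
		irqtp = (irqtp & ~SOFTIRQ_MASK) | (curtp & SOFTIRQ_MASK);

		printf("irq stack preempt_count = 0x%08x\n", irqtp);	/* 0x00010100 */
		return 0;
	}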
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 78b3f7840ade..2419cc706ff1 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c | |||
@@ -169,6 +169,9 @@ struct hvcall_ppp_data { | |||
169 | u8 unallocated_weight; | 169 | u8 unallocated_weight; |
170 | u16 active_procs_in_pool; | 170 | u16 active_procs_in_pool; |
171 | u16 active_system_procs; | 171 | u16 active_system_procs; |
172 | u16 phys_platform_procs; | ||
173 | u32 max_proc_cap_avail; | ||
174 | u32 entitled_proc_cap_avail; | ||
172 | }; | 175 | }; |
173 | 176 | ||
174 | /* | 177 | /* |
@@ -190,13 +193,18 @@ struct hvcall_ppp_data { | |||
190 | * XX - Unallocated Variable Processor Capacity Weight. | 193 | * XX - Unallocated Variable Processor Capacity Weight. |
191 | * XXXX - Active processors in Physical Processor Pool. | 194 | * XXXX - Active processors in Physical Processor Pool. |
192 | * XXXX - Processors active on platform. | 195 | * XXXX - Processors active on platform. |
196 | * R8 (QQQQRRRRRRSSSSSS). if ibm,partition-performance-parameters-level >= 1 | ||
197 | * XXXX - Physical platform procs allocated to virtualization. | ||
198 | * XXXXXX - Max procs capacity % available to the partitions pool. | ||
199 | * XXXXXX - Entitled procs capacity % available to the | ||
200 | * partitions pool. | ||
193 | */ | 201 | */ |
194 | static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data) | 202 | static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data) |
195 | { | 203 | { |
196 | unsigned long rc; | 204 | unsigned long rc; |
197 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | 205 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; |
198 | 206 | ||
199 | rc = plpar_hcall(H_GET_PPP, retbuf); | 207 | rc = plpar_hcall9(H_GET_PPP, retbuf); |
200 | 208 | ||
201 | ppp_data->entitlement = retbuf[0]; | 209 | ppp_data->entitlement = retbuf[0]; |
202 | ppp_data->unallocated_entitlement = retbuf[1]; | 210 | ppp_data->unallocated_entitlement = retbuf[1]; |
@@ -210,6 +218,10 @@ static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data) | |||
210 | ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff; | 218 | ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff; |
211 | ppp_data->active_system_procs = retbuf[3] & 0xffff; | 219 | ppp_data->active_system_procs = retbuf[3] & 0xffff; |
212 | 220 | ||
221 | ppp_data->phys_platform_procs = retbuf[4] >> 6 * 8; | ||
222 | ppp_data->max_proc_cap_avail = (retbuf[4] >> 3 * 8) & 0xffffff; | ||
223 | ppp_data->entitled_proc_cap_avail = retbuf[4] & 0xffffff; | ||
224 | |||
213 | return rc; | 225 | return rc; |
214 | } | 226 | } |
215 | 227 | ||
@@ -234,6 +246,8 @@ static unsigned h_pic(unsigned long *pool_idle_time, | |||
234 | static void parse_ppp_data(struct seq_file *m) | 246 | static void parse_ppp_data(struct seq_file *m) |
235 | { | 247 | { |
236 | struct hvcall_ppp_data ppp_data; | 248 | struct hvcall_ppp_data ppp_data; |
249 | struct device_node *root; | ||
250 | const int *perf_level; | ||
237 | int rc; | 251 | int rc; |
238 | 252 | ||
239 | rc = h_get_ppp(&ppp_data); | 253 | rc = h_get_ppp(&ppp_data); |
@@ -267,6 +281,28 @@ static void parse_ppp_data(struct seq_file *m) | |||
267 | seq_printf(m, "capped=%d\n", ppp_data.capped); | 281 | seq_printf(m, "capped=%d\n", ppp_data.capped); |
268 | seq_printf(m, "unallocated_capacity=%lld\n", | 282 | seq_printf(m, "unallocated_capacity=%lld\n", |
269 | ppp_data.unallocated_entitlement); | 283 | ppp_data.unallocated_entitlement); |
284 | |||
285 | /* The last bits of information returned from h_get_ppp are only | ||
286 | * valid if the ibm,partition-performance-parameters-level | ||
287 | * property is >= 1. | ||
288 | */ | ||
289 | root = of_find_node_by_path("/"); | ||
290 | if (root) { | ||
291 | perf_level = of_get_property(root, | ||
292 | "ibm,partition-performance-parameters-level", | ||
293 | NULL); | ||
294 | if (perf_level && (*perf_level >= 1)) { | ||
295 | seq_printf(m, | ||
296 | "physical_procs_allocated_to_virtualization=%d\n", | ||
297 | ppp_data.phys_platform_procs); | ||
298 | seq_printf(m, "max_proc_capacity_available=%d\n", | ||
299 | ppp_data.max_proc_cap_avail); | ||
300 | seq_printf(m, "entitled_proc_capacity_available=%d\n", | ||
301 | ppp_data.entitled_proc_cap_avail); | ||
302 | } | ||
303 | |||
304 | of_node_put(root); | ||
305 | } | ||
270 | } | 306 | } |
271 | 307 | ||
272 | /** | 308 | /** |
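Editor's note: the new fields pulled out of the H_GET_PPP return buffer above follow the R8 layout described in the comment, i.e. a 16-bit physical-processor count in the top bytes followed by two 24-bit capacity percentages. A minimal sketch of that unpacking, using an invented sample value:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* invented sample: 16 physical procs, 400 max, 200 entitled */
		uint64_t r8 = 0x00100001900000C8ULL;

		unsigned int phys_platform_procs     = r8 >> (6 * 8);              /* top 16 bits */
		unsigned int max_proc_cap_avail      = (r8 >> (3 * 8)) & 0xffffff; /* next 24 bits */
		unsigned int entitled_proc_cap_avail = r8 & 0xffffff;              /* low 24 bits */

		printf("phys=%u max=%u entitled=%u\n",
		       phys_platform_procs, max_proc_cap_avail, entitled_proc_cap_avail);
		return 0;
	}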
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index b9530b2395a2..a5cf9c1356a6 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S | |||
@@ -457,98 +457,6 @@ _GLOBAL(disable_kernel_fp) | |||
457 | isync | 457 | isync |
458 | blr | 458 | blr |
459 | 459 | ||
460 | #ifdef CONFIG_ALTIVEC | ||
461 | |||
462 | #if 0 /* this has no callers for now */ | ||
463 | /* | ||
464 | * disable_kernel_altivec() | ||
465 | * Disable the VMX. | ||
466 | */ | ||
467 | _GLOBAL(disable_kernel_altivec) | ||
468 | mfmsr r3 | ||
469 | rldicl r0,r3,(63-MSR_VEC_LG),1 | ||
470 | rldicl r3,r0,(MSR_VEC_LG+1),0 | ||
471 | mtmsrd r3 /* disable use of VMX now */ | ||
472 | isync | ||
473 | blr | ||
474 | #endif /* 0 */ | ||
475 | |||
476 | /* | ||
477 | * giveup_altivec(tsk) | ||
478 | * Disable VMX for the task given as the argument, | ||
479 | * and save the vector registers in its thread_struct. | ||
480 | * Enables the VMX for use in the kernel on return. | ||
481 | */ | ||
482 | _GLOBAL(giveup_altivec) | ||
483 | mfmsr r5 | ||
484 | oris r5,r5,MSR_VEC@h | ||
485 | mtmsrd r5 /* enable use of VMX now */ | ||
486 | isync | ||
487 | cmpdi 0,r3,0 | ||
488 | beqlr- /* if no previous owner, done */ | ||
489 | addi r3,r3,THREAD /* want THREAD of task */ | ||
490 | ld r5,PT_REGS(r3) | ||
491 | cmpdi 0,r5,0 | ||
492 | SAVE_32VRS(0,r4,r3) | ||
493 | mfvscr vr0 | ||
494 | li r4,THREAD_VSCR | ||
495 | stvx vr0,r4,r3 | ||
496 | beq 1f | ||
497 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
498 | #ifdef CONFIG_VSX | ||
499 | BEGIN_FTR_SECTION | ||
500 | lis r3,(MSR_VEC|MSR_VSX)@h | ||
501 | FTR_SECTION_ELSE | ||
502 | lis r3,MSR_VEC@h | ||
503 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX) | ||
504 | #else | ||
505 | lis r3,MSR_VEC@h | ||
506 | #endif | ||
507 | andc r4,r4,r3 /* disable FP for previous task */ | ||
508 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
509 | 1: | ||
510 | #ifndef CONFIG_SMP | ||
511 | li r5,0 | ||
512 | ld r4,last_task_used_altivec@got(r2) | ||
513 | std r5,0(r4) | ||
514 | #endif /* CONFIG_SMP */ | ||
515 | blr | ||
516 | |||
517 | #endif /* CONFIG_ALTIVEC */ | ||
518 | |||
519 | #ifdef CONFIG_VSX | ||
520 | /* | ||
521 | * __giveup_vsx(tsk) | ||
522 | * Disable VSX for the task given as the argument. | ||
523 | * Does NOT save vsx registers. | ||
524 | * Enables the VSX for use in the kernel on return. | ||
525 | */ | ||
526 | _GLOBAL(__giveup_vsx) | ||
527 | mfmsr r5 | ||
528 | oris r5,r5,MSR_VSX@h | ||
529 | mtmsrd r5 /* enable use of VSX now */ | ||
530 | isync | ||
531 | |||
532 | cmpdi 0,r3,0 | ||
533 | beqlr- /* if no previous owner, done */ | ||
534 | addi r3,r3,THREAD /* want THREAD of task */ | ||
535 | ld r5,PT_REGS(r3) | ||
536 | cmpdi 0,r5,0 | ||
537 | beq 1f | ||
538 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
539 | lis r3,MSR_VSX@h | ||
540 | andc r4,r4,r3 /* disable VSX for previous task */ | ||
541 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
542 | 1: | ||
543 | #ifndef CONFIG_SMP | ||
544 | li r5,0 | ||
545 | ld r4,last_task_used_vsx@got(r2) | ||
546 | std r5,0(r4) | ||
547 | #endif /* CONFIG_SMP */ | ||
548 | blr | ||
549 | |||
550 | #endif /* CONFIG_VSX */ | ||
551 | |||
552 | /* kexec_wait(phys_cpu) | 460 | /* kexec_wait(phys_cpu) |
553 | * | 461 | * |
554 | * wait for the flag to change, indicating this kernel is going away but | 462 | * wait for the flag to change, indicating this kernel is going away but |
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index c744b327bcab..e9962c7f8a09 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c | |||
@@ -18,6 +18,8 @@ | |||
18 | * field correctly */ | 18 | * field correctly */ |
19 | extern unsigned long __toc_start; | 19 | extern unsigned long __toc_start; |
20 | 20 | ||
21 | #ifdef CONFIG_PPC_BOOK3S | ||
22 | |||
21 | /* | 23 | /* |
22 | * The structure which the hypervisor knows about - this structure | 24 | * The structure which the hypervisor knows about - this structure |
23 | * should not cross a page boundary. The vpa_init/register_vpa call | 25 | * should not cross a page boundary. The vpa_init/register_vpa call |
@@ -41,6 +43,10 @@ struct lppaca lppaca[] = { | |||
41 | }, | 43 | }, |
42 | }; | 44 | }; |
43 | 45 | ||
46 | #endif /* CONFIG_PPC_BOOK3S */ | ||
47 | |||
48 | #ifdef CONFIG_PPC_STD_MMU_64 | ||
49 | |||
44 | /* | 50 | /* |
45 | * 3 persistent SLBs are registered here. The buffer will be zero | 51 | * 3 persistent SLBs are registered here. The buffer will be zero |
46 | * initially, hence will all be invaild until we actually write them. | 52 | * initially, hence will all be invaild until we actually write them. |
@@ -52,6 +58,8 @@ struct slb_shadow slb_shadow[] __cacheline_aligned = { | |||
52 | }, | 58 | }, |
53 | }; | 59 | }; |
54 | 60 | ||
61 | #endif /* CONFIG_PPC_STD_MMU_64 */ | ||
62 | |||
55 | /* The Paca is an array with one entry per processor. Each contains an | 63 | /* The Paca is an array with one entry per processor. Each contains an |
56 | * lppaca, which contains the information shared between the | 64 | * lppaca, which contains the information shared between the |
57 | * hypervisor and Linux. | 65 | * hypervisor and Linux. |
@@ -77,15 +85,19 @@ void __init initialise_pacas(void) | |||
77 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | 85 | for (cpu = 0; cpu < NR_CPUS; cpu++) { |
78 | struct paca_struct *new_paca = &paca[cpu]; | 86 | struct paca_struct *new_paca = &paca[cpu]; |
79 | 87 | ||
88 | #ifdef CONFIG_PPC_BOOK3S | ||
80 | new_paca->lppaca_ptr = &lppaca[cpu]; | 89 | new_paca->lppaca_ptr = &lppaca[cpu]; |
90 | #endif | ||
81 | new_paca->lock_token = 0x8000; | 91 | new_paca->lock_token = 0x8000; |
82 | new_paca->paca_index = cpu; | 92 | new_paca->paca_index = cpu; |
83 | new_paca->kernel_toc = kernel_toc; | 93 | new_paca->kernel_toc = kernel_toc; |
84 | new_paca->kernelbase = (unsigned long) _stext; | 94 | new_paca->kernelbase = (unsigned long) _stext; |
85 | new_paca->kernel_msr = MSR_KERNEL; | 95 | new_paca->kernel_msr = MSR_KERNEL; |
86 | new_paca->hw_cpu_id = 0xffff; | 96 | new_paca->hw_cpu_id = 0xffff; |
87 | new_paca->slb_shadow_ptr = &slb_shadow[cpu]; | ||
88 | new_paca->__current = &init_task; | 97 | new_paca->__current = &init_task; |
98 | #ifdef CONFIG_PPC_STD_MMU_64 | ||
99 | new_paca->slb_shadow_ptr = &slb_shadow[cpu]; | ||
100 | #endif /* CONFIG_PPC_STD_MMU_64 */ | ||
89 | 101 | ||
90 | } | 102 | } |
91 | } | 103 | } |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 4fee63cb53ff..5a56e97c5ac0 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -1505,7 +1505,7 @@ void __init pcibios_resource_survey(void) | |||
1505 | * rest of the code later, for now, keep it as-is as our main | 1505 | * rest of the code later, for now, keep it as-is as our main |
1506 | * resource allocation function doesn't deal with sub-trees yet. | 1506 | * resource allocation function doesn't deal with sub-trees yet. |
1507 | */ | 1507 | */ |
1508 | void __devinit pcibios_claim_one_bus(struct pci_bus *bus) | 1508 | void pcibios_claim_one_bus(struct pci_bus *bus) |
1509 | { | 1509 | { |
1510 | struct pci_dev *dev; | 1510 | struct pci_dev *dev; |
1511 | struct pci_bus *child_bus; | 1511 | struct pci_bus *child_bus; |
@@ -1533,7 +1533,6 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus) | |||
1533 | list_for_each_entry(child_bus, &bus->children, node) | 1533 | list_for_each_entry(child_bus, &bus->children, node) |
1534 | pcibios_claim_one_bus(child_bus); | 1534 | pcibios_claim_one_bus(child_bus); |
1535 | } | 1535 | } |
1536 | EXPORT_SYMBOL_GPL(pcibios_claim_one_bus); | ||
1537 | 1536 | ||
1538 | 1537 | ||
1539 | /* pcibios_finish_adding_to_bus | 1538 | /* pcibios_finish_adding_to_bus |
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index d473634e39e3..3ae1c666ff92 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c | |||
@@ -33,7 +33,6 @@ int pcibios_assign_bus_offset = 1; | |||
33 | 33 | ||
34 | void pcibios_make_OF_bus_map(void); | 34 | void pcibios_make_OF_bus_map(void); |
35 | 35 | ||
36 | static void fixup_broken_pcnet32(struct pci_dev* dev); | ||
37 | static void fixup_cpc710_pci64(struct pci_dev* dev); | 36 | static void fixup_cpc710_pci64(struct pci_dev* dev); |
38 | #ifdef CONFIG_PPC_OF | 37 | #ifdef CONFIG_PPC_OF |
39 | static u8* pci_to_OF_bus_map; | 38 | static u8* pci_to_OF_bus_map; |
@@ -72,16 +71,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_res | |||
72 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl); | 71 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl); |
73 | 72 | ||
74 | static void | 73 | static void |
75 | fixup_broken_pcnet32(struct pci_dev* dev) | ||
76 | { | ||
77 | if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) { | ||
78 | dev->vendor = PCI_VENDOR_ID_AMD; | ||
79 | pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD); | ||
80 | } | ||
81 | } | ||
82 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32); | ||
83 | |||
84 | static void | ||
85 | fixup_cpc710_pci64(struct pci_dev* dev) | 74 | fixup_cpc710_pci64(struct pci_dev* dev) |
86 | { | 75 | { |
87 | /* Hide the PCI64 BARs from the kernel as their content doesn't | 76 | /* Hide the PCI64 BARs from the kernel as their content doesn't |
@@ -447,14 +436,6 @@ static int __init pcibios_init(void) | |||
447 | 436 | ||
448 | subsys_initcall(pcibios_init); | 437 | subsys_initcall(pcibios_init); |
449 | 438 | ||
450 | /* the next one is stolen from the alpha port... */ | ||
451 | void __init | ||
452 | pcibios_update_irq(struct pci_dev *dev, int irq) | ||
453 | { | ||
454 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); | ||
455 | /* XXX FIXME - update OF device tree node interrupt property */ | ||
456 | } | ||
457 | |||
458 | static struct pci_controller* | 439 | static struct pci_controller* |
459 | pci_bus_to_hose(int bus) | 440 | pci_bus_to_hose(int bus) |
460 | { | 441 | { |
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 96edb6f8babb..9e8902fa14c7 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c | |||
@@ -43,16 +43,6 @@ unsigned long pci_probe_only = 1; | |||
43 | unsigned long pci_io_base = ISA_IO_BASE; | 43 | unsigned long pci_io_base = ISA_IO_BASE; |
44 | EXPORT_SYMBOL(pci_io_base); | 44 | EXPORT_SYMBOL(pci_io_base); |
45 | 45 | ||
46 | static void fixup_broken_pcnet32(struct pci_dev* dev) | ||
47 | { | ||
48 | if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) { | ||
49 | dev->vendor = PCI_VENDOR_ID_AMD; | ||
50 | pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD); | ||
51 | } | ||
52 | } | ||
53 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32); | ||
54 | |||
55 | |||
56 | static u32 get_int_prop(struct device_node *np, const char *name, u32 def) | 46 | static u32 get_int_prop(struct device_node *np, const char *name, u32 def) |
57 | { | 47 | { |
58 | const u32 *prop; | 48 | const u32 *prop; |
@@ -430,6 +420,9 @@ int pcibios_unmap_io_space(struct pci_bus *bus) | |||
430 | * so flushing the hash table is the only sane way to make sure | 420 | * so flushing the hash table is the only sane way to make sure |
431 | * that no hash entries are covering that removed bridge area | 421 | * that no hash entries are covering that removed bridge area |
432 | * while still allowing other busses overlapping those pages | 422 | * while still allowing other busses overlapping those pages |
423 | * | ||
424 | * Note: If we ever support P2P hotplug on Book3E, we'll have | ||
425 | * to do an appropriate TLB flush here too | ||
433 | */ | 426 | */ |
434 | if (bus->self) { | 427 | if (bus->self) { |
435 | struct resource *res = bus->resource[0]; | 428 | struct resource *res = bus->resource[0]; |
@@ -437,8 +430,10 @@ int pcibios_unmap_io_space(struct pci_bus *bus) | |||
437 | pr_debug("IO unmapping for PCI-PCI bridge %s\n", | 430 | pr_debug("IO unmapping for PCI-PCI bridge %s\n", |
438 | pci_name(bus->self)); | 431 | pci_name(bus->self)); |
439 | 432 | ||
433 | #ifdef CONFIG_PPC_STD_MMU_64 | ||
440 | __flush_hash_table_range(&init_mm, res->start + _IO_BASE, | 434 | __flush_hash_table_range(&init_mm, res->start + _IO_BASE, |
441 | res->end + _IO_BASE + 1); | 435 | res->end + _IO_BASE + 1); |
436 | #endif | ||
442 | return 0; | 437 | return 0; |
443 | } | 438 | } |
444 | 439 | ||
@@ -511,7 +506,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus) | |||
511 | pr_debug("IO mapping for PHB %s\n", hose->dn->full_name); | 506 | pr_debug("IO mapping for PHB %s\n", hose->dn->full_name); |
512 | pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n", | 507 | pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n", |
513 | hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); | 508 | hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); |
514 | pr_debug(" size=0x%016lx (alloc=0x%016lx)\n", | 509 | pr_debug(" size=0x%016llx (alloc=0x%016lx)\n", |
515 | hose->pci_io_size, size_page); | 510 | hose->pci_io_size, size_page); |
516 | 511 | ||
517 | /* Establish the mapping */ | 512 | /* Establish the mapping */ |
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index 1c67de52e3ce..d5e36e5dc7c2 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <asm/io.h> | 27 | #include <asm/io.h> |
28 | #include <asm/prom.h> | 28 | #include <asm/prom.h> |
29 | #include <asm/pci-bridge.h> | 29 | #include <asm/pci-bridge.h> |
30 | #include <asm/pSeries_reconfig.h> | ||
31 | #include <asm/ppc-pci.h> | 30 | #include <asm/ppc-pci.h> |
32 | #include <asm/firmware.h> | 31 | #include <asm/firmware.h> |
33 | 32 | ||
@@ -35,7 +34,7 @@ | |||
35 | * Traverse_func that inits the PCI fields of the device node. | 34 | * Traverse_func that inits the PCI fields of the device node. |
36 | * NOTE: this *must* be done before read/write config to the device. | 35 | * NOTE: this *must* be done before read/write config to the device. |
37 | */ | 36 | */ |
38 | static void * __devinit update_dn_pci_info(struct device_node *dn, void *data) | 37 | void * __devinit update_dn_pci_info(struct device_node *dn, void *data) |
39 | { | 38 | { |
40 | struct pci_controller *phb = data; | 39 | struct pci_controller *phb = data; |
41 | const int *type = | 40 | const int *type = |
@@ -184,29 +183,6 @@ struct device_node *fetch_dev_dn(struct pci_dev *dev) | |||
184 | } | 183 | } |
185 | EXPORT_SYMBOL(fetch_dev_dn); | 184 | EXPORT_SYMBOL(fetch_dev_dn); |
186 | 185 | ||
187 | static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) | ||
188 | { | ||
189 | struct device_node *np = node; | ||
190 | struct pci_dn *pci = NULL; | ||
191 | int err = NOTIFY_OK; | ||
192 | |||
193 | switch (action) { | ||
194 | case PSERIES_RECONFIG_ADD: | ||
195 | pci = np->parent->data; | ||
196 | if (pci) | ||
197 | update_dn_pci_info(np, pci->phb); | ||
198 | break; | ||
199 | default: | ||
200 | err = NOTIFY_DONE; | ||
201 | break; | ||
202 | } | ||
203 | return err; | ||
204 | } | ||
205 | |||
206 | static struct notifier_block pci_dn_reconfig_nb = { | ||
207 | .notifier_call = pci_dn_reconfig_notifier, | ||
208 | }; | ||
209 | |||
210 | /** | 186 | /** |
211 | * pci_devs_phb_init - Initialize phbs and pci devs under them. | 187 | * pci_devs_phb_init - Initialize phbs and pci devs under them. |
212 | * | 188 | * |
@@ -223,6 +199,4 @@ void __init pci_devs_phb_init(void) | |||
223 | /* This must be done first so the device nodes have valid pci info! */ | 199 | /* This must be done first so the device nodes have valid pci info! */ |
224 | list_for_each_entry_safe(phb, tmp, &hose_list, list_node) | 200 | list_for_each_entry_safe(phb, tmp, &hose_list, list_node) |
225 | pci_devs_phb_init_dynamic(phb); | 201 | pci_devs_phb_init_dynamic(phb); |
226 | |||
227 | pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb); | ||
228 | } | 202 | } |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 7b44a33f03c2..3e7135bbe40f 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -650,7 +650,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
650 | p->thread.ksp_limit = (unsigned long)task_stack_page(p) + | 650 | p->thread.ksp_limit = (unsigned long)task_stack_page(p) + |
651 | _ALIGN_UP(sizeof(struct thread_info), 16); | 651 | _ALIGN_UP(sizeof(struct thread_info), 16); |
652 | 652 | ||
653 | #ifdef CONFIG_PPC64 | 653 | #ifdef CONFIG_PPC_STD_MMU_64 |
654 | if (cpu_has_feature(CPU_FTR_SLB)) { | 654 | if (cpu_has_feature(CPU_FTR_SLB)) { |
655 | unsigned long sp_vsid; | 655 | unsigned long sp_vsid; |
656 | unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; | 656 | unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; |
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index ce01ff2474da..d4405b95bfaa 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -585,7 +585,7 @@ static void __init check_cpu_pa_features(unsigned long node) | |||
585 | ibm_pa_features, ARRAY_SIZE(ibm_pa_features)); | 585 | ibm_pa_features, ARRAY_SIZE(ibm_pa_features)); |
586 | } | 586 | } |
587 | 587 | ||
588 | #ifdef CONFIG_PPC64 | 588 | #ifdef CONFIG_PPC_STD_MMU_64 |
589 | static void __init check_cpu_slb_size(unsigned long node) | 589 | static void __init check_cpu_slb_size(unsigned long node) |
590 | { | 590 | { |
591 | u32 *slb_size_ptr; | 591 | u32 *slb_size_ptr; |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 2f0e64b53642..ef6f64950e9b 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -44,10 +44,7 @@ | |||
44 | #include <asm/sections.h> | 44 | #include <asm/sections.h> |
45 | #include <asm/machdep.h> | 45 | #include <asm/machdep.h> |
46 | 46 | ||
47 | #ifdef CONFIG_LOGO_LINUX_CLUT224 | ||
48 | #include <linux/linux_logo.h> | 47 | #include <linux/linux_logo.h> |
49 | extern const struct linux_logo logo_linux_clut224; | ||
50 | #endif | ||
51 | 48 | ||
52 | /* | 49 | /* |
53 | * Properties whose value is longer than this get excluded from our | 50 | * Properties whose value is longer than this get excluded from our |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 3635be61f899..9fa2c7dcd05a 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -704,15 +704,34 @@ void user_enable_single_step(struct task_struct *task) | |||
704 | 704 | ||
705 | if (regs != NULL) { | 705 | if (regs != NULL) { |
706 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 706 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) |
707 | task->thread.dbcr0 &= ~DBCR0_BT; | ||
707 | task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; | 708 | task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; |
708 | regs->msr |= MSR_DE; | 709 | regs->msr |= MSR_DE; |
709 | #else | 710 | #else |
711 | regs->msr &= ~MSR_BE; | ||
710 | regs->msr |= MSR_SE; | 712 | regs->msr |= MSR_SE; |
711 | #endif | 713 | #endif |
712 | } | 714 | } |
713 | set_tsk_thread_flag(task, TIF_SINGLESTEP); | 715 | set_tsk_thread_flag(task, TIF_SINGLESTEP); |
714 | } | 716 | } |
715 | 717 | ||
718 | void user_enable_block_step(struct task_struct *task) | ||
719 | { | ||
720 | struct pt_regs *regs = task->thread.regs; | ||
721 | |||
722 | if (regs != NULL) { | ||
723 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | ||
724 | task->thread.dbcr0 &= ~DBCR0_IC; | ||
725 | task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT; | ||
726 | regs->msr |= MSR_DE; | ||
727 | #else | ||
728 | regs->msr &= ~MSR_SE; | ||
729 | regs->msr |= MSR_BE; | ||
730 | #endif | ||
731 | } | ||
732 | set_tsk_thread_flag(task, TIF_SINGLESTEP); | ||
733 | } | ||
734 | |||
716 | void user_disable_single_step(struct task_struct *task) | 735 | void user_disable_single_step(struct task_struct *task) |
717 | { | 736 | { |
718 | struct pt_regs *regs = task->thread.regs; | 737 | struct pt_regs *regs = task->thread.regs; |
@@ -726,10 +745,10 @@ void user_disable_single_step(struct task_struct *task) | |||
726 | 745 | ||
727 | if (regs != NULL) { | 746 | if (regs != NULL) { |
728 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) | 747 | #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) |
729 | task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_IDM); | 748 | task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM); |
730 | regs->msr &= ~MSR_DE; | 749 | regs->msr &= ~MSR_DE; |
731 | #else | 750 | #else |
732 | regs->msr &= ~MSR_SE; | 751 | regs->msr &= ~(MSR_SE | MSR_BE); |
733 | #endif | 752 | #endif |
734 | } | 753 | } |
735 | clear_tsk_thread_flag(task, TIF_SINGLESTEP); | 754 | clear_tsk_thread_flag(task, TIF_SINGLESTEP); |
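Editor's note: the new user_enable_block_step() above is what the generic ptrace layer ends up calling for branch-at-a-time stepping. Below is a hedged user-space sketch of a tracer driving it; the PTRACE_SINGLEBLOCK request value (0x100) is taken from the powerpc ptrace headers and is an assumption here, as is the choice of /bin/true as the tracee.

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/types.h>
	#include <sys/ptrace.h>
	#include <sys/wait.h>
	#include <unistd.h>

	#ifndef PTRACE_SINGLEBLOCK
	#define PTRACE_SINGLEBLOCK 0x100	/* powerpc value; assumption */
	#endif

	int main(void)
	{
		pid_t pid = fork();

		if (pid == 0) {
			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
			execl("/bin/true", "true", NULL);
			_exit(1);
		}

		waitpid(pid, NULL, 0);		/* child stops at exec */

		for (int i = 0; i < 10; i++) {	/* resume over ten taken branches */
			int status;

			if (ptrace(PTRACE_SINGLEBLOCK, pid, NULL, NULL) == -1)
				break;
			waitpid(pid, &status, 0);
			if (WIFEXITED(status))
				break;
			printf("stopped after branch %d\n", i);
		}

		ptrace(PTRACE_KILL, pid, NULL, NULL);
		return 0;
	}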
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 8869001ab5d7..54e66da8f743 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c | |||
@@ -93,10 +93,7 @@ static int rtas_pci_read_config(struct pci_bus *bus, | |||
93 | { | 93 | { |
94 | struct device_node *busdn, *dn; | 94 | struct device_node *busdn, *dn; |
95 | 95 | ||
96 | if (bus->self) | 96 | busdn = pci_bus_to_OF_node(bus); |
97 | busdn = pci_device_to_OF_node(bus->self); | ||
98 | else | ||
99 | busdn = bus->sysdata; /* must be a phb */ | ||
100 | 97 | ||
101 | /* Search only direct children of the bus */ | 98 | /* Search only direct children of the bus */ |
102 | for (dn = busdn->child; dn; dn = dn->sibling) { | 99 | for (dn = busdn->child; dn; dn = dn->sibling) { |
@@ -140,10 +137,7 @@ static int rtas_pci_write_config(struct pci_bus *bus, | |||
140 | { | 137 | { |
141 | struct device_node *busdn, *dn; | 138 | struct device_node *busdn, *dn; |
142 | 139 | ||
143 | if (bus->self) | 140 | busdn = pci_bus_to_OF_node(bus); |
144 | busdn = pci_device_to_OF_node(bus->self); | ||
145 | else | ||
146 | busdn = bus->sysdata; /* must be a phb */ | ||
147 | 141 | ||
148 | /* Search only direct children of the bus */ | 142 | /* Search only direct children of the bus */ |
149 | for (dn = busdn->child; dn; dn = dn->sibling) { | 143 | for (dn = busdn->child; dn; dn = dn->sibling) { |
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 9e1ca745d8f0..1d154248cf40 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/serial.h> | 39 | #include <asm/serial.h> |
40 | #include <asm/udbg.h> | 40 | #include <asm/udbg.h> |
41 | #include <asm/mmu_context.h> | 41 | #include <asm/mmu_context.h> |
42 | #include <asm/swiotlb.h> | ||
42 | 43 | ||
43 | #include "setup.h" | 44 | #include "setup.h" |
44 | 45 | ||
@@ -332,6 +333,11 @@ void __init setup_arch(char **cmdline_p) | |||
332 | ppc_md.setup_arch(); | 333 | ppc_md.setup_arch(); |
333 | if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); | 334 | if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab); |
334 | 335 | ||
336 | #ifdef CONFIG_SWIOTLB | ||
337 | if (ppc_swiotlb_enable) | ||
338 | swiotlb_init(); | ||
339 | #endif | ||
340 | |||
335 | paging_init(); | 341 | paging_init(); |
336 | 342 | ||
337 | /* Initialize the MMU context management stuff */ | 343 | /* Initialize the MMU context management stuff */ |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index c410c606955d..1f6816003ebe 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -61,6 +61,7 @@ | |||
61 | #include <asm/xmon.h> | 61 | #include <asm/xmon.h> |
62 | #include <asm/udbg.h> | 62 | #include <asm/udbg.h> |
63 | #include <asm/kexec.h> | 63 | #include <asm/kexec.h> |
64 | #include <asm/swiotlb.h> | ||
64 | 65 | ||
65 | #include "setup.h" | 66 | #include "setup.h" |
66 | 67 | ||
@@ -417,12 +418,14 @@ void __init setup_system(void) | |||
417 | if (ppc64_caches.iline_size != 0x80) | 418 | if (ppc64_caches.iline_size != 0x80) |
418 | printk("ppc64_caches.icache_line_size = 0x%x\n", | 419 | printk("ppc64_caches.icache_line_size = 0x%x\n", |
419 | ppc64_caches.iline_size); | 420 | ppc64_caches.iline_size); |
421 | #ifdef CONFIG_PPC_STD_MMU_64 | ||
420 | if (htab_address) | 422 | if (htab_address) |
421 | printk("htab_address = 0x%p\n", htab_address); | 423 | printk("htab_address = 0x%p\n", htab_address); |
422 | printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); | 424 | printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); |
425 | #endif /* CONFIG_PPC_STD_MMU_64 */ | ||
423 | if (PHYSICAL_START > 0) | 426 | if (PHYSICAL_START > 0) |
424 | printk("physical_start = 0x%lx\n", | 427 | printk("physical_start = 0x%llx\n", |
425 | PHYSICAL_START); | 428 | (unsigned long long)PHYSICAL_START); |
426 | printk("-----------------------------------------------------\n"); | 429 | printk("-----------------------------------------------------\n"); |
427 | 430 | ||
428 | DBG(" <- setup_system()\n"); | 431 | DBG(" <- setup_system()\n"); |
@@ -511,8 +514,9 @@ void __init setup_arch(char **cmdline_p) | |||
511 | irqstack_early_init(); | 514 | irqstack_early_init(); |
512 | emergency_stack_init(); | 515 | emergency_stack_init(); |
513 | 516 | ||
517 | #ifdef CONFIG_PPC_STD_MMU_64 | ||
514 | stabs_alloc(); | 518 | stabs_alloc(); |
515 | 519 | #endif | |
516 | /* set up the bootmem stuff with available memory */ | 520 | /* set up the bootmem stuff with available memory */ |
517 | do_init_bootmem(); | 521 | do_init_bootmem(); |
518 | sparse_init(); | 522 | sparse_init(); |
@@ -524,6 +528,11 @@ void __init setup_arch(char **cmdline_p) | |||
524 | if (ppc_md.setup_arch) | 528 | if (ppc_md.setup_arch) |
525 | ppc_md.setup_arch(); | 529 | ppc_md.setup_arch(); |
526 | 530 | ||
531 | #ifdef CONFIG_SWIOTLB | ||
532 | if (ppc_swiotlb_enable) | ||
533 | swiotlb_init(); | ||
534 | #endif | ||
535 | |||
527 | paging_init(); | 536 | paging_init(); |
528 | ppc64_boot_msg(0x15, "Setup Done"); | 537 | ppc64_boot_msg(0x15, "Setup Done"); |
529 | } | 538 | } |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 48571ac56fb7..15391c2ab013 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <linux/jiffies.h> | 52 | #include <linux/jiffies.h> |
53 | #include <linux/posix-timers.h> | 53 | #include <linux/posix-timers.h> |
54 | #include <linux/irq.h> | 54 | #include <linux/irq.h> |
55 | #include <linux/delay.h> | ||
55 | 56 | ||
56 | #include <asm/io.h> | 57 | #include <asm/io.h> |
57 | #include <asm/processor.h> | 58 | #include <asm/processor.h> |
@@ -109,7 +110,7 @@ static void decrementer_set_mode(enum clock_event_mode mode, | |||
109 | static struct clock_event_device decrementer_clockevent = { | 110 | static struct clock_event_device decrementer_clockevent = { |
110 | .name = "decrementer", | 111 | .name = "decrementer", |
111 | .rating = 200, | 112 | .rating = 200, |
112 | .shift = 16, | 113 | .shift = 0, /* To be filled in */ |
113 | .mult = 0, /* To be filled in */ | 114 | .mult = 0, /* To be filled in */ |
114 | .irq = 0, | 115 | .irq = 0, |
115 | .set_next_event = decrementer_set_next_event, | 116 | .set_next_event = decrementer_set_next_event, |
@@ -843,6 +844,22 @@ static void decrementer_set_mode(enum clock_event_mode mode, | |||
843 | decrementer_set_next_event(DECREMENTER_MAX, dev); | 844 | decrementer_set_next_event(DECREMENTER_MAX, dev); |
844 | } | 845 | } |
845 | 846 | ||
847 | static void __init setup_clockevent_multiplier(unsigned long hz) | ||
848 | { | ||
849 | u64 mult, shift = 32; | ||
850 | |||
851 | while (1) { | ||
852 | mult = div_sc(hz, NSEC_PER_SEC, shift); | ||
853 | if (mult && (mult >> 32UL) == 0UL) | ||
854 | break; | ||
855 | |||
856 | shift--; | ||
857 | } | ||
858 | |||
859 | decrementer_clockevent.shift = shift; | ||
860 | decrementer_clockevent.mult = mult; | ||
861 | } | ||
862 | |||
846 | static void register_decrementer_clockevent(int cpu) | 863 | static void register_decrementer_clockevent(int cpu) |
847 | { | 864 | { |
848 | struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; | 865 | struct clock_event_device *dec = &per_cpu(decrementers, cpu).event; |
@@ -860,8 +877,7 @@ static void __init init_decrementer_clockevent(void) | |||
860 | { | 877 | { |
861 | int cpu = smp_processor_id(); | 878 | int cpu = smp_processor_id(); |
862 | 879 | ||
863 | decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC, | 880 | setup_clockevent_multiplier(ppc_tb_freq); |
864 | decrementer_clockevent.shift); | ||
865 | decrementer_clockevent.max_delta_ns = | 881 | decrementer_clockevent.max_delta_ns = |
866 | clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent); | 882 | clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent); |
867 | decrementer_clockevent.min_delta_ns = | 883 | decrementer_clockevent.min_delta_ns = |
@@ -1128,6 +1144,15 @@ void div128_by_32(u64 dividend_high, u64 dividend_low, | |||
1128 | 1144 | ||
1129 | } | 1145 | } |
1130 | 1146 | ||
1147 | /* We don't need to calibrate delay, we use the CPU timebase for that */ | ||
1148 | void calibrate_delay(void) | ||
1149 | { | ||
1150 | /* Some generic code (such as spinlock debug) use loops_per_jiffy | ||
1151 | * as the number of __delay(1) in a jiffy, so make it so | ||
1152 | */ | ||
1153 | loops_per_jiffy = tb_ticks_per_jiffy; | ||
1154 | } | ||
1155 | |||
1131 | static int __init rtc_init(void) | 1156 | static int __init rtc_init(void) |
1132 | { | 1157 | { |
1133 | struct platform_device *pdev; | 1158 | struct platform_device *pdev; |
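Editor's note: the decrementer clockevent now derives both mult and shift at boot instead of hard-coding shift = 16; setup_clockevent_multiplier() above lowers shift from 32 until the multiplier fits in 32 bits. A stand-alone sketch of the same search follows; the 512 MHz timebase is an example value, not one taken from the patch, and div_sc() here just mirrors the usual kernel helper.

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL

	static uint64_t div_sc(uint64_t from, uint64_t to, int shift)
	{
		return (from << shift) / to;	/* mult such that ns = (ticks * mult) >> shift */
	}

	int main(void)
	{
		uint64_t hz = 512000000;	/* example timebase frequency */
		uint64_t mult;
		int shift = 32;

		while (1) {
			mult = div_sc(hz, NSEC_PER_SEC, shift);
			if (mult && (mult >> 32) == 0)
				break;
			shift--;
		}

		printf("shift=%d mult=%llu\n", shift, (unsigned long long)mult);
		return 0;
	}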
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 678fbff0d206..6f0ae1a9bfae 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -33,7 +33,9 @@ | |||
33 | #include <linux/backlight.h> | 33 | #include <linux/backlight.h> |
34 | #include <linux/bug.h> | 34 | #include <linux/bug.h> |
35 | #include <linux/kdebug.h> | 35 | #include <linux/kdebug.h> |
36 | #include <linux/debugfs.h> | ||
36 | 37 | ||
38 | #include <asm/emulated_ops.h> | ||
37 | #include <asm/pgtable.h> | 39 | #include <asm/pgtable.h> |
38 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
39 | #include <asm/system.h> | 41 | #include <asm/system.h> |
@@ -757,36 +759,44 @@ static int emulate_instruction(struct pt_regs *regs) | |||
757 | 759 | ||
758 | /* Emulate the mfspr rD, PVR. */ | 760 | /* Emulate the mfspr rD, PVR. */ |
759 | if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { | 761 | if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) { |
762 | PPC_WARN_EMULATED(mfpvr); | ||
760 | rd = (instword >> 21) & 0x1f; | 763 | rd = (instword >> 21) & 0x1f; |
761 | regs->gpr[rd] = mfspr(SPRN_PVR); | 764 | regs->gpr[rd] = mfspr(SPRN_PVR); |
762 | return 0; | 765 | return 0; |
763 | } | 766 | } |
764 | 767 | ||
765 | /* Emulating the dcba insn is just a no-op. */ | 768 | /* Emulating the dcba insn is just a no-op. */ |
766 | if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) | 769 | if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) { |
770 | PPC_WARN_EMULATED(dcba); | ||
767 | return 0; | 771 | return 0; |
772 | } | ||
768 | 773 | ||
769 | /* Emulate the mcrxr insn. */ | 774 | /* Emulate the mcrxr insn. */ |
770 | if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { | 775 | if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) { |
771 | int shift = (instword >> 21) & 0x1c; | 776 | int shift = (instword >> 21) & 0x1c; |
772 | unsigned long msk = 0xf0000000UL >> shift; | 777 | unsigned long msk = 0xf0000000UL >> shift; |
773 | 778 | ||
779 | PPC_WARN_EMULATED(mcrxr); | ||
774 | regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); | 780 | regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk); |
775 | regs->xer &= ~0xf0000000UL; | 781 | regs->xer &= ~0xf0000000UL; |
776 | return 0; | 782 | return 0; |
777 | } | 783 | } |
778 | 784 | ||
779 | /* Emulate load/store string insn. */ | 785 | /* Emulate load/store string insn. */ |
780 | if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) | 786 | if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { |
787 | PPC_WARN_EMULATED(string); | ||
781 | return emulate_string_inst(regs, instword); | 788 | return emulate_string_inst(regs, instword); |
789 | } | ||
782 | 790 | ||
783 | /* Emulate the popcntb (Population Count Bytes) instruction. */ | 791 | /* Emulate the popcntb (Population Count Bytes) instruction. */ |
784 | if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { | 792 | if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) { |
793 | PPC_WARN_EMULATED(popcntb); | ||
785 | return emulate_popcntb_inst(regs, instword); | 794 | return emulate_popcntb_inst(regs, instword); |
786 | } | 795 | } |
787 | 796 | ||
788 | /* Emulate isel (Integer Select) instruction */ | 797 | /* Emulate isel (Integer Select) instruction */ |
789 | if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { | 798 | if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) { |
799 | PPC_WARN_EMULATED(isel); | ||
790 | return emulate_isel(regs, instword); | 800 | return emulate_isel(regs, instword); |
791 | } | 801 | } |
792 | 802 | ||
@@ -984,6 +994,8 @@ void SoftwareEmulation(struct pt_regs *regs) | |||
984 | 994 | ||
985 | #ifdef CONFIG_MATH_EMULATION | 995 | #ifdef CONFIG_MATH_EMULATION |
986 | errcode = do_mathemu(regs); | 996 | errcode = do_mathemu(regs); |
997 | if (errcode >= 0) | ||
998 | PPC_WARN_EMULATED(math); | ||
987 | 999 | ||
988 | switch (errcode) { | 1000 | switch (errcode) { |
989 | case 0: | 1001 | case 0: |
@@ -1005,6 +1017,9 @@ void SoftwareEmulation(struct pt_regs *regs) | |||
1005 | 1017 | ||
1006 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) | 1018 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) |
1007 | errcode = Soft_emulate_8xx(regs); | 1019 | errcode = Soft_emulate_8xx(regs); |
1020 | if (errcode >= 0) | ||
1021 | PPC_WARN_EMULATED(8xx); | ||
1022 | |||
1008 | switch (errcode) { | 1023 | switch (errcode) { |
1009 | case 0: | 1024 | case 0: |
1010 | emulate_single_step(regs); | 1025 | emulate_single_step(regs); |
@@ -1026,7 +1041,34 @@ void SoftwareEmulation(struct pt_regs *regs) | |||
1026 | 1041 | ||
1027 | void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) | 1042 | void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) |
1028 | { | 1043 | { |
1029 | if (debug_status & DBSR_IC) { /* instruction completion */ | 1044 | /* Hack alert: On BookE, Branch Taken stops on the branch itself, while |
1045 | * on server, it stops on the target of the branch. In order to simulate | ||
1046 | * the server behaviour, we thus restart right away with a single step | ||
1047 | * instead of stopping here when hitting a BT | ||
1048 | */ | ||
1049 | if (debug_status & DBSR_BT) { | ||
1050 | regs->msr &= ~MSR_DE; | ||
1051 | |||
1052 | /* Disable BT */ | ||
1053 | mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT); | ||
1054 | /* Clear the BT event */ | ||
1055 | mtspr(SPRN_DBSR, DBSR_BT); | ||
1056 | |||
1057 | /* Do the single step trick only when coming from userspace */ | ||
1058 | if (user_mode(regs)) { | ||
1059 | current->thread.dbcr0 &= ~DBCR0_BT; | ||
1060 | current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC; | ||
1061 | regs->msr |= MSR_DE; | ||
1062 | return; | ||
1063 | } | ||
1064 | |||
1065 | if (notify_die(DIE_SSTEP, "block_step", regs, 5, | ||
1066 | 5, SIGTRAP) == NOTIFY_STOP) { | ||
1067 | return; | ||
1068 | } | ||
1069 | if (debugger_sstep(regs)) | ||
1070 | return; | ||
1071 | } else if (debug_status & DBSR_IC) { /* Instruction complete */ | ||
1030 | regs->msr &= ~MSR_DE; | 1072 | regs->msr &= ~MSR_DE; |
1031 | 1073 | ||
1032 | /* Disable instruction completion */ | 1074 | /* Disable instruction completion */ |
@@ -1042,9 +1084,8 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status) | |||
1042 | if (debugger_sstep(regs)) | 1084 | if (debugger_sstep(regs)) |
1043 | return; | 1085 | return; |
1044 | 1086 | ||
1045 | if (user_mode(regs)) { | 1087 | if (user_mode(regs)) |
1046 | current->thread.dbcr0 &= ~DBCR0_IC; | 1088 | current->thread.dbcr0 &= ~(DBCR0_IC); |
1047 | } | ||
1048 | 1089 | ||
1049 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); | 1090 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); |
1050 | } else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { | 1091 | } else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) { |
@@ -1088,6 +1129,7 @@ void altivec_assist_exception(struct pt_regs *regs) | |||
1088 | 1129 | ||
1089 | flush_altivec_to_thread(current); | 1130 | flush_altivec_to_thread(current); |
1090 | 1131 | ||
1132 | PPC_WARN_EMULATED(altivec); | ||
1091 | err = emulate_altivec(regs); | 1133 | err = emulate_altivec(regs); |
1092 | if (err == 0) { | 1134 | if (err == 0) { |
1093 | regs->nip += 4; /* skip emulated instruction */ | 1135 | regs->nip += 4; /* skip emulated instruction */ |
@@ -1286,3 +1328,79 @@ void kernel_bad_stack(struct pt_regs *regs) | |||
1286 | void __init trap_init(void) | 1328 | void __init trap_init(void) |
1287 | { | 1329 | { |
1288 | } | 1330 | } |
1331 | |||
1332 | |||
1333 | #ifdef CONFIG_PPC_EMULATED_STATS | ||
1334 | |||
1335 | #define WARN_EMULATED_SETUP(type) .type = { .name = #type } | ||
1336 | |||
1337 | struct ppc_emulated ppc_emulated = { | ||
1338 | #ifdef CONFIG_ALTIVEC | ||
1339 | WARN_EMULATED_SETUP(altivec), | ||
1340 | #endif | ||
1341 | WARN_EMULATED_SETUP(dcba), | ||
1342 | WARN_EMULATED_SETUP(dcbz), | ||
1343 | WARN_EMULATED_SETUP(fp_pair), | ||
1344 | WARN_EMULATED_SETUP(isel), | ||
1345 | WARN_EMULATED_SETUP(mcrxr), | ||
1346 | WARN_EMULATED_SETUP(mfpvr), | ||
1347 | WARN_EMULATED_SETUP(multiple), | ||
1348 | WARN_EMULATED_SETUP(popcntb), | ||
1349 | WARN_EMULATED_SETUP(spe), | ||
1350 | WARN_EMULATED_SETUP(string), | ||
1351 | WARN_EMULATED_SETUP(unaligned), | ||
1352 | #ifdef CONFIG_MATH_EMULATION | ||
1353 | WARN_EMULATED_SETUP(math), | ||
1354 | #elif defined(CONFIG_8XX_MINIMAL_FPEMU) | ||
1355 | WARN_EMULATED_SETUP(8xx), | ||
1356 | #endif | ||
1357 | #ifdef CONFIG_VSX | ||
1358 | WARN_EMULATED_SETUP(vsx), | ||
1359 | #endif | ||
1360 | }; | ||
1361 | |||
1362 | u32 ppc_warn_emulated; | ||
1363 | |||
1364 | void ppc_warn_emulated_print(const char *type) | ||
1365 | { | ||
1366 | if (printk_ratelimit()) | ||
1367 | pr_warning("%s used emulated %s instruction\n", current->comm, | ||
1368 | type); | ||
1369 | } | ||
1370 | |||
1371 | static int __init ppc_warn_emulated_init(void) | ||
1372 | { | ||
1373 | struct dentry *dir, *d; | ||
1374 | unsigned int i; | ||
1375 | struct ppc_emulated_entry *entries = (void *)&ppc_emulated; | ||
1376 | |||
1377 | if (!powerpc_debugfs_root) | ||
1378 | return -ENODEV; | ||
1379 | |||
1380 | dir = debugfs_create_dir("emulated_instructions", | ||
1381 | powerpc_debugfs_root); | ||
1382 | if (!dir) | ||
1383 | return -ENOMEM; | ||
1384 | |||
1385 | d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir, | ||
1386 | &ppc_warn_emulated); | ||
1387 | if (!d) | ||
1388 | goto fail; | ||
1389 | |||
1390 | for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) { | ||
1391 | d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir, | ||
1392 | (u32 *)&entries[i].val.counter); | ||
1393 | if (!d) | ||
1394 | goto fail; | ||
1395 | } | ||
1396 | |||
1397 | return 0; | ||
1398 | |||
1399 | fail: | ||
1400 | debugfs_remove_recursive(dir); | ||
1401 | return -ENOMEM; | ||
1402 | } | ||
1403 | |||
1404 | device_initcall(ppc_warn_emulated_init); | ||
1405 | |||
1406 | #endif /* CONFIG_PPC_EMULATED_STATS */ | ||
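Editor's note: with CONFIG_PPC_EMULATED_STATS enabled, the counters registered by ppc_warn_emulated_init() above appear as one debugfs u32 file per emulated instruction, plus the do_warn switch. A hypothetical user-space reader is sketched below; only the "emulated_instructions" directory name comes from the patch, while the /sys/kernel/debug mount point and the "powerpc" parent directory are assumptions.

	#include <stdio.h>
	#include <dirent.h>

	int main(void)
	{
		const char *dir = "/sys/kernel/debug/powerpc/emulated_instructions";
		char path[512], buf[64];
		struct dirent *e;
		DIR *d = opendir(dir);
		FILE *f;

		if (!d) {
			perror("opendir");
			return 1;
		}
		while ((e = readdir(d)) != NULL) {
			if (e->d_name[0] == '.')
				continue;
			snprintf(path, sizeof(path), "%s/%s", dir, e->d_name);
			f = fopen(path, "r");
			if (!f)
				continue;
			if (fgets(buf, sizeof(buf), f))
				printf("%-12s %s", e->d_name, buf);	/* name and count */
			fclose(f);
		}
		closedir(d);
		return 0;
	}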
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index 49ac3d6e1399..ef36cbbc5882 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S | |||
@@ -1,5 +1,215 @@ | |||
1 | #include <asm/processor.h> | ||
1 | #include <asm/ppc_asm.h> | 2 | #include <asm/ppc_asm.h> |
2 | #include <asm/reg.h> | 3 | #include <asm/reg.h> |
4 | #include <asm/asm-offsets.h> | ||
5 | #include <asm/cputable.h> | ||
6 | #include <asm/thread_info.h> | ||
7 | #include <asm/page.h> | ||
8 | |||
9 | /* | ||
10 | * load_up_altivec(unused, unused, tsk) | ||
11 | * Disable VMX for the task which had it previously, | ||
12 | * and save its vector registers in its thread_struct. | ||
13 | * Enables the VMX for use in the kernel on return. | ||
14 | * On SMP we know the VMX is free, since we give it up every | ||
15 | * switch (ie, no lazy save of the vector registers). | ||
16 | */ | ||
17 | _GLOBAL(load_up_altivec) | ||
18 | mfmsr r5 /* grab the current MSR */ | ||
19 | oris r5,r5,MSR_VEC@h | ||
20 | MTMSRD(r5) /* enable use of AltiVec now */ | ||
21 | isync | ||
22 | |||
23 | /* | ||
24 | * For SMP, we don't do lazy VMX switching because it just gets too | ||
25 | * horrendously complex, especially when a task switches from one CPU | ||
26 | * to another. Instead we call giveup_altvec in switch_to. | ||
27 | * VRSAVE isn't dealt with here, that is done in the normal context | ||
28 | * switch code. Note that we could rely on vrsave value to eventually | ||
29 | * avoid saving all of the VREGs here... | ||
30 | */ | ||
31 | #ifndef CONFIG_SMP | ||
32 | LOAD_REG_ADDRBASE(r3, last_task_used_altivec) | ||
33 | toreal(r3) | ||
34 | PPC_LL r4,ADDROFF(last_task_used_altivec)(r3) | ||
35 | PPC_LCMPI 0,r4,0 | ||
36 | beq 1f | ||
37 | |||
38 | /* Save VMX state to last_task_used_altivec's THREAD struct */ | ||
39 | toreal(r4) | ||
40 | addi r4,r4,THREAD | ||
41 | SAVE_32VRS(0,r5,r4) | ||
42 | mfvscr vr0 | ||
43 | li r10,THREAD_VSCR | ||
44 | stvx vr0,r10,r4 | ||
45 | /* Disable VMX for last_task_used_altivec */ | ||
46 | PPC_LL r5,PT_REGS(r4) | ||
47 | toreal(r5) | ||
48 | PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
49 | lis r10,MSR_VEC@h | ||
50 | andc r4,r4,r10 | ||
51 | PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
52 | 1: | ||
53 | #endif /* CONFIG_SMP */ | ||
54 | |||
55 | /* Hack: if we get an altivec unavailable trap with VRSAVE | ||
56 | * set to all zeros, we assume this is a broken application | ||
57 | * that fails to set it properly, and thus we switch it to | ||
58 | * all 1's | ||
59 | */ | ||
60 | mfspr r4,SPRN_VRSAVE | ||
61 | cmpdi 0,r4,0 | ||
62 | bne+ 1f | ||
63 | li r4,-1 | ||
64 | mtspr SPRN_VRSAVE,r4 | ||
65 | 1: | ||
66 | /* enable use of VMX after return */ | ||
67 | #ifdef CONFIG_PPC32 | ||
68 | mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ | ||
69 | oris r9,r9,MSR_VEC@h | ||
70 | #else | ||
71 | ld r4,PACACURRENT(r13) | ||
72 | addi r5,r4,THREAD /* Get THREAD */ | ||
73 | oris r12,r12,MSR_VEC@h | ||
74 | std r12,_MSR(r1) | ||
75 | #endif | ||
76 | li r4,1 | ||
77 | li r10,THREAD_VSCR | ||
78 | stw r4,THREAD_USED_VR(r5) | ||
79 | lvx vr0,r10,r5 | ||
80 | mtvscr vr0 | ||
81 | REST_32VRS(0,r4,r5) | ||
82 | #ifndef CONFIG_SMP | ||
83 | /* Update last_task_used_math to 'current' */ | ||
84 | subi r4,r5,THREAD /* Back to 'current' */ | ||
85 | fromreal(r4) | ||
86 | PPC_STL r4,ADDROFF(last_task_used_math)(r3) | ||
87 | #endif /* CONFIG_SMP */ | ||
88 | /* restore registers and return */ | ||
89 | blr | ||
90 | |||
91 | /* | ||
92 | * giveup_altivec(tsk) | ||
93 | * Disable VMX for the task given as the argument, | ||
94 | * and save the vector registers in its thread_struct. | ||
95 | * Enables the VMX for use in the kernel on return. | ||
96 | */ | ||
97 | _GLOBAL(giveup_altivec) | ||
98 | mfmsr r5 | ||
99 | oris r5,r5,MSR_VEC@h | ||
100 | SYNC | ||
101 | MTMSRD(r5) /* enable use of VMX now */ | ||
102 | isync | ||
103 | PPC_LCMPI 0,r3,0 | ||
104 | beqlr- /* if no previous owner, done */ | ||
105 | addi r3,r3,THREAD /* want THREAD of task */ | ||
106 | PPC_LL r5,PT_REGS(r3) | ||
107 | PPC_LCMPI 0,r5,0 | ||
108 | SAVE_32VRS(0,r4,r3) | ||
109 | mfvscr vr0 | ||
110 | li r4,THREAD_VSCR | ||
111 | stvx vr0,r4,r3 | ||
112 | beq 1f | ||
113 | PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
114 | #ifdef CONFIG_VSX | ||
115 | BEGIN_FTR_SECTION | ||
116 | lis r3,(MSR_VEC|MSR_VSX)@h | ||
117 | FTR_SECTION_ELSE | ||
118 | lis r3,MSR_VEC@h | ||
119 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX) | ||
120 | #else | ||
121 | lis r3,MSR_VEC@h | ||
122 | #endif | ||
123 | andc r4,r4,r3 /* disable FP for previous task */ | ||
124 | PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
125 | 1: | ||
126 | #ifndef CONFIG_SMP | ||
127 | li r5,0 | ||
128 | LOAD_REG_ADDRBASE(r4,last_task_used_altivec) | ||
129 | PPC_STL r5,ADDROFF(last_task_used_altivec)(r4) | ||
130 | #endif /* CONFIG_SMP */ | ||
131 | blr | ||
132 | |||
133 | #ifdef CONFIG_VSX | ||
134 | |||
135 | #ifdef CONFIG_PPC32 | ||
136 | #error This asm code isn't ready for 32-bit kernels | ||
137 | #endif | ||
138 | |||
139 | /* | ||
140 | * load_up_vsx(unused, unused, tsk) | ||
141 | * Disable VSX for the task which had it previously, | ||
142 | * and save its vector registers in its thread_struct. | ||
143 | * Reuse the fp and vsx saves, but first check to see if they have | ||
144 | * been saved already. | ||
145 | */ | ||
146 | _GLOBAL(load_up_vsx) | ||
147 | /* Load FP and VSX registers if they haven't been done yet */ | ||
148 | andi. r5,r12,MSR_FP | ||
149 | beql+ load_up_fpu /* skip if already loaded */ | ||
150 | andis. r5,r12,MSR_VEC@h | ||
151 | beql+ load_up_altivec /* skip if already loaded */ | ||
152 | |||
153 | #ifndef CONFIG_SMP | ||
154 | ld r3,last_task_used_vsx@got(r2) | ||
155 | ld r4,0(r3) | ||
156 | cmpdi 0,r4,0 | ||
157 | beq 1f | ||
158 | /* Disable VSX for last_task_used_vsx */ | ||
159 | addi r4,r4,THREAD | ||
160 | ld r5,PT_REGS(r4) | ||
161 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
162 | lis r6,MSR_VSX@h | ||
163 | andc r6,r4,r6 | ||
164 | std r6,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
165 | 1: | ||
166 | #endif /* CONFIG_SMP */ | ||
167 | ld r4,PACACURRENT(r13) | ||
168 | addi r4,r4,THREAD /* Get THREAD */ | ||
169 | li r6,1 | ||
170 | stw r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */ | ||
171 | /* enable use of VSX after return */ | ||
172 | oris r12,r12,MSR_VSX@h | ||
173 | std r12,_MSR(r1) | ||
174 | #ifndef CONFIG_SMP | ||
175 | /* Update last_task_used_math to 'current' */ | ||
176 | ld r4,PACACURRENT(r13) | ||
177 | std r4,0(r3) | ||
178 | #endif /* CONFIG_SMP */ | ||
179 | b fast_exception_return | ||
180 | |||
181 | /* | ||
182 | * __giveup_vsx(tsk) | ||
183 | * Disable VSX for the task given as the argument. | ||
184 | * Does NOT save vsx registers. | ||
185 | * Enables the VSX for use in the kernel on return. | ||
186 | */ | ||
187 | _GLOBAL(__giveup_vsx) | ||
188 | mfmsr r5 | ||
189 | oris r5,r5,MSR_VSX@h | ||
190 | mtmsrd r5 /* enable use of VSX now */ | ||
191 | isync | ||
192 | |||
193 | cmpdi 0,r3,0 | ||
194 | beqlr- /* if no previous owner, done */ | ||
195 | addi r3,r3,THREAD /* want THREAD of task */ | ||
196 | ld r5,PT_REGS(r3) | ||
197 | cmpdi 0,r5,0 | ||
198 | beq 1f | ||
199 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
200 | lis r3,MSR_VSX@h | ||
201 | andc r4,r4,r3 /* disable VSX for previous task */ | ||
202 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | ||
203 | 1: | ||
204 | #ifndef CONFIG_SMP | ||
205 | li r5,0 | ||
206 | ld r4,last_task_used_vsx@got(r2) | ||
207 | std r5,0(r4) | ||
208 | #endif /* CONFIG_SMP */ | ||
209 | blr | ||
210 | |||
211 | #endif /* CONFIG_VSX */ | ||
212 | |||
3 | 213 | ||
4 | /* | 214 | /* |
5 | * The routines below are in assembler so we can closely control the | 215 | * The routines below are in assembler so we can closely control the |