about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-03-07 13:46:10 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-03-07 13:46:10 -0500
commitf7d6a7283aa1de430c6323a9714d1a787bc2d1ea (patch)
treece4a39ead0a853788344ae70f3f7f3afedb5ab52
parent8c2c8ed8b85d4012d26caf1c221c291c687d89f0 (diff)
parenta7d2475af7aedcb9b5c6343989a8bfadbf84429b (diff)
Merge tag 'powerpc-4.11-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman: "Five fairly small fixes for things that went in this cycle. A fairly large patch to rework the CAS logic on Power9, necessitated by a late change to the firmware API, and we can't boot without it. Three fixes going to stable, allowing more instructions to be emulated on LE, fixing a boot crash on 32-bit Freescale BookE machines, and the OPAL XICS workaround. And a patch from me to sort the selects under CONFIG_PPC. Annoying churn, but worth it in the long run, and best for it to go in now to avoid conflicts. Thanks to: Alexey Kardashevskiy, Anton Blanchard, Balbir Singh, Gautham R. Shenoy, Laurentiu Tudor, Nicholas Piggin, Paul Mackerras, Ravi Bangoria, Sachin Sant, Shile Zhang, Suraj Jitindar Singh" * tag 'powerpc-4.11-3' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: powerpc: Sort the selects under CONFIG_PPC powerpc/64: Fix L1D cache shape vector reporting L1I values powerpc/64: Avoid panic during boot due to divide by zero in init_cache_info() powerpc: Update to new option-vector-5 format for CAS powerpc: Parse the command line before calling CAS powerpc/xics: Work around limitations of OPAL XICS priority handling powerpc/64: Fix checksum folding in csum_add() powerpc/powernv: Fix opal tracepoints with JUMP_LABEL=n powerpc/booke: Fix boot crash due to null hugepd powerpc: Fix compiling a BE kernel with a powerpc64le toolchain selftest/powerpc: Fix false failures for skipped tests powerpc/powernv: Fix bug due to labeling ambiguity in power_enter_stop powerpc/64: Invalidate process table caching after setting process table powerpc: emulate_step() tests for load/store instructions powerpc: Emulation support for load/store instructions on LE
-rw-r--r--arch/powerpc/Kconfig138
-rw-r--r--arch/powerpc/Makefile11
-rw-r--r--arch/powerpc/include/asm/checksum.h2
-rw-r--r--arch/powerpc/include/asm/cpuidle.h4
-rw-r--r--arch/powerpc/include/asm/elf.h4
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h7
-rw-r--r--arch/powerpc/include/asm/prom.h18
-rw-r--r--arch/powerpc/kernel/idle_book3s.S10
-rw-r--r--arch/powerpc/kernel/prom_init.c120
-rw-r--r--arch/powerpc/kernel/setup_64.c5
-rw-r--r--arch/powerpc/lib/Makefile1
-rw-r--r--arch/powerpc/lib/sstep.c20
-rw-r--r--arch/powerpc/lib/test_emulate_step.c434
-rw-r--r--arch/powerpc/mm/init_64.c36
-rw-r--r--arch/powerpc/mm/pgtable-radix.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S4
-rw-r--r--arch/powerpc/sysdev/xics/icp-opal.c10
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c17
-rw-r--r--tools/testing/selftests/powerpc/harness.c6
20 files changed, 729 insertions, 124 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 494091762bd7..97a8bc8a095c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -80,93 +80,99 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK
80config PPC 80config PPC
81 bool 81 bool
82 default y 82 default y
83 select BUILDTIME_EXTABLE_SORT 83 #
84 # Please keep this list sorted alphabetically.
85 #
86 select ARCH_HAS_DEVMEM_IS_ALLOWED
87 select ARCH_HAS_DMA_SET_COHERENT_MASK
88 select ARCH_HAS_ELF_RANDOMIZE
89 select ARCH_HAS_GCOV_PROFILE_ALL
90 select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
91 select ARCH_HAS_SG_CHAIN
92 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
93 select ARCH_HAS_UBSAN_SANITIZE_ALL
94 select ARCH_HAVE_NMI_SAFE_CMPXCHG
84 select ARCH_MIGHT_HAVE_PC_PARPORT 95 select ARCH_MIGHT_HAVE_PC_PARPORT
85 select ARCH_MIGHT_HAVE_PC_SERIO 96 select ARCH_MIGHT_HAVE_PC_SERIO
97 select ARCH_SUPPORTS_ATOMIC_RMW
98 select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
99 select ARCH_USE_BUILTIN_BSWAP
100 select ARCH_USE_CMPXCHG_LOCKREF if PPC64
101 select ARCH_WANT_IPC_PARSE_VERSION
86 select BINFMT_ELF 102 select BINFMT_ELF
87 select ARCH_HAS_ELF_RANDOMIZE 103 select BUILDTIME_EXTABLE_SORT
88 select OF 104 select CLONE_BACKWARDS
89 select OF_EARLY_FLATTREE 105 select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
90 select OF_RESERVED_MEM 106 select EDAC_ATOMIC_SCRUB
91 select HAVE_FTRACE_MCOUNT_RECORD 107 select EDAC_SUPPORT
108 select GENERIC_ATOMIC64 if PPC32
109 select GENERIC_CLOCKEVENTS
110 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
111 select GENERIC_CMOS_UPDATE
112 select GENERIC_CPU_AUTOPROBE
113 select GENERIC_IRQ_SHOW
114 select GENERIC_IRQ_SHOW_LEVEL
115 select GENERIC_SMP_IDLE_THREAD
116 select GENERIC_STRNCPY_FROM_USER
117 select GENERIC_STRNLEN_USER
118 select GENERIC_TIME_VSYSCALL_OLD
119 select HAVE_ARCH_AUDITSYSCALL
120 select HAVE_ARCH_HARDENED_USERCOPY
121 select HAVE_ARCH_JUMP_LABEL
122 select HAVE_ARCH_KGDB
123 select HAVE_ARCH_SECCOMP_FILTER
124 select HAVE_ARCH_TRACEHOOK
125 select HAVE_CBPF_JIT if !PPC64
126 select HAVE_CONTEXT_TRACKING if PPC64
127 select HAVE_DEBUG_KMEMLEAK
128 select HAVE_DEBUG_STACKOVERFLOW
129 select HAVE_DMA_API_DEBUG
92 select HAVE_DYNAMIC_FTRACE 130 select HAVE_DYNAMIC_FTRACE
93 select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL 131 select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
94 select HAVE_FUNCTION_TRACER 132 select HAVE_EBPF_JIT if PPC64
133 select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
134 select HAVE_FTRACE_MCOUNT_RECORD
95 select HAVE_FUNCTION_GRAPH_TRACER 135 select HAVE_FUNCTION_GRAPH_TRACER
136 select HAVE_FUNCTION_TRACER
96 select HAVE_GCC_PLUGINS 137 select HAVE_GCC_PLUGINS
97 select SYSCTL_EXCEPTION_TRACE 138 select HAVE_GENERIC_RCU_GUP
98 select VIRT_TO_BUS if !PPC64 139 select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
99 select HAVE_IDE 140 select HAVE_IDE
100 select HAVE_IOREMAP_PROT 141 select HAVE_IOREMAP_PROT
101 select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU) 142 select HAVE_IRQ_EXIT_ON_IRQ_STACK
143 select HAVE_KERNEL_GZIP
102 select HAVE_KPROBES 144 select HAVE_KPROBES
103 select HAVE_OPTPROBES if PPC64
104 select HAVE_ARCH_KGDB
105 select HAVE_KRETPROBES 145 select HAVE_KRETPROBES
106 select HAVE_ARCH_TRACEHOOK 146 select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
107 select HAVE_MEMBLOCK 147 select HAVE_MEMBLOCK
108 select HAVE_MEMBLOCK_NODE_MAP 148 select HAVE_MEMBLOCK_NODE_MAP
109 select HAVE_DMA_API_DEBUG 149 select HAVE_MOD_ARCH_SPECIFIC
150 select HAVE_NMI if PERF_EVENTS
110 select HAVE_OPROFILE 151 select HAVE_OPROFILE
111 select HAVE_DEBUG_KMEMLEAK 152 select HAVE_OPTPROBES if PPC64
112 select ARCH_HAS_SG_CHAIN
113 select GENERIC_ATOMIC64 if PPC32
114 select HAVE_PERF_EVENTS 153 select HAVE_PERF_EVENTS
154 select HAVE_PERF_EVENTS_NMI if PPC64
115 select HAVE_PERF_REGS 155 select HAVE_PERF_REGS
116 select HAVE_PERF_USER_STACK_DUMP 156 select HAVE_PERF_USER_STACK_DUMP
157 select HAVE_RCU_TABLE_FREE if SMP
117 select HAVE_REGS_AND_STACK_ACCESS_API 158 select HAVE_REGS_AND_STACK_ACCESS_API
118 select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) 159 select HAVE_SYSCALL_TRACEPOINTS
119 select ARCH_WANT_IPC_PARSE_VERSION 160 select HAVE_VIRT_CPU_ACCOUNTING
120 select SPARSE_IRQ
121 select IRQ_DOMAIN 161 select IRQ_DOMAIN
122 select GENERIC_IRQ_SHOW
123 select GENERIC_IRQ_SHOW_LEVEL
124 select IRQ_FORCED_THREADING 162 select IRQ_FORCED_THREADING
125 select HAVE_RCU_TABLE_FREE if SMP
126 select HAVE_SYSCALL_TRACEPOINTS
127 select HAVE_CBPF_JIT if !PPC64
128 select HAVE_EBPF_JIT if PPC64
129 select HAVE_ARCH_JUMP_LABEL
130 select ARCH_HAVE_NMI_SAFE_CMPXCHG
131 select ARCH_HAS_GCOV_PROFILE_ALL
132 select GENERIC_SMP_IDLE_THREAD
133 select GENERIC_CMOS_UPDATE
134 select GENERIC_TIME_VSYSCALL_OLD
135 select GENERIC_CLOCKEVENTS
136 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
137 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
138 select GENERIC_STRNCPY_FROM_USER
139 select GENERIC_STRNLEN_USER
140 select HAVE_MOD_ARCH_SPECIFIC
141 select MODULES_USE_ELF_RELA 163 select MODULES_USE_ELF_RELA
142 select CLONE_BACKWARDS
143 select ARCH_USE_BUILTIN_BSWAP
144 select OLD_SIGSUSPEND
145 select OLD_SIGACTION if PPC32
146 select HAVE_DEBUG_STACKOVERFLOW
147 select HAVE_IRQ_EXIT_ON_IRQ_STACK
148 select ARCH_USE_CMPXCHG_LOCKREF if PPC64
149 select HAVE_ARCH_AUDITSYSCALL
150 select ARCH_SUPPORTS_ATOMIC_RMW
151 select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
152 select NO_BOOTMEM 164 select NO_BOOTMEM
153 select HAVE_GENERIC_RCU_GUP 165 select OF
154 select HAVE_PERF_EVENTS_NMI if PPC64 166 select OF_EARLY_FLATTREE
155 select HAVE_NMI if PERF_EVENTS 167 select OF_RESERVED_MEM
156 select EDAC_SUPPORT 168 select OLD_SIGACTION if PPC32
157 select EDAC_ATOMIC_SCRUB 169 select OLD_SIGSUSPEND
158 select ARCH_HAS_DMA_SET_COHERENT_MASK 170 select SPARSE_IRQ
159 select ARCH_HAS_DEVMEM_IS_ALLOWED 171 select SYSCTL_EXCEPTION_TRACE
160 select HAVE_ARCH_SECCOMP_FILTER 172 select VIRT_TO_BUS if !PPC64
161 select ARCH_HAS_UBSAN_SANITIZE_ALL 173 #
162 select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT 174 # Please keep this list sorted alphabetically.
163 select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS 175 #
164 select GENERIC_CPU_AUTOPROBE
165 select HAVE_VIRT_CPU_ACCOUNTING
166 select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
167 select HAVE_ARCH_HARDENED_USERCOPY
168 select HAVE_KERNEL_GZIP
169 select HAVE_CONTEXT_TRACKING if PPC64
170 176
171config GENERIC_CSUM 177config GENERIC_CSUM
172 def_bool n 178 def_bool n
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 31286fa7873c..19b0d1a81959 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -72,8 +72,15 @@ GNUTARGET := powerpc
72MULTIPLEWORD := -mmultiple 72MULTIPLEWORD := -mmultiple
73endif 73endif
74 74
75cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) 75ifdef CONFIG_PPC64
76cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
77cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
78aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
79aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
80endif
81
76cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian 82cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
83cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
77ifneq ($(cc-name),clang) 84ifneq ($(cc-name),clang)
78 cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align 85 cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
79endif 86endif
@@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
113CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc)) 120CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
114AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2) 121AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
115else 122else
123CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
116CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc) 124CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
125AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
117endif 126endif
118CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc)) 127CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
119CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) 128CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 4e63787dc3be..842124b199b5 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -112,7 +112,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
112 112
113#ifdef __powerpc64__ 113#ifdef __powerpc64__
114 res += (__force u64)addend; 114 res += (__force u64)addend;
115 return (__force __wsum)((u32)res + (res >> 32)); 115 return (__force __wsum) from64to32(res);
116#else 116#else
117 asm("addc %0,%0,%1;" 117 asm("addc %0,%0,%1;"
118 "addze %0,%0;" 118 "addze %0,%0;"
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index fd321eb423cb..155731557c9b 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -70,8 +70,8 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err)
70 std r0,0(r1); \ 70 std r0,0(r1); \
71 ptesync; \ 71 ptesync; \
72 ld r0,0(r1); \ 72 ld r0,0(r1); \
731: cmpd cr0,r0,r0; \ 73236: cmpd cr0,r0,r0; \
74 bne 1b; \ 74 bne 236b; \
75 IDLE_INST; \ 75 IDLE_INST; \
76 76
77#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \ 77#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 93b9b84568e8..09bde6e34f5d 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -144,8 +144,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
144#define ARCH_DLINFO_CACHE_GEOMETRY \ 144#define ARCH_DLINFO_CACHE_GEOMETRY \
145 NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \ 145 NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \
146 NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \ 146 NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \
147 NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1i.size); \ 147 NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size); \
148 NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1i)); \ 148 NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d)); \
149 NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \ 149 NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \
150 NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \ 150 NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \
151 NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \ 151 NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 0cd8a3852763..e5805ad78e12 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -230,7 +230,7 @@ static inline int hugepd_ok(hugepd_t hpd)
230 return ((hpd_val(hpd) & 0x4) != 0); 230 return ((hpd_val(hpd) & 0x4) != 0);
231#else 231#else
232 /* We clear the top bit to indicate hugepd */ 232 /* We clear the top bit to indicate hugepd */
233 return ((hpd_val(hpd) & PD_HUGE) == 0); 233 return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
234#endif 234#endif
235} 235}
236 236
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index d99bd442aacb..e7d6d86563ee 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -284,6 +284,13 @@
284#define PPC_INST_BRANCH_COND 0x40800000 284#define PPC_INST_BRANCH_COND 0x40800000
285#define PPC_INST_LBZCIX 0x7c0006aa 285#define PPC_INST_LBZCIX 0x7c0006aa
286#define PPC_INST_STBCIX 0x7c0007aa 286#define PPC_INST_STBCIX 0x7c0007aa
287#define PPC_INST_LWZX 0x7c00002e
288#define PPC_INST_LFSX 0x7c00042e
289#define PPC_INST_STFSX 0x7c00052e
290#define PPC_INST_LFDX 0x7c0004ae
291#define PPC_INST_STFDX 0x7c0005ae
292#define PPC_INST_LVX 0x7c0000ce
293#define PPC_INST_STVX 0x7c0001ce
287 294
288/* macros to insert fields into opcodes */ 295/* macros to insert fields into opcodes */
289#define ___PPC_RA(a) (((a) & 0x1f) << 16) 296#define ___PPC_RA(a) (((a) & 0x1f) << 16)
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 4a90634e8322..35c00d7a0cf8 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -160,12 +160,18 @@ struct of_drconf_cell {
160#define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */ 160#define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */
161#define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */ 161#define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */
162#define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */ 162#define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */
163#define OV5_MMU_RADIX_300 0x1880 /* ISA v3.00 radix MMU supported */ 163/* MMU Base Architecture */
164#define OV5_MMU_HASH_300 0x1840 /* ISA v3.00 hash MMU supported */ 164#define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */
165#define OV5_MMU_SEGM_RADIX 0x1820 /* radix mode (no segmentation) */ 165#define OV5_MMU_HASH 0x1800 /* Hash MMU Only */
166#define OV5_MMU_PROC_TBL 0x1810 /* hcall selects SLB or proc table */ 166#define OV5_MMU_RADIX 0x1840 /* Radix MMU Only */
167#define OV5_MMU_SLB 0x1800 /* always use SLB */ 167#define OV5_MMU_EITHER 0x1880 /* Hash or Radix Supported */
168#define OV5_MMU_GTSE 0x1808 /* Guest translation shootdown */ 168#define OV5_MMU_DYNAMIC 0x18C0 /* Hash or Radix Can Switch Later */
169#define OV5_NMMU 0x1820 /* Nest MMU Available */
170/* Hash Table Extensions */
171#define OV5_HASH_SEG_TBL 0x1980 /* In Memory Segment Tables Available */
172#define OV5_HASH_GTSE 0x1940 /* Guest Translation Shoot Down Avail */
173/* Radix Table Extensions */
174#define OV5_RADIX_GTSE 0x1A40 /* Guest Translation Shoot Down Avail */
169 175
170/* Option Vector 6: IBM PAPR hints */ 176/* Option Vector 6: IBM PAPR hints */
171#define OV6_LINUX 0x02 /* Linux is our OS */ 177#define OV6_LINUX 0x02 /* Linux is our OS */
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 5f61cc0349c0..995728736677 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -276,19 +276,21 @@ power_enter_stop:
276 */ 276 */
277 andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED 277 andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED
278 clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */ 278 clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */
279 bne 1f 279 bne .Lhandle_esl_ec_set
280 IDLE_STATE_ENTER_SEQ(PPC_STOP) 280 IDLE_STATE_ENTER_SEQ(PPC_STOP)
281 li r3,0 /* Since we didn't lose state, return 0 */ 281 li r3,0 /* Since we didn't lose state, return 0 */
282 b pnv_wakeup_noloss 282 b pnv_wakeup_noloss
283
284.Lhandle_esl_ec_set:
283/* 285/*
284 * Check if the requested state is a deep idle state. 286 * Check if the requested state is a deep idle state.
285 */ 287 */
2861: LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) 288 LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state)
287 ld r4,ADDROFF(pnv_first_deep_stop_state)(r5) 289 ld r4,ADDROFF(pnv_first_deep_stop_state)(r5)
288 cmpd r3,r4 290 cmpd r3,r4
289 bge 2f 291 bge .Lhandle_deep_stop
290 IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP) 292 IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP)
2912: 293.Lhandle_deep_stop:
292/* 294/*
293 * Entering deep idle state. 295 * Entering deep idle state.
294 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to 296 * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index a3944540fe0d..1c1b44ec7642 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -168,6 +168,14 @@ static unsigned long __initdata prom_tce_alloc_start;
168static unsigned long __initdata prom_tce_alloc_end; 168static unsigned long __initdata prom_tce_alloc_end;
169#endif 169#endif
170 170
171static bool __initdata prom_radix_disable;
172
173struct platform_support {
174 bool hash_mmu;
175 bool radix_mmu;
176 bool radix_gtse;
177};
178
171/* Platforms codes are now obsolete in the kernel. Now only used within this 179/* Platforms codes are now obsolete in the kernel. Now only used within this
172 * file and ultimately gone too. Feel free to change them if you need, they 180 * file and ultimately gone too. Feel free to change them if you need, they
173 * are not shared with anything outside of this file anymore 181 * are not shared with anything outside of this file anymore
@@ -626,6 +634,12 @@ static void __init early_cmdline_parse(void)
626 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); 634 prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
627#endif 635#endif
628 } 636 }
637
638 opt = strstr(prom_cmd_line, "disable_radix");
639 if (opt) {
640 prom_debug("Radix disabled from cmdline\n");
641 prom_radix_disable = true;
642 }
629} 643}
630 644
631#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 645#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
@@ -695,6 +709,8 @@ struct option_vector5 {
695 u8 byte22; 709 u8 byte22;
696 u8 intarch; 710 u8 intarch;
697 u8 mmu; 711 u8 mmu;
712 u8 hash_ext;
713 u8 radix_ext;
698} __packed; 714} __packed;
699 715
700struct option_vector6 { 716struct option_vector6 {
@@ -850,8 +866,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
850 .reserved3 = 0, 866 .reserved3 = 0,
851 .subprocessors = 1, 867 .subprocessors = 1,
852 .intarch = 0, 868 .intarch = 0,
853 .mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) | 869 .mmu = 0,
854 OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE), 870 .hash_ext = 0,
871 .radix_ext = 0,
855 }, 872 },
856 873
857 /* option vector 6: IBM PAPR hints */ 874 /* option vector 6: IBM PAPR hints */
@@ -990,6 +1007,92 @@ static int __init prom_count_smt_threads(void)
990 1007
991} 1008}
992 1009
1010static void __init prom_parse_mmu_model(u8 val,
1011 struct platform_support *support)
1012{
1013 switch (val) {
1014 case OV5_FEAT(OV5_MMU_DYNAMIC):
1015 case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */
1016 prom_debug("MMU - either supported\n");
1017 support->radix_mmu = !prom_radix_disable;
1018 support->hash_mmu = true;
1019 break;
1020 case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */
1021 prom_debug("MMU - radix only\n");
1022 if (prom_radix_disable) {
1023 /*
1024 * If we __have__ to do radix, we're better off ignoring
1025 * the command line rather than not booting.
1026 */
1027 prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
1028 }
1029 support->radix_mmu = true;
1030 break;
1031 case OV5_FEAT(OV5_MMU_HASH):
1032 prom_debug("MMU - hash only\n");
1033 support->hash_mmu = true;
1034 break;
1035 default:
1036 prom_debug("Unknown mmu support option: 0x%x\n", val);
1037 break;
1038 }
1039}
1040
1041static void __init prom_parse_platform_support(u8 index, u8 val,
1042 struct platform_support *support)
1043{
1044 switch (index) {
1045 case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */
1046 prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
1047 break;
1048 case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
1049 if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
1050 prom_debug("Radix - GTSE supported\n");
1051 support->radix_gtse = true;
1052 }
1053 break;
1054 }
1055}
1056
1057static void __init prom_check_platform_support(void)
1058{
1059 struct platform_support supported = {
1060 .hash_mmu = false,
1061 .radix_mmu = false,
1062 .radix_gtse = false
1063 };
1064 int prop_len = prom_getproplen(prom.chosen,
1065 "ibm,arch-vec-5-platform-support");
1066 if (prop_len > 1) {
1067 int i;
1068 u8 vec[prop_len];
1069 prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
1070 prop_len);
1071 prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
1072 &vec, sizeof(vec));
1073 for (i = 0; i < prop_len; i += 2) {
1074 prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
1075 , vec[i]
1076 , vec[i + 1]);
1077 prom_parse_platform_support(vec[i], vec[i + 1],
1078 &supported);
1079 }
1080 }
1081
1082 if (supported.radix_mmu && supported.radix_gtse) {
1083 /* Radix preferred - but we require GTSE for now */
1084 prom_debug("Asking for radix with GTSE\n");
1085 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
1086 ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
1087 } else if (supported.hash_mmu) {
1088 /* Default to hash mmu (if we can) */
1089 prom_debug("Asking for hash\n");
1090 ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH);
1091 } else {
1092 /* We're probably on a legacy hypervisor */
1093 prom_debug("Assuming legacy hash support\n");
1094 }
1095}
993 1096
994static void __init prom_send_capabilities(void) 1097static void __init prom_send_capabilities(void)
995{ 1098{
@@ -997,6 +1100,9 @@ static void __init prom_send_capabilities(void)
997 prom_arg_t ret; 1100 prom_arg_t ret;
998 u32 cores; 1101 u32 cores;
999 1102
1103 /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
1104 prom_check_platform_support();
1105
1000 root = call_prom("open", 1, 1, ADDR("/")); 1106 root = call_prom("open", 1, 1, ADDR("/"));
1001 if (root != 0) { 1107 if (root != 0) {
1002 /* We need to tell the FW about the number of cores we support. 1108 /* We need to tell the FW about the number of cores we support.
@@ -2993,6 +3099,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2993 */ 3099 */
2994 prom_check_initrd(r3, r4); 3100 prom_check_initrd(r3, r4);
2995 3101
3102 /*
3103 * Do early parsing of command line
3104 */
3105 early_cmdline_parse();
3106
2996#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 3107#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
2997 /* 3108 /*
2998 * On pSeries, inform the firmware about our capabilities 3109 * On pSeries, inform the firmware about our capabilities
@@ -3009,11 +3120,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3009 copy_and_flush(0, kbase, 0x100, 0); 3120 copy_and_flush(0, kbase, 0x100, 0);
3010 3121
3011 /* 3122 /*
3012 * Do early parsing of command line
3013 */
3014 early_cmdline_parse();
3015
3016 /*
3017 * Initialize memory management within prom_init 3123 * Initialize memory management within prom_init
3018 */ 3124 */
3019 prom_init_mem(); 3125 prom_init_mem();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index adf2084f214b..9cfaa8b69b5f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -408,7 +408,10 @@ static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
408 info->line_size = lsize; 408 info->line_size = lsize;
409 info->block_size = bsize; 409 info->block_size = bsize;
410 info->log_block_size = __ilog2(bsize); 410 info->log_block_size = __ilog2(bsize);
411 info->blocks_per_page = PAGE_SIZE / bsize; 411 if (bsize)
412 info->blocks_per_page = PAGE_SIZE / bsize;
413 else
414 info->blocks_per_page = 0;
412 415
413 if (sets == 0) 416 if (sets == 0)
414 info->assoc = 0xffff; 417 info->assoc = 0xffff;
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 0e649d72fe8d..2b5e09020cfe 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -20,6 +20,7 @@ obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \
20 20
21obj64-$(CONFIG_SMP) += locks.o 21obj64-$(CONFIG_SMP) += locks.o
22obj64-$(CONFIG_ALTIVEC) += vmx-helper.o 22obj64-$(CONFIG_ALTIVEC) += vmx-helper.o
23obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o
23 24
24obj-y += checksum_$(BITS).o checksum_wrappers.o 25obj-y += checksum_$(BITS).o checksum_wrappers.o
25 26
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 846dba2c6360..9c542ec70c5b 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1799,8 +1799,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1799 goto instr_done; 1799 goto instr_done;
1800 1800
1801 case LARX: 1801 case LARX:
1802 if (regs->msr & MSR_LE)
1803 return 0;
1804 if (op.ea & (size - 1)) 1802 if (op.ea & (size - 1))
1805 break; /* can't handle misaligned */ 1803 break; /* can't handle misaligned */
1806 if (!address_ok(regs, op.ea, size)) 1804 if (!address_ok(regs, op.ea, size))
@@ -1823,8 +1821,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1823 goto ldst_done; 1821 goto ldst_done;
1824 1822
1825 case STCX: 1823 case STCX:
1826 if (regs->msr & MSR_LE)
1827 return 0;
1828 if (op.ea & (size - 1)) 1824 if (op.ea & (size - 1))
1829 break; /* can't handle misaligned */ 1825 break; /* can't handle misaligned */
1830 if (!address_ok(regs, op.ea, size)) 1826 if (!address_ok(regs, op.ea, size))
@@ -1849,8 +1845,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1849 goto ldst_done; 1845 goto ldst_done;
1850 1846
1851 case LOAD: 1847 case LOAD:
1852 if (regs->msr & MSR_LE)
1853 return 0;
1854 err = read_mem(&regs->gpr[op.reg], op.ea, size, regs); 1848 err = read_mem(&regs->gpr[op.reg], op.ea, size, regs);
1855 if (!err) { 1849 if (!err) {
1856 if (op.type & SIGNEXT) 1850 if (op.type & SIGNEXT)
@@ -1862,8 +1856,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1862 1856
1863#ifdef CONFIG_PPC_FPU 1857#ifdef CONFIG_PPC_FPU
1864 case LOAD_FP: 1858 case LOAD_FP:
1865 if (regs->msr & MSR_LE)
1866 return 0;
1867 if (size == 4) 1859 if (size == 4)
1868 err = do_fp_load(op.reg, do_lfs, op.ea, size, regs); 1860 err = do_fp_load(op.reg, do_lfs, op.ea, size, regs);
1869 else 1861 else
@@ -1872,15 +1864,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1872#endif 1864#endif
1873#ifdef CONFIG_ALTIVEC 1865#ifdef CONFIG_ALTIVEC
1874 case LOAD_VMX: 1866 case LOAD_VMX:
1875 if (regs->msr & MSR_LE)
1876 return 0;
1877 err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs); 1867 err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs);
1878 goto ldst_done; 1868 goto ldst_done;
1879#endif 1869#endif
1880#ifdef CONFIG_VSX 1870#ifdef CONFIG_VSX
1881 case LOAD_VSX: 1871 case LOAD_VSX:
1882 if (regs->msr & MSR_LE)
1883 return 0;
1884 err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs); 1872 err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs);
1885 goto ldst_done; 1873 goto ldst_done;
1886#endif 1874#endif
@@ -1903,8 +1891,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1903 goto instr_done; 1891 goto instr_done;
1904 1892
1905 case STORE: 1893 case STORE:
1906 if (regs->msr & MSR_LE)
1907 return 0;
1908 if ((op.type & UPDATE) && size == sizeof(long) && 1894 if ((op.type & UPDATE) && size == sizeof(long) &&
1909 op.reg == 1 && op.update_reg == 1 && 1895 op.reg == 1 && op.update_reg == 1 &&
1910 !(regs->msr & MSR_PR) && 1896 !(regs->msr & MSR_PR) &&
@@ -1917,8 +1903,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1917 1903
1918#ifdef CONFIG_PPC_FPU 1904#ifdef CONFIG_PPC_FPU
1919 case STORE_FP: 1905 case STORE_FP:
1920 if (regs->msr & MSR_LE)
1921 return 0;
1922 if (size == 4) 1906 if (size == 4)
1923 err = do_fp_store(op.reg, do_stfs, op.ea, size, regs); 1907 err = do_fp_store(op.reg, do_stfs, op.ea, size, regs);
1924 else 1908 else
@@ -1927,15 +1911,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1927#endif 1911#endif
1928#ifdef CONFIG_ALTIVEC 1912#ifdef CONFIG_ALTIVEC
1929 case STORE_VMX: 1913 case STORE_VMX:
1930 if (regs->msr & MSR_LE)
1931 return 0;
1932 err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs); 1914 err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs);
1933 goto ldst_done; 1915 goto ldst_done;
1934#endif 1916#endif
1935#ifdef CONFIG_VSX 1917#ifdef CONFIG_VSX
1936 case STORE_VSX: 1918 case STORE_VSX:
1937 if (regs->msr & MSR_LE)
1938 return 0;
1939 err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs); 1919 err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs);
1940 goto ldst_done; 1920 goto ldst_done;
1941#endif 1921#endif
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
new file mode 100644
index 000000000000..2534c1447554
--- /dev/null
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -0,0 +1,434 @@
1/*
2 * Simple sanity test for emulate_step load/store instructions.
3 *
4 * Copyright IBM Corp. 2016
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#define pr_fmt(fmt) "emulate_step_test: " fmt
13
14#include <linux/ptrace.h>
15#include <asm/sstep.h>
16#include <asm/ppc-opcode.h>
17
/* Low 16 bits of an immediate, as encoded in D-form instructions. */
#define IMM_L(i)				((uintptr_t)(i) & 0xffff)

/*
 * Defined with TEST_ prefix so it does not conflict with other
 * definitions.
 */
/* D-form loads/stores: opcode | RT/RS | RA | 16-bit displacement */
#define TEST_LD(r, base, i)	(PPC_INST_LD | ___PPC_RT(r) |		\
					___PPC_RA(base) | IMM_L(i))
#define TEST_LWZ(r, base, i)	(PPC_INST_LWZ | ___PPC_RT(r) |		\
					___PPC_RA(base) | IMM_L(i))
/* X-form indexed loads/stores: opcode | RT/RS | RA | RB */
#define TEST_LWZX(t, a, b)	(PPC_INST_LWZX | ___PPC_RT(t) |		\
					___PPC_RA(a) | ___PPC_RB(b))
/* DS-form: low two bits of the displacement are reserved, hence 0xfffc */
#define TEST_STD(r, base, i)	(PPC_INST_STD | ___PPC_RS(r) |		\
					___PPC_RA(base) | ((i) & 0xfffc))
/* larx/stcx. pair used to exercise reservation handling */
#define TEST_LDARX(t, a, b, eh)	(PPC_INST_LDARX | ___PPC_RT(t) |	\
					___PPC_RA(a) | ___PPC_RB(b) |	\
					__PPC_EH(eh))
#define TEST_STDCX(s, a, b)	(PPC_INST_STDCX | ___PPC_RS(s) |	\
					___PPC_RA(a) | ___PPC_RB(b))
/* Floating-point indexed loads/stores (single and double precision) */
#define TEST_LFSX(t, a, b)	(PPC_INST_LFSX | ___PPC_RT(t) |		\
					___PPC_RA(a) | ___PPC_RB(b))
#define TEST_STFSX(s, a, b)	(PPC_INST_STFSX | ___PPC_RS(s) |	\
					___PPC_RA(a) | ___PPC_RB(b))
#define TEST_LFDX(t, a, b)	(PPC_INST_LFDX | ___PPC_RT(t) |		\
					___PPC_RA(a) | ___PPC_RB(b))
#define TEST_STFDX(s, a, b)	(PPC_INST_STFDX | ___PPC_RS(s) |	\
					___PPC_RA(a) | ___PPC_RB(b))
/* AltiVec loads/stores */
#define TEST_LVX(t, a, b)	(PPC_INST_LVX | ___PPC_RT(t) |		\
					___PPC_RA(a) | ___PPC_RB(b))
#define TEST_STVX(s, a, b)	(PPC_INST_STVX | ___PPC_RS(s) |		\
					___PPC_RA(a) | ___PPC_RB(b))
/* VSX loads/stores; VSX_XX1 encodes the extended 6-bit VSR number */
#define TEST_LXVD2X(s, a, b)	(PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b))
#define TEST_STXVD2X(s, a, b)	(PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b))
52
53static void __init init_pt_regs(struct pt_regs *regs)
54{
55 static unsigned long msr;
56 static bool msr_cached;
57
58 memset(regs, 0, sizeof(struct pt_regs));
59
60 if (likely(msr_cached)) {
61 regs->msr = msr;
62 return;
63 }
64
65 asm volatile("mfmsr %0" : "=r"(regs->msr));
66
67 regs->msr |= MSR_FP;
68 regs->msr |= MSR_VEC;
69 regs->msr |= MSR_VSX;
70
71 msr = regs->msr;
72 msr_cached = true;
73}
74
/* Log one test verdict, e.g. "ld             : PASS". */
static void __init show_result(char *ins, char *result)
{
	pr_info("%-14s : %s\n", ins, result);
}
79
80static void __init test_ld(void)
81{
82 struct pt_regs regs;
83 unsigned long a = 0x23;
84 int stepped = -1;
85
86 init_pt_regs(&regs);
87 regs.gpr[3] = (unsigned long) &a;
88
89 /* ld r5, 0(r3) */
90 stepped = emulate_step(&regs, TEST_LD(5, 3, 0));
91
92 if (stepped == 1 && regs.gpr[5] == a)
93 show_result("ld", "PASS");
94 else
95 show_result("ld", "FAIL");
96}
97
98static void __init test_lwz(void)
99{
100 struct pt_regs regs;
101 unsigned int a = 0x4545;
102 int stepped = -1;
103
104 init_pt_regs(&regs);
105 regs.gpr[3] = (unsigned long) &a;
106
107 /* lwz r5, 0(r3) */
108 stepped = emulate_step(&regs, TEST_LWZ(5, 3, 0));
109
110 if (stepped == 1 && regs.gpr[5] == a)
111 show_result("lwz", "PASS");
112 else
113 show_result("lwz", "FAIL");
114}
115
116static void __init test_lwzx(void)
117{
118 struct pt_regs regs;
119 unsigned int a[3] = {0x0, 0x0, 0x1234};
120 int stepped = -1;
121
122 init_pt_regs(&regs);
123 regs.gpr[3] = (unsigned long) a;
124 regs.gpr[4] = 8;
125 regs.gpr[5] = 0x8765;
126
127 /* lwzx r5, r3, r4 */
128 stepped = emulate_step(&regs, TEST_LWZX(5, 3, 4));
129 if (stepped == 1 && regs.gpr[5] == a[2])
130 show_result("lwzx", "PASS");
131 else
132 show_result("lwzx", "FAIL");
133}
134
135static void __init test_std(void)
136{
137 struct pt_regs regs;
138 unsigned long a = 0x1234;
139 int stepped = -1;
140
141 init_pt_regs(&regs);
142 regs.gpr[3] = (unsigned long) &a;
143 regs.gpr[5] = 0x5678;
144
145 /* std r5, 0(r3) */
146 stepped = emulate_step(&regs, TEST_STD(5, 3, 0));
147 if (stepped == 1 || regs.gpr[5] == a)
148 show_result("std", "PASS");
149 else
150 show_result("std", "FAIL");
151}
152
/*
 * Test larx/stcx. emulation: an emulated ldarx establishes a
 * reservation, then an emulated stdcx. is checked for a consistent
 * outcome (store performed iff cr0.eq reports success).  Statement
 * order matters throughout — any intervening access to 'a' can kill
 * the reservation.
 */
static void __init test_ldarx_stdcx(void)
{
	struct pt_regs regs;
	unsigned long a = 0x1234;
	int stepped = -1;
	unsigned long cr0_eq = 0x1 << 29; /* eq bit of CR0 */

	init_pt_regs(&regs);
	/* Start from the live CR so only the emulated stdcx. affects cr0.eq */
	asm volatile("mfcr %0" : "=r"(regs.ccr));


	/*** ldarx ***/

	regs.gpr[3] = (unsigned long) &a;
	regs.gpr[4] = 0;
	regs.gpr[5] = 0x5678;	/* sentinel the load must overwrite */

	/* ldarx r5, r3, r4, 0 */
	stepped = emulate_step(&regs, TEST_LDARX(5, 3, 4, 0));

	/*
	 * Don't touch 'a' here. Touching 'a' can do Load/store
	 * of 'a' which result in failure of subsequent stdcx.
	 * Instead, use hardcoded value for comparison.
	 */
	if (stepped <= 0 || regs.gpr[5] != 0x1234) {
		show_result("ldarx / stdcx.", "FAIL (ldarx)");
		return;
	}


	/*** stdcx. ***/

	regs.gpr[5] = 0x9ABC;

	/* stdcx. r5, r3, r4 */
	stepped = emulate_step(&regs, TEST_STDCX(5, 3, 4));

	/*
	 * Two possible scenarios that indicates successful emulation
	 * of stdcx. :
	 * 1. Reservation is active and store is performed. In this
	 *    case cr0.eq bit will be set to 1.
	 * 2. Reservation is not active and store is not performed.
	 *    In this case cr0.eq bit will be set to 0.
	 */
	if (stepped == 1 && ((regs.gpr[5] == a && (regs.ccr & cr0_eq))
			|| (regs.gpr[5] != a && !(regs.ccr & cr0_eq))))
		show_result("ldarx / stdcx.", "PASS");
	else
		show_result("ldarx / stdcx.", "FAIL (stdcx.)");
}
205
206#ifdef CONFIG_PPC_FPU
207static void __init test_lfsx_stfsx(void)
208{
209 struct pt_regs regs;
210 union {
211 float a;
212 int b;
213 } c;
214 int cached_b;
215 int stepped = -1;
216
217 init_pt_regs(&regs);
218
219
220 /*** lfsx ***/
221
222 c.a = 123.45;
223 cached_b = c.b;
224
225 regs.gpr[3] = (unsigned long) &c.a;
226 regs.gpr[4] = 0;
227
228 /* lfsx frt10, r3, r4 */
229 stepped = emulate_step(&regs, TEST_LFSX(10, 3, 4));
230
231 if (stepped == 1)
232 show_result("lfsx", "PASS");
233 else
234 show_result("lfsx", "FAIL");
235
236
237 /*** stfsx ***/
238
239 c.a = 678.91;
240
241 /* stfsx frs10, r3, r4 */
242 stepped = emulate_step(&regs, TEST_STFSX(10, 3, 4));
243
244 if (stepped == 1 && c.b == cached_b)
245 show_result("stfsx", "PASS");
246 else
247 show_result("stfsx", "FAIL");
248}
249
250static void __init test_lfdx_stfdx(void)
251{
252 struct pt_regs regs;
253 union {
254 double a;
255 long b;
256 } c;
257 long cached_b;
258 int stepped = -1;
259
260 init_pt_regs(&regs);
261
262
263 /*** lfdx ***/
264
265 c.a = 123456.78;
266 cached_b = c.b;
267
268 regs.gpr[3] = (unsigned long) &c.a;
269 regs.gpr[4] = 0;
270
271 /* lfdx frt10, r3, r4 */
272 stepped = emulate_step(&regs, TEST_LFDX(10, 3, 4));
273
274 if (stepped == 1)
275 show_result("lfdx", "PASS");
276 else
277 show_result("lfdx", "FAIL");
278
279
280 /*** stfdx ***/
281
282 c.a = 987654.32;
283
284 /* stfdx frs10, r3, r4 */
285 stepped = emulate_step(&regs, TEST_STFDX(10, 3, 4));
286
287 if (stepped == 1 && c.b == cached_b)
288 show_result("stfdx", "PASS");
289 else
290 show_result("stfdx", "FAIL");
291}
292#else
293static void __init test_lfsx_stfsx(void)
294{
295 show_result("lfsx", "SKIP (CONFIG_PPC_FPU is not set)");
296 show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)");
297}
298
299static void __init test_lfdx_stfdx(void)
300{
301 show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)");
302 show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)");
303}
304#endif /* CONFIG_PPC_FPU */
305
306#ifdef CONFIG_ALTIVEC
307static void __init test_lvx_stvx(void)
308{
309 struct pt_regs regs;
310 union {
311 vector128 a;
312 u32 b[4];
313 } c;
314 u32 cached_b[4];
315 int stepped = -1;
316
317 init_pt_regs(&regs);
318
319
320 /*** lvx ***/
321
322 cached_b[0] = c.b[0] = 923745;
323 cached_b[1] = c.b[1] = 2139478;
324 cached_b[2] = c.b[2] = 9012;
325 cached_b[3] = c.b[3] = 982134;
326
327 regs.gpr[3] = (unsigned long) &c.a;
328 regs.gpr[4] = 0;
329
330 /* lvx vrt10, r3, r4 */
331 stepped = emulate_step(&regs, TEST_LVX(10, 3, 4));
332
333 if (stepped == 1)
334 show_result("lvx", "PASS");
335 else
336 show_result("lvx", "FAIL");
337
338
339 /*** stvx ***/
340
341 c.b[0] = 4987513;
342 c.b[1] = 84313948;
343 c.b[2] = 71;
344 c.b[3] = 498532;
345
346 /* stvx vrs10, r3, r4 */
347 stepped = emulate_step(&regs, TEST_STVX(10, 3, 4));
348
349 if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
350 cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
351 show_result("stvx", "PASS");
352 else
353 show_result("stvx", "FAIL");
354}
355#else
356static void __init test_lvx_stvx(void)
357{
358 show_result("lvx", "SKIP (CONFIG_ALTIVEC is not set)");
359 show_result("stvx", "SKIP (CONFIG_ALTIVEC is not set)");
360}
361#endif /* CONFIG_ALTIVEC */
362
363#ifdef CONFIG_VSX
364static void __init test_lxvd2x_stxvd2x(void)
365{
366 struct pt_regs regs;
367 union {
368 vector128 a;
369 u32 b[4];
370 } c;
371 u32 cached_b[4];
372 int stepped = -1;
373
374 init_pt_regs(&regs);
375
376
377 /*** lxvd2x ***/
378
379 cached_b[0] = c.b[0] = 18233;
380 cached_b[1] = c.b[1] = 34863571;
381 cached_b[2] = c.b[2] = 834;
382 cached_b[3] = c.b[3] = 6138911;
383
384 regs.gpr[3] = (unsigned long) &c.a;
385 regs.gpr[4] = 0;
386
387 /* lxvd2x vsr39, r3, r4 */
388 stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4));
389
390 if (stepped == 1)
391 show_result("lxvd2x", "PASS");
392 else
393 show_result("lxvd2x", "FAIL");
394
395
396 /*** stxvd2x ***/
397
398 c.b[0] = 21379463;
399 c.b[1] = 87;
400 c.b[2] = 374234;
401 c.b[3] = 4;
402
403 /* stxvd2x vsr39, r3, r4 */
404 stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4));
405
406 if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
407 cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
408 show_result("stxvd2x", "PASS");
409 else
410 show_result("stxvd2x", "FAIL");
411}
412#else
413static void __init test_lxvd2x_stxvd2x(void)
414{
415 show_result("lxvd2x", "SKIP (CONFIG_VSX is not set)");
416 show_result("stxvd2x", "SKIP (CONFIG_VSX is not set)");
417}
418#endif /* CONFIG_VSX */
419
/*
 * Entry point: run each emulation sanity test in turn; every test logs
 * its own PASS/FAIL/SKIP line via show_result().  Always returns 0 so
 * the initcall never reports failure.
 */
static int __init test_emulate_step(void)
{
	test_ld();
	test_lwz();
	test_lwzx();
	test_std();
	test_ldarx_stdcx();
	test_lfsx_stfsx();
	test_lfdx_stfdx();
	test_lvx_stvx();
	test_lxvd2x_stxvd2x();

	return 0;
}
/* Run after most of the kernel is up, so FP/VMX/VSX state is usable. */
late_initcall(test_emulate_step);
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 6aa3b76aa0d6..9be992083d2a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -356,18 +356,42 @@ static void early_check_vec5(void)
356 unsigned long root, chosen; 356 unsigned long root, chosen;
357 int size; 357 int size;
358 const u8 *vec5; 358 const u8 *vec5;
359 u8 mmu_supported;
359 360
360 root = of_get_flat_dt_root(); 361 root = of_get_flat_dt_root();
361 chosen = of_get_flat_dt_subnode_by_name(root, "chosen"); 362 chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
362 if (chosen == -FDT_ERR_NOTFOUND) 363 if (chosen == -FDT_ERR_NOTFOUND) {
364 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
363 return; 365 return;
366 }
364 vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size); 367 vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
365 if (!vec5) 368 if (!vec5) {
369 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
366 return; 370 return;
367 if (size <= OV5_INDX(OV5_MMU_RADIX_300) || 371 }
368 !(vec5[OV5_INDX(OV5_MMU_RADIX_300)] & OV5_FEAT(OV5_MMU_RADIX_300))) 372 if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
369 /* Hypervisor doesn't support radix */
370 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; 373 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
374 return;
375 }
376
377 /* Check for supported configuration */
378 mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
379 OV5_FEAT(OV5_MMU_SUPPORT);
380 if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
381 /* Hypervisor only supports radix - check enabled && GTSE */
382 if (!early_radix_enabled()) {
383 pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
384 }
385 if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
386 OV5_FEAT(OV5_RADIX_GTSE))) {
387 pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
388 }
389 /* Do radix anyway - the hypervisor said we had to */
390 cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
391 } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
392 /* Hypervisor only supports hash - disable radix */
393 cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
394 }
371} 395}
372 396
373void __init mmu_early_init_devtree(void) 397void __init mmu_early_init_devtree(void)
@@ -383,7 +407,7 @@ void __init mmu_early_init_devtree(void)
383 * even though the ibm,architecture-vec-5 property created by 407 * even though the ibm,architecture-vec-5 property created by
384 * skiboot doesn't have the necessary bits set. 408 * skiboot doesn't have the necessary bits set.
385 */ 409 */
386 if (early_radix_enabled() && !(mfmsr() & MSR_HV)) 410 if (!(mfmsr() & MSR_HV))
387 early_check_vec5(); 411 early_check_vec5();
388 412
389 if (early_radix_enabled()) 413 if (early_radix_enabled())
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index 2a590a98e652..c28165d8970b 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -186,6 +186,10 @@ static void __init radix_init_pgtable(void)
186 */ 186 */
187 register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12); 187 register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
188 pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd); 188 pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
189 asm volatile("ptesync" : : : "memory");
190 asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
191 "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
192 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
189} 193}
190 194
191static void __init radix_init_partition_table(void) 195static void __init radix_init_partition_table(void)
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 6693f75e93d1..da8a0f7a035c 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -39,8 +39,8 @@ opal_tracepoint_refcount:
39BEGIN_FTR_SECTION; \ 39BEGIN_FTR_SECTION; \
40 b 1f; \ 40 b 1f; \
41END_FTR_SECTION(0, 1); \ 41END_FTR_SECTION(0, 1); \
42 ld r12,opal_tracepoint_refcount@toc(r2); \ 42 ld r11,opal_tracepoint_refcount@toc(r2); \
43 cmpdi r12,0; \ 43 cmpdi r11,0; \
44 bne- LABEL; \ 44 bne- LABEL; \
451: 451:
46 46
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index f9670eabfcfa..b53f80f0b4d8 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void)
91 91
92static void icp_opal_set_cpu_priority(unsigned char cppr) 92static void icp_opal_set_cpu_priority(unsigned char cppr)
93{ 93{
94 /*
95 * Here be dragons. The caller has asked to allow only IPI's and not
96 * external interrupts. But OPAL XIVE doesn't support that. So instead
97 * of allowing no interrupts allow all. That's still not right, but
98 * currently the only caller who does this is xics_migrate_irqs_away()
99 * and it works in that case.
100 */
101 if (cppr >= DEFAULT_PRIORITY)
102 cppr = LOWEST_PRIORITY;
103
94 xics_set_base_cppr(cppr); 104 xics_set_base_cppr(cppr);
95 opal_int_set_cppr(cppr); 105 opal_int_set_cppr(cppr);
96 iosync(); 106 iosync();
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 69d858e51ac7..23efe4e42172 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -20,6 +20,7 @@
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/delay.h>
23 24
24#include <asm/prom.h> 25#include <asm/prom.h>
25#include <asm/io.h> 26#include <asm/io.h>
@@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void)
198 /* Remove ourselves from the global interrupt queue */ 199 /* Remove ourselves from the global interrupt queue */
199 xics_set_cpu_giq(xics_default_distrib_server, 0); 200 xics_set_cpu_giq(xics_default_distrib_server, 0);
200 201
201 /* Allow IPIs again... */
202 icp_ops->set_priority(DEFAULT_PRIORITY);
203
204 for_each_irq_desc(virq, desc) { 202 for_each_irq_desc(virq, desc) {
205 struct irq_chip *chip; 203 struct irq_chip *chip;
206 long server; 204 long server;
@@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void)
255unlock: 253unlock:
256 raw_spin_unlock_irqrestore(&desc->lock, flags); 254 raw_spin_unlock_irqrestore(&desc->lock, flags);
257 } 255 }
256
257 /* Allow "sufficient" time to drop any inflight IRQ's */
258 mdelay(5);
259
260 /*
261 * Allow IPIs again. This is done at the very end, after migrating all
262 * interrupts, the expectation is that we'll only get woken up by an IPI
263 * interrupt beyond this point, but leave externals masked just to be
264 * safe. If we're using icp-opal this may actually allow all
265 * interrupts anyway, but that should be OK.
266 */
267 icp_ops->set_priority(DEFAULT_PRIORITY);
268
258} 269}
259#endif /* CONFIG_HOTPLUG_CPU */ 270#endif /* CONFIG_HOTPLUG_CPU */
260 271
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
index 248a820048df..66d31de60b9a 100644
--- a/tools/testing/selftests/powerpc/harness.c
+++ b/tools/testing/selftests/powerpc/harness.c
@@ -114,9 +114,11 @@ int test_harness(int (test_function)(void), char *name)
114 114
115 rc = run_test(test_function, name); 115 rc = run_test(test_function, name);
116 116
117 if (rc == MAGIC_SKIP_RETURN_VALUE) 117 if (rc == MAGIC_SKIP_RETURN_VALUE) {
118 test_skip(name); 118 test_skip(name);
119 else 119 /* so that skipped test is not marked as failed */
120 rc = 0;
121 } else
120 test_finish(name, rc); 122 test_finish(name, rc);
121 123
122 return rc; 124 return rc;