author     Linus Torvalds <torvalds@linux-foundation.org>  2015-10-23 05:49:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-10-23 05:49:51 -0400
commit     a2c01ed5d46f0686c52272e09f7d2f5be9f573fd (patch)
tree       773a9e89b041cfa4ee8018af6d490c77bc6f8a7a
parent     d0ddf980d6efdcee6c7a85eb0f27baa6b60eeff6 (diff)
parent     8832317f662c06f5c06e638f57bfe89a71c9b266 (diff)
Merge tag 'powerpc-4.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc fixes from Michael Ellerman:
- Revert "Use the POWER8 Micro Partition Prefetch Engine in KVM HV on
POWER8" from Paul
- Handle irq_happened flag correctly in off-line loop from Paul
- Validate rtas.entry before calling enter_rtas() from Vasant
* tag 'powerpc-4.3-5' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/rtas: Validate rtas.entry before calling enter_rtas()
powerpc/powernv: Handle irq_happened flag correctly in off-line loop
powerpc: Revert "Use the POWER8 Micro Partition Prefetch Engine in KVM HV on POWER8"
-rw-r--r--  arch/powerpc/include/asm/cache.h       |  7
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h    |  2
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h  | 17
-rw-r--r--  arch/powerpc/include/asm/reg.h         |  1
-rw-r--r--  arch/powerpc/kernel/rtas.c             |  3
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c           | 55
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c   | 29
7 files changed, 28 insertions(+), 86 deletions(-)
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 0dc42c5082b7..5f8229e24fe6 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -3,7 +3,6 @@
 
 #ifdef __KERNEL__
 
-#include <asm/reg.h>
 
 /* bytes per L1 cache line */
 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
@@ -40,12 +39,6 @@ struct ppc64_caches {
 };
 
 extern struct ppc64_caches ppc64_caches;
-
-static inline void logmpp(u64 x)
-{
-	asm volatile(PPC_LOGMPP(R1) : : "r" (x));
-}
-
 #endif /* __powerpc64__ && ! __ASSEMBLY__ */
 
 #if defined(__ASSEMBLY__)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 827a38d7a9db..887c259556df 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -297,8 +297,6 @@ struct kvmppc_vcore {
 	u32 arch_compat;
 	ulong pcr;
 	ulong dpdes;		/* doorbell state (POWER8) */
-	void *mpp_buffer;	/* Micro Partition Prefetch buffer */
-	bool mpp_buffer_is_valid;
 	ulong conferring_threads;
 };
 
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 790f5d1d9a46..7ab04fc59e24 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -141,7 +141,6 @@
 #define PPC_INST_ISEL			0x7c00001e
 #define PPC_INST_ISEL_MASK		0xfc00003e
 #define PPC_INST_LDARX			0x7c0000a8
-#define PPC_INST_LOGMPP			0x7c0007e4
 #define PPC_INST_LSWI			0x7c0004aa
 #define PPC_INST_LSWX			0x7c00042a
 #define PPC_INST_LWARX			0x7c000028
@@ -285,20 +284,6 @@
 #define __PPC_EH(eh)	0
 #endif
 
-/* POWER8 Micro Partition Prefetch (MPP) parameters */
-/* Address mask is common for LOGMPP instruction and MPPR SPR */
-#define PPC_MPPE_ADDRESS_MASK	0xffffffffc000ULL
-
-/* Bits 60 and 61 of MPP SPR should be set to one of the following */
-/* Aborting the fetch is indeed setting 00 in the table size bits */
-#define PPC_MPPR_FETCH_ABORT		(0x0ULL << 60)
-#define PPC_MPPR_FETCH_WHOLE_TABLE	(0x2ULL << 60)
-
-/* Bits 54 and 55 of register for LOGMPP instruction should be set to: */
-#define PPC_LOGMPP_LOG_L2	(0x02ULL << 54)
-#define PPC_LOGMPP_LOG_L2L3	(0x01ULL << 54)
-#define PPC_LOGMPP_LOG_ABORT	(0x03ULL << 54)
-
 /* Deal with instructions that older assemblers aren't aware of */
 #define PPC_DCBAL(a, b)		stringify_in_c(.long PPC_INST_DCBAL | \
 					__PPC_RA(a) | __PPC_RB(b))
@@ -307,8 +292,6 @@
 #define PPC_LDARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LDARX | \
 					___PPC_RT(t) | ___PPC_RA(a) | \
 					___PPC_RB(b) | __PPC_EH(eh))
-#define PPC_LOGMPP(b)		stringify_in_c(.long PPC_INST_LOGMPP | \
-					__PPC_RB(b))
 #define PPC_LWARX(t, a, b, eh)	stringify_in_c(.long PPC_INST_LWARX | \
 					___PPC_RT(t) | ___PPC_RA(a) | \
 					___PPC_RB(b) | __PPC_EH(eh))
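Aside (not part of the commit): macros like the removed PPC_LOGMPP(b) emit a raw .long built by OR-ing an opcode constant with shifted register fields, so retiring the instruction only means deleting its opcode and wrapper. A stand-alone sketch of that encoding, assuming the usual 5-bit RB field starting at bit 11 and reusing the opcode value removed above (MY_PPC_RB is a hypothetical stand-in for the kernel's RB helper):

#include <stdio.h>
#include <stdint.h>

#define PPC_INST_LOGMPP	0x7c0007e4u			/* opcode constant removed by this revert */
#define MY_PPC_RB(b)	(((uint32_t)(b) & 0x1f) << 11)	/* assumed RB field layout */

int main(void)
{
	uint32_t insn = PPC_INST_LOGMPP | MY_PPC_RB(1);	/* roughly "logmpp r1" */
	printf(".long 0x%08x\n", insn);
	return 0;
}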
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index aa1cc5f015ee..a908ada8e0a5 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -226,7 +226,6 @@
 #define   CTRL_TE	0x00c00000	/* thread enable */
 #define   CTRL_RUNLATCH	0x1
 #define SPRN_DAWR	0xB4
-#define SPRN_MPPR	0xB8	/* Micro Partition Prefetch Register */
 #define SPRN_RPR	0xBA	/* Relative Priority Register */
 #define SPRN_CIABR	0xBB
 #define   CIABR_PRIV	0x3
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 84bf934cf748..5a753fae8265 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1043,6 +1043,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
+	if (!rtas.entry)
+		return -EINVAL;
+
 	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
 		return -EFAULT;
 
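Aside (not from the commit): the new check simply refuses the syscall when the firmware entry point was never discovered, rather than letting enter_rtas() branch to address zero. A rough userspace sketch of the same guard pattern, with hypothetical names rather than the kernel's:

#include <errno.h>
#include <stdio.h>

struct fw_interface {
	void (*entry)(void);	/* firmware entry point; stays NULL if never probed */
};

static int fw_call(struct fw_interface *fw)
{
	if (!fw->entry)		/* reject the request instead of jumping to NULL */
		return -EINVAL;
	fw->entry();
	return 0;
}

int main(void)
{
	struct fw_interface fw = { .entry = NULL };
	printf("fw_call() = %d (expect %d when no entry point)\n", fw_call(&fw), -EINVAL);
	return 0;
}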
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 228049786888..9c26c5a96ea2 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -36,7 +36,6 @@
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
-#include <asm/cache.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -75,12 +74,6 @@
 
 static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 
-#if defined(CONFIG_PPC_64K_PAGES)
-#define MPP_BUFFER_ORDER	0
-#elif defined(CONFIG_PPC_4K_PAGES)
-#define MPP_BUFFER_ORDER	3
-#endif
-
 static int dynamic_mt_modes = 6;
 module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
@@ -1455,13 +1448,6 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
 	vcore->kvm = kvm;
 	INIT_LIST_HEAD(&vcore->preempt_list);
 
-	vcore->mpp_buffer_is_valid = false;
-
-	if (cpu_has_feature(CPU_FTR_ARCH_207S))
-		vcore->mpp_buffer = (void *)__get_free_pages(
-						GFP_KERNEL|__GFP_ZERO,
-						MPP_BUFFER_ORDER);
-
 	return vcore;
 }
 
@@ -1894,33 +1880,6 @@ static int on_primary_thread(void)
 	return 1;
 }
 
-static void kvmppc_start_saving_l2_cache(struct kvmppc_vcore *vc)
-{
-	phys_addr_t phy_addr, mpp_addr;
-
-	phy_addr = (phys_addr_t)virt_to_phys(vc->mpp_buffer);
-	mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
-	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_ABORT);
-	logmpp(mpp_addr | PPC_LOGMPP_LOG_L2);
-
-	vc->mpp_buffer_is_valid = true;
-}
-
-static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
-{
-	phys_addr_t phy_addr, mpp_addr;
-
-	phy_addr = virt_to_phys(vc->mpp_buffer);
-	mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;
-
-	/* We must abort any in-progress save operations to ensure
-	 * the table is valid so that prefetch engine knows when to
-	 * stop prefetching. */
-	logmpp(mpp_addr | PPC_LOGMPP_LOG_ABORT);
-	mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
-}
-
 /*
  * A list of virtual cores for each physical CPU.
  * These are vcores that could run but their runner VCPU tasks are
@@ -2471,14 +2430,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 
 	srcu_idx = srcu_read_lock(&vc->kvm->srcu);
 
-	if (vc->mpp_buffer_is_valid)
-		kvmppc_start_restoring_l2_cache(vc);
-
 	__kvmppc_vcore_entry();
 
-	if (vc->mpp_buffer)
-		kvmppc_start_saving_l2_cache(vc);
-
 	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
 	spin_lock(&vc->lock);
@@ -3073,14 +3026,8 @@ static void kvmppc_free_vcores(struct kvm *kvm)
 {
 	long int i;
 
-	for (i = 0; i < KVM_MAX_VCORES; ++i) {
-		if (kvm->arch.vcores[i] && kvm->arch.vcores[i]->mpp_buffer) {
-			struct kvmppc_vcore *vc = kvm->arch.vcores[i];
-			free_pages((unsigned long)vc->mpp_buffer,
-				   MPP_BUFFER_ORDER);
-		}
+	for (i = 0; i < KVM_MAX_VCORES; ++i)
 		kfree(kvm->arch.vcores[i]);
-	}
 	kvm->arch.online_vcores = 0;
 }
 
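Aside (not from the commit): the reverted save path composed its MPPR and logmpp operands by masking the buffer's physical address to the MPP address field and OR-ing in the mode bits. A stand-alone sketch of that arithmetic, using the constants removed from ppc-opcode.h above and a made-up buffer address:

#include <stdio.h>
#include <stdint.h>

#define PPC_MPPE_ADDRESS_MASK	0xffffffffc000ULL	/* address field of MPPR / logmpp operand */
#define PPC_MPPR_FETCH_ABORT	(0x0ULL << 60)		/* "abort fetch" table-size bits */
#define PPC_LOGMPP_LOG_L2	(0x02ULL << 54)		/* "log L2 contents" mode bits */

int main(void)
{
	uint64_t phy_addr = 0x10f24000ULL;			/* hypothetical buffer address */
	uint64_t mpp_addr = phy_addr & PPC_MPPE_ADDRESS_MASK;	/* keep only the address field */

	printf("MPPR value:     0x%016llx\n",
	       (unsigned long long)(mpp_addr | PPC_MPPR_FETCH_ABORT));
	printf("logmpp operand: 0x%016llx\n",
	       (unsigned long long)(mpp_addr | PPC_LOGMPP_LOG_L2));
	return 0;
}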
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 8f70ba681a78..ca264833ee64 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -171,7 +171,26 @@ static void pnv_smp_cpu_kill_self(void)
 	 * so clear LPCR:PECE1. We keep PECE2 enabled.
 	 */
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
+
+	/*
+	 * Hard-disable interrupts, and then clear irq_happened flags
+	 * that we can safely ignore while off-line, since they
+	 * are for things for which we do no processing when off-line
+	 * (or in the case of HMI, all the processing we need to do
+	 * is done in lower-level real-mode code).
+	 */
+	hard_irq_disable();
+	local_paca->irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);
+
 	while (!generic_check_cpu_restart(cpu)) {
+		/*
+		 * Clear IPI flag, since we don't handle IPIs while
+		 * offline, except for those when changing micro-threading
+		 * mode, which are handled explicitly below, and those
+		 * for coming online, which are handled via
+		 * generic_check_cpu_restart() calls.
+		 */
+		kvmppc_set_host_ipi(cpu, 0);
 
 		ppc64_runlatch_off();
 
@@ -196,20 +215,20 @@
 		 * having finished executing in a KVM guest, then srr1
 		 * contains 0.
 		 */
-		if ((srr1 & wmask) == SRR1_WAKEEE) {
+		if (((srr1 & wmask) == SRR1_WAKEEE) ||
+		    (local_paca->irq_happened & PACA_IRQ_EE)) {
 			icp_native_flush_interrupt();
-			local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
-			smp_mb();
 		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
 			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
-			kvmppc_set_host_ipi(cpu, 0);
 		}
+		local_paca->irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);
+		smp_mb();
 
 		if (cpu_core_split_required())
 			continue;
 
-		if (!generic_check_cpu_restart(cpu))
+		if (srr1 && !generic_check_cpu_restart(cpu))
 			DBG("CPU%d Unexpected exit while offline !\n", cpu);
 	}
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
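A quick aside on the masking in that hunk (not from the commit): irq_happened is a per-CPU bit mask, so the off-line loop can drop exactly the events it will never process while leaving the hard-disable flag alone. A userspace sketch with illustrative flag values, which are not the kernel's real paca.h encodings:

#include <stdio.h>

/* Illustrative values only; the kernel's PACA_IRQ_* encodings differ. */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08
#define PACA_IRQ_HMI		0x10

int main(void)
{
	unsigned int irq_happened = PACA_IRQ_HARD_DIS | PACA_IRQ_DEC | PACA_IRQ_HMI;

	/* Before the wait loop: decrementer and HMI need no off-line handling. */
	irq_happened &= ~(PACA_IRQ_DEC | PACA_IRQ_HMI);

	/* After servicing a wakeup: external and doorbell interrupts have been
	 * flushed or cleared, so drop those flags too. */
	irq_happened &= ~(PACA_IRQ_EE | PACA_IRQ_DBELL);

	printf("irq_happened = %#x (hard-disable flag still set: %s)\n",
	       irq_happened, (irq_happened & PACA_IRQ_HARD_DIS) ? "yes" : "no");
	return 0;
}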