author    David S. Miller <davem@davemloft.net>  2014-06-04 02:32:12 -0400
committer David S. Miller <davem@davemloft.net>  2014-06-04 02:32:12 -0400
commit    c99f7abf0e69987e4add567e155e042cb1f2a20b (patch)
tree      d23898dc30ed25c1dae9bb6325041027d412397a /arch/powerpc
parent    92ff71b8fe9cd9c673615fc6f3870af7376d7c84 (diff)
parent    d8b0426af5b67973585712c9af36b86f6ea97815 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	include/net/inetpeer.h
	net/ipv6/output_core.c

Changes in net were fixing bugs in code removed in net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Makefile                       4
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h          7
-rw-r--r--  arch/powerpc/include/asm/sections.h        11
-rw-r--r--  arch/powerpc/include/asm/systbl.h           1
-rw-r--r--  arch/powerpc/include/asm/unistd.h           2
-rw-r--r--  arch/powerpc/include/uapi/asm/unistd.h      1
-rw-r--r--  arch/powerpc/kernel/kvm.c                   2
-rw-r--r--  arch/powerpc/kernel/machine_kexec_64.c      2
-rw-r--r--  arch/powerpc/kvm/book3s.c                   6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c         2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S   104
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c                6
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c             4
13 files changed, 140 insertions, 12 deletions
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 4c0cedf4e2c7..ce4c68a4a823 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -150,7 +150,9 @@ endif
 
 CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
 
-KBUILD_CPPFLAGS += -Iarch/$(ARCH)
+asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
+
+KBUILD_CPPFLAGS += -Iarch/$(ARCH) $(asinstr)
 KBUILD_AFLAGS += -Iarch/$(ARCH)
 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
 CPP = $(CC) -E $(KBUILD_CFLAGS)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 6586a40a46ce..cded7c1278ef 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -318,11 +318,16 @@ n:
 	addi	reg,reg,(name - 0b)@l;
 
 #ifdef __powerpc64__
+#ifdef HAVE_AS_ATHIGH
+#define __AS_ATHIGH high
+#else
+#define __AS_ATHIGH h
+#endif
 #define LOAD_REG_IMMEDIATE(reg,expr)	\
 	lis	reg,(expr)@highest;	\
 	ori	reg,reg,(expr)@higher;	\
 	rldicr	reg,reg,32,31;		\
-	oris	reg,reg,(expr)@h;	\
+	oris	reg,reg,(expr)@__AS_ATHIGH; \
 	ori	reg,reg,(expr)@l;
 
 #define LOAD_REG_ADDR(reg,name)		\
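For context (not part of the patch): HAVE_AS_ATHIGH, probed by the Makefile hunk above, only decides whether the fourth instruction spells its operand @high or @h. Below is a rough C model of the 64-bit constant the five-instruction LOAD_REG_IMMEDIATE sequence assembles, assuming the usual meaning of the @highest/@higher/@high/@l modifiers (consecutive 16-bit slices of the expression):

#include <assert.h>
#include <stdint.h>

/* Rough C model of LOAD_REG_IMMEDIATE(reg, expr); illustration only. */
static uint64_t load_reg_immediate(uint64_t expr)
{
	uint16_t highest = expr >> 48;	/* (expr)@highest: bits 48-63 */
	uint16_t higher  = expr >> 32;	/* (expr)@higher:  bits 32-47 */
	uint16_t high    = expr >> 16;	/* (expr)@high/@h: bits 16-31 */
	uint16_t low     = expr;	/* (expr)@l:       bits 0-15  */

	uint64_t reg;
	reg  = (uint64_t)highest << 16;	/* lis    reg,(expr)@highest       */
	reg |= higher;			/* ori    reg,reg,(expr)@higher    */
	reg <<= 32;			/* rldicr reg,reg,32,31            */
	reg |= (uint64_t)high << 16;	/* oris   reg,reg,(expr)@__AS_ATHIGH */
	reg |= low;			/* ori    reg,reg,(expr)@l         */
	return reg;
}

int main(void)
{
	assert(load_reg_immediate(0x1234567890abcdefULL) == 0x1234567890abcdefULL);
	return 0;
}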
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index d0e784e0ff48..521790330672 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -39,6 +39,17 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
 		(unsigned long)_stext < end;
 }
 
+static inline int overlaps_kvm_tmp(unsigned long start, unsigned long end)
+{
+#ifdef CONFIG_KVM_GUEST
+	extern char kvm_tmp[];
+	return start < (unsigned long)kvm_tmp &&
+		(unsigned long)&kvm_tmp[1024 * 1024] < end;
+#else
+	return 0;
+#endif
+}
+
 #undef dereference_function_descriptor
 static inline void *dereference_function_descriptor(void *ptr)
 {
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 3ddf70276706..ea4dc3a89c1f 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -361,3 +361,4 @@ SYSCALL(finit_module)
 SYSCALL(ni_syscall) /* sys_kcmp */
 SYSCALL_SPU(sched_setattr)
 SYSCALL_SPU(sched_getattr)
+SYSCALL_SPU(renameat2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4494f029b632..9b892bbd9d84 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls 357
+#define __NR_syscalls 358
 
 #define __NR__exit __NR_exit
 #define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 881bf2e2560d..2d526f7b48da 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -379,5 +379,6 @@
 #define __NR_kcmp 354
 #define __NR_sched_setattr 355
 #define __NR_sched_getattr 356
+#define __NR_renameat2 357
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
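For context (not part of the patch), a minimal userspace sketch of exercising the newly wired syscall; the fallback number 357 matches the uapi header above, and RENAME_NOREPLACE is assumed to carry its linux/fs.h value:

#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_renameat2
#define __NR_renameat2 357		/* powerpc value wired up by this patch */
#endif
#ifndef RENAME_NOREPLACE
#define RENAME_NOREPLACE (1 << 0)	/* assumed, from linux/fs.h */
#endif

int main(void)
{
	/* Rename "old" to "new", failing with EEXIST instead of replacing. */
	long ret = syscall(__NR_renameat2, AT_FDCWD, "old", AT_FDCWD, "new",
			   RENAME_NOREPLACE);
	if (ret == -1) {
		perror("renameat2");
		return 1;
	}
	return 0;
}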
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 6a0175297b0d..dd8695f6cb6d 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -74,7 +74,7 @@
 #define KVM_INST_MTSRIN	0x7c0001e4
 
 static bool kvm_patching_worked = true;
-static char kvm_tmp[1024 * 1024];
+char kvm_tmp[1024 * 1024];
 static int kvm_tmp_index;
 
 static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 59d229a2a3e0..879b3aacac32 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -237,7 +237,7 @@ static void wake_offline_cpus(void)
 		if (!cpu_online(cpu)) {
 			printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
 			       cpu);
-			cpu_up(cpu);
+			WARN_ON(cpu_up(cpu));
 		}
 	}
 }
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 94e597e6f15c..7af190a266b3 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -886,7 +886,7 @@ static int kvmppc_book3s_init(void)
 	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
 	if (r)
 		return r;
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	r = kvmppc_book3s_init_pr();
 #endif
 	return r;
@@ -895,7 +895,7 @@ static int kvmppc_book3s_init(void)
 
 static void kvmppc_book3s_exit(void)
 {
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	kvmppc_book3s_exit_pr();
 #endif
 	kvm_exit();
@@ -905,7 +905,7 @@ module_init(kvmppc_book3s_init);
 module_exit(kvmppc_book3s_exit);
 
 /* On 32bit this is our one and only kernel module */
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 MODULE_ALIAS_MISCDEV(KVM_MINOR);
 MODULE_ALIAS("devname:kvm");
 #endif
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 1d6c56ad5b60..8fcc36306a02 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -234,7 +234,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		pte_size = psize;
 		pte = lookup_linux_pte_and_update(pgdir, hva, writing,
 						  &pte_size);
-		if (pte_present(pte)) {
+		if (pte_present(pte) && !pte_numa(pte)) {
 			if (writing && !pte_write(pte))
 				/* make the actual HPTE be read-only */
 				ptel = hpte_make_readonly(ptel);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b031f932c0cc..07c8b5b0f9d2 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1323,6 +1323,110 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	mr	r3, r9
 	bl	kvmppc_save_fp
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+	b	2f
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+	/* Turn on TM. */
+	mfmsr	r8
+	li	r0, 1
+	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+	mtmsrd	r8
+
+	ld	r5, VCPU_MSR(r9)
+	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+	beq	1f	/* TM not active in guest. */
+
+	li	r3, TM_CAUSE_KVM_RESCHED
+
+	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+	li	r5, 0
+	mtmsrd	r5, 1
+
+	/* All GPRs are volatile at this point. */
+	TRECLAIM(R3)
+
+	/* Temporarily store r13 and r9 so we have some regs to play with */
+	SET_SCRATCH0(r13)
+	GET_PACA(r13)
+	std	r9, PACATMSCRATCH(r13)
+	ld	r9, HSTATE_KVM_VCPU(r13)
+
+	/* Get a few more GPRs free. */
+	std	r29, VCPU_GPRS_TM(29)(r9)
+	std	r30, VCPU_GPRS_TM(30)(r9)
+	std	r31, VCPU_GPRS_TM(31)(r9)
+
+	/* Save away PPR and DSCR soon so don't run with user values. */
+	mfspr	r31, SPRN_PPR
+	HMT_MEDIUM
+	mfspr	r30, SPRN_DSCR
+	ld	r29, HSTATE_DSCR(r13)
+	mtspr	SPRN_DSCR, r29
+
+	/* Save all but r9, r13 & r29-r31 */
+	reg = 0
+	.rept	29
+	.if (reg != 9) && (reg != 13)
+	std	reg, VCPU_GPRS_TM(reg)(r9)
+	.endif
+	reg = reg + 1
+	.endr
+	/* ... now save r13 */
+	GET_SCRATCH0(r4)
+	std	r4, VCPU_GPRS_TM(13)(r9)
+	/* ... and save r9 */
+	ld	r4, PACATMSCRATCH(r13)
+	std	r4, VCPU_GPRS_TM(9)(r9)
+
+	/* Reload stack pointer and TOC. */
+	ld	r1, HSTATE_HOST_R1(r13)
+	ld	r2, PACATOC(r13)
+
+	/* Set MSR RI now we have r1 and r13 back. */
+	li	r5, MSR_RI
+	mtmsrd	r5, 1
+
+	/* Save away checkpinted SPRs. */
+	std	r31, VCPU_PPR_TM(r9)
+	std	r30, VCPU_DSCR_TM(r9)
+	mflr	r5
+	mfcr	r6
+	mfctr	r7
+	mfspr	r8, SPRN_AMR
+	mfspr	r10, SPRN_TAR
+	std	r5, VCPU_LR_TM(r9)
+	stw	r6, VCPU_CR_TM(r9)
+	std	r7, VCPU_CTR_TM(r9)
+	std	r8, VCPU_AMR_TM(r9)
+	std	r10, VCPU_TAR_TM(r9)
+
+	/* Restore r12 as trap number. */
+	lwz	r12, VCPU_TRAP(r9)
+
+	/* Save FP/VSX. */
+	addi	r3, r9, VCPU_FPRS_TM
+	bl	.store_fp_state
+	addi	r3, r9, VCPU_VRS_TM
+	bl	.store_vr_state
+	mfspr	r6, SPRN_VRSAVE
+	stw	r6, VCPU_VRSAVE_TM(r9)
+1:
+	/*
+	 * We need to save these SPRs after the treclaim so that the software
+	 * error code is recorded correctly in the TEXASR. Also the user may
+	 * change these outside of a transaction, so they must always be
+	 * context switched.
+	 */
+	mfspr	r5, SPRN_TFHAR
+	mfspr	r6, SPRN_TFIAR
+	mfspr	r7, SPRN_TEXASR
+	std	r5, VCPU_TFHAR(r9)
+	std	r6, VCPU_TFIAR(r9)
+	std	r7, VCPU_TEXASR(r9)
+2:
+#endif
+
 	/* Increment yield count if they have a VPA */
 	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
 	cmpdi	r8, 0
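For context (not part of the patch), a rough C restatement of the guard at the top of the added block, assuming MSR_TS_S_LG names the low bit of the two-bit MSR[TS] field as in asm/reg.h: the rldicl./beq pair skips the whole checkpointed-state save unless the guest was in or suspended from a transaction.

#include <stdio.h>

#define MSR_TS_S_LG 33	/* assumed value of the kernel macro (asm/reg.h) */

/* Mirrors: rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 ; beq 1f */
static int guest_tm_needs_reclaim(unsigned long long guest_msr)
{
	return ((guest_msr >> MSR_TS_S_LG) & 3) != 0;
}

int main(void)
{
	printf("%d\n", guest_tm_needs_reclaim(0));			/* 0: skip TM save */
	printf("%d\n", guest_tm_needs_reclaim(1ULL << MSR_TS_S_LG));	/* 1: reclaim/save */
	return 0;
}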
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index c5c052a9729c..02f1defd8bb9 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1153,7 +1153,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 		goto free_vcpu;
 	vcpu->arch.book3s = vcpu_book3s;
 
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	vcpu->arch.shadow_vcpu =
 		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
 	if (!vcpu->arch.shadow_vcpu)
@@ -1198,7 +1198,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
 uninit_vcpu:
 	kvm_vcpu_uninit(vcpu);
 free_shadow_vcpu:
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	kfree(vcpu->arch.shadow_vcpu);
 free_vcpu3s:
 #endif
@@ -1215,7 +1215,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 
 	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 	kvm_vcpu_uninit(vcpu);
-#ifdef CONFIG_KVM_BOOK3S_32
+#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
 	kfree(vcpu->arch.shadow_vcpu);
 #endif
 	vfree(vcpu_book3s);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d766d6ee33fe..06ba83b036d3 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -207,6 +207,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		if (overlaps_kernel_text(vaddr, vaddr + step))
 			tprot &= ~HPTE_R_N;
 
+		/* Make kvm guest trampolines executable */
+		if (overlaps_kvm_tmp(vaddr, vaddr + step))
+			tprot &= ~HPTE_R_N;
+
 		/*
 		 * If relocatable, check if it overlaps interrupt vectors that
 		 * are copied down to real 0. For relocatable kernel