Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c     |  2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c     |  7
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S |  2
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S    |  4
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S    |  6
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c          | 65
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c        |  3
7 files changed, 33 insertions(+), 56 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 80561074078d..68468d695f12 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 			goto out;
 		}
 		if (!rma_setup && is_vrma_hpte(v)) {
-			unsigned long psize = hpte_page_size(v, r);
+			unsigned long psize = hpte_base_page_size(v, r);
 			unsigned long senc = slb_pgsize_encoding(psize);
 			unsigned long lpcr;
 
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6e6224318c36..5a24d3c2b6b8 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 		r = hpte[i+1];
 
 		/*
-		 * Check the HPTE again, including large page size
-		 * Since we don't currently allow any MPSS (mixed
-		 * page-size segment) page sizes, it is sufficient
-		 * to check against the actual page size.
+		 * Check the HPTE again, including base page size
 		 */
 		if ((v & valid) && (v & mask) == val &&
-		    hpte_page_size(v, r) == (1ul << pshift))
+		    hpte_base_page_size(v, r) == (1ul << pshift))
 			/* Return with the HPTE still locked */
 			return (hash << 3) + (i >> 1);
 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 868347ef09fd..558a67df8126 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -48,7 +48,7 @@
  *
  * LR = return address to continue at after eventually re-enabling MMU
  */
-_GLOBAL(kvmppc_hv_entry_trampoline)
+_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
 	mflr	r0
 	std	r0, PPC_LR_STKOFF(r1)
 	stdu	r1, -112(r1)
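
Note on the _GLOBAL_TOC change above (commentary, not part of the patch): under the ELFv2 (ABIv2) ABI there are no function descriptors, and a function entered at its global entry point (for example when called through a pointer or from another translation unit) receives its own address in r12 and must establish the TOC pointer in r2 itself before using TOC-relative addressing such as LOAD_REG_ADDR. The sketch below shows the general ABIv2 global-entry idiom; the macro name is hypothetical, and the kernel's real definition lives in arch/powerpc/include/asm/ppc_asm.h and may differ in detail.

/* Illustrative ABIv2-aware entry macro (hypothetical name, not the kernel's). */
#if defined(_CALL_ELF) && _CALL_ELF == 2
#define EXAMPLE_GLOBAL_TOC(name)		\
	.globl name;				\
name:						\
0:	addis	r2,r12,(.TOC.-0b)@ha;		\
	addi	r2,r2,(.TOC.-0b)@l;		\
	.localentry name,.-name
#else
#define EXAMPLE_GLOBAL_TOC(name) _GLOBAL(name)	/* ELFv1: the caller loads r2 from the descriptor */
#endif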
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index e2c29e381dc7..d044b8b7c69d 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,7 +25,11 @@
 #include <asm/exception-64s.h>
 
 #if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)		name
+#else
 #define FUNC(name)		GLUE(.,name)
+#endif
 #define GET_SHADOW_VCPU(reg)	addi	reg, r13, PACA_SVCPU
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
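
Note on the FUNC() hunks (commentary, not part of the patch): on 64-bit ELFv1 a function symbol names its descriptor, so assembly must branch to the dot-symbol text entry, hence GLUE(.,name); ELFv2 drops descriptors, so the plain symbol is already the entry point. A minimal self-contained sketch of the same pattern (GLUE is redefined here only to make the example stand alone):

/* Hypothetical stand-alone version of the pattern the hunk introduces. */
#define __GLUE(a, b)	a##b
#define GLUE(a, b)	__GLUE(a, b)

#if defined(_CALL_ELF) && _CALL_ELF == 2
#define FUNC(name)	name		/* ELFv2: bl FUNC(foo) assembles to bl foo  */
#else
#define FUNC(name)	GLUE(.,name)	/* ELFv1: bl FUNC(foo) assembles to bl .foo */
#endif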
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 9eec675220e6..16c4d88ba27d 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -36,7 +36,11 @@
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)		name
+#else
 #define FUNC(name)		GLUE(.,name)
+#endif
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
  * On entry, r4 contains the guest shadow MSR
  * MSR.EE has to be 0 when calling this function
  */
-_GLOBAL(kvmppc_entry_trampoline)
+_GLOBAL_TOC(kvmppc_entry_trampoline)
 	mfmsr	r5
 	LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
 	toreal(r7)
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index edb14ba992b3..ef27fbd5d9c5 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq, server, priority;
 	int rc;
 
-	if (args->nargs != 3 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
-	server = args->args[1];
-	priority = args->args[2];
+	irq = be32_to_cpu(args->args[0]);
+	server = be32_to_cpu(args->args[1]);
+	priority = be32_to_cpu(args->args[2]);
 
 	rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq, server, priority;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 3) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	server = priority = 0;
 	rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 		goto out;
 	}
 
-	args->rets[1] = server;
-	args->rets[2] = priority;
+	args->rets[1] = cpu_to_be32(server);
+	args->rets[2] = cpu_to_be32(priority);
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	rc = kvmppc_xics_int_off(vcpu->kvm, irq);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	u32 irq;
 	int rc;
 
-	if (args->nargs != 1 || args->nret != 1) {
+	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
 		rc = -3;
 		goto out;
 	}
 
-	irq = args->args[0];
+	irq = be32_to_cpu(args->args[0]);
 
 	rc = kvmppc_xics_int_on(vcpu->kvm, irq);
 	if (rc)
 		rc = -3;
 out:
-	args->rets[0] = rc;
+	args->rets[0] = cpu_to_be32(rc);
 }
 #endif /* CONFIG_KVM_XICS */
 
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
 	return rc;
 }
 
-static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-	int i;
-
-	args->token = be32_to_cpu(args->token);
-	args->nargs = be32_to_cpu(args->nargs);
-	args->nret = be32_to_cpu(args->nret);
-	for (i = 0; i < args->nargs; i++)
-		args->args[i] = be32_to_cpu(args->args[i]);
-#endif
-}
-
-static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-	int i;
-
-	for (i = 0; i < args->nret; i++)
-		args->args[i] = cpu_to_be32(args->args[i]);
-	args->token = cpu_to_be32(args->token);
-	args->nargs = cpu_to_be32(args->nargs);
-	args->nret = cpu_to_be32(args->nret);
-#endif
-}
-
 int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 {
 	struct rtas_token_definition *d;
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	if (rc)
 		goto fail;
 
-	kvmppc_rtas_swap_endian_in(&args);
-
 	/*
 	 * args->rets is a pointer into args->args. Now that we've
 	 * copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	 * value so we can restore it on the way out.
 	 */
 	orig_rets = args.rets;
-	args.rets = &args.args[args.nargs];
+	args.rets = &args.args[be32_to_cpu(args.nargs)];
 
 	mutex_lock(&vcpu->kvm->lock);
 
 	rc = -ENOENT;
 	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
-		if (d->token == args.token) {
+		if (d->token == be32_to_cpu(args.token)) {
 			d->handler->handler(vcpu, &args);
 			rc = 0;
 			break;
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 
 	if (rc == 0) {
 		args.rets = orig_rets;
-		kvmppc_rtas_swap_endian_out(&args);
 		rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
 		if (rc)
 			goto fail;
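
Note on the book3s_rtas.c changes (commentary, not from the patch): rather than byte-swapping the whole rtas_args block in place on entry and exit, each field is now converted with be32_to_cpu()/cpu_to_be32() exactly where it is read or written, since RTAS argument buffers are big-endian per PAPR regardless of host or guest endianness. Below is a minimal sketch of a handler written in that style; the handler name is hypothetical and the surrounding includes are assumed to match book3s_rtas.c.

/* Hypothetical handler illustrating per-field endian conversion. */
static void example_rtas_handler(struct kvm_vcpu *vcpu, struct rtas_args *args)
{
	u32 irq;
	int rc = -3;				/* RTAS status -3: parameter error */

	/* nargs/nret arrive big-endian from the guest. */
	if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1)
		goto out;

	irq = be32_to_cpu(args->args[0]);	/* convert each argument as it is read */
	pr_debug("%s: irq %u\n", __func__, irq);
	rc = 0;					/* ... act on irq here ... */
out:
	args->rets[0] = cpu_to_be32(rc);	/* convert the status as it is written */
}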
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index dd2cc03f406f..86903d3f5a03 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		if (printk_ratelimit())
 			pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
 				__func__, (long)gfn, pfn);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
 