author		Alexander Graf <agraf@suse.de>	2011-08-08 11:21:15 -0400
committer	Avi Kivity <avi@redhat.com>	2011-09-25 12:52:24 -0400
commit		0254f0742998dc61fcf68a3488e2d93636031263 (patch)
tree		5217bf195c44418f0365310f9f29b475a67a3b1e /arch/powerpc
parent		a15bd354f083f20f257db450488db52ac27df439 (diff)
KVM: PPC: Add PAPR hypercall code for PR mode
When running a PAPR guest, we need to handle a few hypercalls in kernel
space, most prominently the page table invalidation (to sync the shadows).

So this patch adds handling for a few PAPR hypercalls to PR mode KVM. I
tried to share the code with HV mode, but it ended up being a lot easier
this way around, as the two differ too much in those details.

Signed-off-by: Alexander Graf <agraf@suse.de>

---

v1 -> v2:

  - whitespace fix
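As background for the handlers below: a PAPR guest issues these hypercalls by placing the hypercall token in r3 and the arguments in r4 onwards, then executing "sc 1"; the status comes back in r3. The wrapper below is only an illustrative sketch of that calling convention (papr_hcall4() is a made-up name and the clobber list is approximate), not code from this series.

/*
 * Illustration only: guest-side shape of the PAPR hypercall ABI that the
 * handlers in this patch service.  Token in r3, arguments in r4..r7,
 * "sc 1", status returned in r3 (H_SUCCESS, H_NOT_FOUND, H_PTEG_FULL, ...).
 * The clobber list is approximate; real callers save/restore more state.
 */
static inline long papr_hcall4(unsigned long token, unsigned long a1,
			       unsigned long a2, unsigned long a3,
			       unsigned long a4)
{
	register unsigned long r3 asm("r3") = token;
	register unsigned long r4 asm("r4") = a1;
	register unsigned long r5 asm("r5") = a2;
	register unsigned long r6 asm("r6") = a3;
	register unsigned long r7 asm("r7") = a4;

	asm volatile("sc 1"
		     : "+r" (r3), "+r" (r4), "+r" (r5), "+r" (r6), "+r" (r7)
		     :
		     : "memory", "cc");
	return r3;
}

/* e.g. an HPTE insert:  papr_hcall4(H_ENTER, flags, pte_index, pteh, ptel); */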
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/kvm_book3s.h	1
-rw-r--r--	arch/powerpc/kvm/Makefile	1
-rw-r--r--	arch/powerpc/kvm/book3s_pr_papr.c	158
3 files changed, 160 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 472437b7b85d..91d41fabc5b0 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -150,6 +150,7 @@ extern void kvmppc_load_up_altivec(void);
 extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
+extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 08428e2c188d..4c66d51dbd38 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -43,6 +43,7 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
 	fpu.o \
 	book3s_paired_singles.o \
 	book3s_pr.o \
+	book3s_pr_papr.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
 	book3s_mmu_hpte.o \
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
new file mode 100644
index 000000000000..b9589324797b
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2011. Freescale Inc. All rights reserved.
+ *
+ * Authors:
+ *    Alexander Graf <agraf@suse.de>
+ *    Paul Mackerras <paulus@samba.org>
+ *
+ * Description:
+ *
+ * Hypercall handling for running PAPR guests in PR KVM on Book 3S
+ * processors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/uaccess.h>
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+
+static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index)
+{
+	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+	unsigned long pteg_addr;
+
+	pte_index <<= 4;
+	pte_index &= ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1) << 7 | 0x70;
+	pteg_addr = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
+	pteg_addr |= pte_index;
+
+	return pteg_addr;
+}
+
+static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
+{
+	long flags = kvmppc_get_gpr(vcpu, 4);
+	long pte_index = kvmppc_get_gpr(vcpu, 5);
+	unsigned long pteg[2 * 8];
+	unsigned long pteg_addr, i, *hpte;
+
+	pte_index &= ~7UL;
+	pteg_addr = get_pteg_addr(vcpu, pte_index);
+
+	copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
+	hpte = pteg;
+
+	if (likely((flags & H_EXACT) == 0)) {
+		pte_index &= ~7UL;
+		for (i = 0; ; ++i) {
+			if (i == 8)
+				return H_PTEG_FULL;
+			if ((*hpte & HPTE_V_VALID) == 0)
+				break;
+			hpte += 2;
+		}
+	} else {
+		i = kvmppc_get_gpr(vcpu, 5) & 7UL;
+		hpte += i * 2;
+	}
+
+	hpte[0] = kvmppc_get_gpr(vcpu, 6);
+	hpte[1] = kvmppc_get_gpr(vcpu, 7);
+	copy_to_user((void __user *)pteg_addr, pteg, sizeof(pteg));
+	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+	kvmppc_set_gpr(vcpu, 4, pte_index | i);
+
+	return EMULATE_DONE;
+}
+
+static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
+{
+	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
+	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
+	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
+	unsigned long v = 0, pteg, rb;
+	unsigned long pte[2];
+
+	pteg = get_pteg_addr(vcpu, pte_index);
+	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+
+	if ((pte[0] & HPTE_V_VALID) == 0 ||
+	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
+	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) {
+		kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
+		return EMULATE_DONE;
+	}
+
+	copy_to_user((void __user *)pteg, &v, sizeof(v));
+
+	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
+	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
+
+	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+	kvmppc_set_gpr(vcpu, 4, pte[0]);
+	kvmppc_set_gpr(vcpu, 5, pte[1]);
+
+	return EMULATE_DONE;
+}
+
+static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
+{
+	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
+	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
+	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
+	unsigned long rb, pteg, r, v;
+	unsigned long pte[2];
+
+	pteg = get_pteg_addr(vcpu, pte_index);
+	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+
+	if ((pte[0] & HPTE_V_VALID) == 0 ||
+	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) {
+		kvmppc_set_gpr(vcpu, 3, H_NOT_FOUND);
+		return EMULATE_DONE;
+	}
+
+	v = pte[0];
+	r = pte[1];
+	r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
+	       HPTE_R_KEY_LO);
+	r |= (flags << 55) & HPTE_R_PP0;
+	r |= (flags << 48) & HPTE_R_KEY_HI;
+	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
+
+	pte[1] = r;
+
+	rb = compute_tlbie_rb(v, r, pte_index);
+	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
+	copy_to_user((void __user *)pteg, pte, sizeof(pte));
+
+	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+
+	return EMULATE_DONE;
+}
+
+int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
+{
+	switch (cmd) {
+	case H_ENTER:
+		return kvmppc_h_pr_enter(vcpu);
+	case H_REMOVE:
+		return kvmppc_h_pr_remove(vcpu);
+	case H_PROTECT:
+		return kvmppc_h_pr_protect(vcpu);
+	case H_BULK_REMOVE:
+		/* We just flush all PTEs, so user space can
+		   handle the HPT modifications */
+		kvmppc_mmu_pte_flush(vcpu, 0, 0);
+		break;
+	case H_CEDE:
+		kvm_vcpu_block(vcpu);
+		vcpu->stat.halt_wakeup++;
+		return EMULATE_DONE;
+	}
+
+	return EMULATE_FAIL;
+}
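Note that this patch only adds the handlers; the hook from the PR-mode exit path is not part of this diff. For orientation, the sketch below shows roughly how a hypercall exit ("sc 1" from the guest) could be routed into kvmppc_h_pr(). handle_papr_syscall_exit() is a hypothetical name used only for illustration.

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

/*
 * Sketch only (not part of this patch): rough shape of the dispatch from
 * the PR-mode syscall-exit handling into kvmppc_h_pr().
 */
static int handle_papr_syscall_exit(struct kvm_vcpu *vcpu)
{
	/* For a PAPR guest, "sc 1" carries the hypercall token in r3. */
	unsigned long cmd = kvmppc_get_gpr(vcpu, 3);

	if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
		return RESUME_GUEST;	/* handled in kernel, re-enter guest */

	/* Anything not handled here is deferred to user space (QEMU). */
	return RESUME_HOST;
}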