author     Alexander Graf <agraf@suse.de>            2010-01-07 20:58:01 -0500
committer  Marcelo Tosatti <mtosatti@redhat.com>     2010-03-01 10:35:47 -0500
commit     8e5b26b55a8b6aee2c789b1d20ec715f9e4bea5c
tree       4e2d003852ce327a47153b6c100239c6d8e1418f /arch/powerpc/kvm/book3s_64_emulate.c
parent     0d178975d0a5afe5e0fd3211bd1397905b225be5
KVM: PPC: Use accessor functions for GPR access
All code in PPC KVM currently accesses GPRs in the vcpu struct directly.
While there is nothing wrong with that with respect to the way GPRs are
currently stored and loaded, it does not suffice for the PACA acceleration
that will follow in this patchset.
So let's just create small inline wrapper functions that we call whenever
a GPR needs to be read from or written to. The compiled code shouldn't really
change at all for now.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
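The accessors themselves are introduced elsewhere in this patchset and are not part of this file's diff. As a minimal sketch, assuming they live in a shared header such as arch/powerpc/include/asm/kvm_ppc.h and simply forward to the existing gpr array, they would look roughly like this, which is also why the compiled code is not expected to change yet:

/* Sketch of the GPR accessors the hunks below call into; the real
 * definitions are added elsewhere in this patchset (assumed here to be
 * arch/powerpc/include/asm/kvm_ppc.h) and may differ in detail. */
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
        /* Plain store into the vcpu register file for now. */
        vcpu->arch.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
        /* Plain load from the vcpu register file for now. */
        return vcpu->arch.gpr[num];
}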
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_emulate.c')
 -rw-r--r--  arch/powerpc/kvm/book3s_64_emulate.c | 77
 1 file changed, 40 insertions(+), 37 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
index 1027eac6d474..2b0ee7e040c9 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -65,11 +65,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
         case 31:
                 switch (get_xop(inst)) {
                 case OP_31_XOP_MFMSR:
-                        vcpu->arch.gpr[get_rt(inst)] = vcpu->arch.msr;
+                        kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
                         break;
                 case OP_31_XOP_MTMSRD:
                 {
-                        ulong rs = vcpu->arch.gpr[get_rs(inst)];
+                        ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
                         if (inst & 0x10000) {
                                 vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
                                 vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
@@ -78,30 +78,30 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         break;
                 }
                 case OP_31_XOP_MTMSR:
-                        kvmppc_set_msr(vcpu, vcpu->arch.gpr[get_rs(inst)]);
+                        kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
                         break;
                 case OP_31_XOP_MFSRIN:
                 {
                         int srnum;
 
-                        srnum = (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf;
+                        srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
                         if (vcpu->arch.mmu.mfsrin) {
                                 u32 sr;
                                 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-                                vcpu->arch.gpr[get_rt(inst)] = sr;
+                                kvmppc_set_gpr(vcpu, get_rt(inst), sr);
                         }
                         break;
                 }
                 case OP_31_XOP_MTSRIN:
                         vcpu->arch.mmu.mtsrin(vcpu,
-                                (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf,
-                                vcpu->arch.gpr[get_rs(inst)]);
+                                (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
+                                kvmppc_get_gpr(vcpu, get_rs(inst)));
                         break;
                 case OP_31_XOP_TLBIE:
                 case OP_31_XOP_TLBIEL:
                 {
                         bool large = (inst & 0x00200000) ? true : false;
-                        ulong addr = vcpu->arch.gpr[get_rb(inst)];
+                        ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
                         vcpu->arch.mmu.tlbie(vcpu, addr, large);
                         break;
                 }
@@ -111,14 +111,16 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         if (!vcpu->arch.mmu.slbmte)
                                 return EMULATE_FAIL;
 
-                        vcpu->arch.mmu.slbmte(vcpu, vcpu->arch.gpr[get_rs(inst)],
-                                              vcpu->arch.gpr[get_rb(inst)]);
+                        vcpu->arch.mmu.slbmte(vcpu,
+                                        kvmppc_get_gpr(vcpu, get_rs(inst)),
+                                        kvmppc_get_gpr(vcpu, get_rb(inst)));
                         break;
                 case OP_31_XOP_SLBIE:
                         if (!vcpu->arch.mmu.slbie)
                                 return EMULATE_FAIL;
 
-                        vcpu->arch.mmu.slbie(vcpu, vcpu->arch.gpr[get_rb(inst)]);
+                        vcpu->arch.mmu.slbie(vcpu,
+                                        kvmppc_get_gpr(vcpu, get_rb(inst)));
                         break;
                 case OP_31_XOP_SLBIA:
                         if (!vcpu->arch.mmu.slbia)
@@ -132,9 +134,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         } else {
                                 ulong t, rb;
 
-                                rb = vcpu->arch.gpr[get_rb(inst)];
+                                rb = kvmppc_get_gpr(vcpu, get_rb(inst));
                                 t = vcpu->arch.mmu.slbmfee(vcpu, rb);
-                                vcpu->arch.gpr[get_rt(inst)] = t;
+                                kvmppc_set_gpr(vcpu, get_rt(inst), t);
                         }
                         break;
                 case OP_31_XOP_SLBMFEV:
@@ -143,20 +145,20 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                         } else {
                                 ulong t, rb;
 
-                                rb = vcpu->arch.gpr[get_rb(inst)];
+                                rb = kvmppc_get_gpr(vcpu, get_rb(inst));
                                 t = vcpu->arch.mmu.slbmfev(vcpu, rb);
-                                vcpu->arch.gpr[get_rt(inst)] = t;
+                                kvmppc_set_gpr(vcpu, get_rt(inst), t);
                         }
                         break;
                 case OP_31_XOP_DCBZ:
                 {
-                        ulong rb = vcpu->arch.gpr[get_rb(inst)];
+                        ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
                         ulong ra = 0;
                         ulong addr;
                         u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
 
                         if (get_ra(inst))
-                                ra = vcpu->arch.gpr[get_ra(inst)];
+                                ra = kvmppc_get_gpr(vcpu, get_ra(inst));
 
                         addr = (ra + rb) & ~31ULL;
                         if (!(vcpu->arch.msr & MSR_SF))
@@ -233,43 +235,44 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
 int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 {
         int emulated = EMULATE_DONE;
+        ulong spr_val = kvmppc_get_gpr(vcpu, rs);
 
         switch (sprn) {
         case SPRN_SDR1:
-                to_book3s(vcpu)->sdr1 = vcpu->arch.gpr[rs];
+                to_book3s(vcpu)->sdr1 = spr_val;
                 break;
         case SPRN_DSISR:
-                to_book3s(vcpu)->dsisr = vcpu->arch.gpr[rs];
+                to_book3s(vcpu)->dsisr = spr_val;
                 break;
         case SPRN_DAR:
-                vcpu->arch.dear = vcpu->arch.gpr[rs];
+                vcpu->arch.dear = spr_val;
                 break;
         case SPRN_HIOR:
-                to_book3s(vcpu)->hior = vcpu->arch.gpr[rs];
+                to_book3s(vcpu)->hior = spr_val;
                 break;
         case SPRN_IBAT0U ... SPRN_IBAT3L:
         case SPRN_IBAT4U ... SPRN_IBAT7L:
         case SPRN_DBAT0U ... SPRN_DBAT3L:
         case SPRN_DBAT4U ... SPRN_DBAT7L:
-                kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]);
+                kvmppc_write_bat(vcpu, sprn, (u32)spr_val);
                 /* BAT writes happen so rarely that we're ok to flush
                  * everything here */
                 kvmppc_mmu_pte_flush(vcpu, 0, 0);
                 break;
         case SPRN_HID0:
-                to_book3s(vcpu)->hid[0] = vcpu->arch.gpr[rs];
+                to_book3s(vcpu)->hid[0] = spr_val;
                 break;
         case SPRN_HID1:
-                to_book3s(vcpu)->hid[1] = vcpu->arch.gpr[rs];
+                to_book3s(vcpu)->hid[1] = spr_val;
                 break;
         case SPRN_HID2:
-                to_book3s(vcpu)->hid[2] = vcpu->arch.gpr[rs];
+                to_book3s(vcpu)->hid[2] = spr_val;
                 break;
         case SPRN_HID4:
-                to_book3s(vcpu)->hid[4] = vcpu->arch.gpr[rs];
+                to_book3s(vcpu)->hid[4] = spr_val;
                 break;
         case SPRN_HID5:
-                to_book3s(vcpu)->hid[5] = vcpu->arch.gpr[rs];
+                to_book3s(vcpu)->hid[5] = spr_val;
                 /* guest HID5 set can change is_dcbz32 */
                 if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                     (mfmsr() & MSR_HV))
@@ -299,38 +302,38 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
 
         switch (sprn) {
         case SPRN_SDR1:
-                vcpu->arch.gpr[rt] = to_book3s(vcpu)->sdr1;
+                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
                 break;
         case SPRN_DSISR:
-                vcpu->arch.gpr[rt] = to_book3s(vcpu)->dsisr;
+                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr);
                 break;
         case SPRN_DAR:
-                vcpu->arch.gpr[rt] = vcpu->arch.dear;
+                kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear);
                 break;
         case SPRN_HIOR:
-                vcpu->arch.gpr[rt] = to_book3s(vcpu)->hior;
+                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
                 break;
         case SPRN_HID0:
-                vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[0];
+                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
                 break;
         case SPRN_HID1:
-                vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[1];
+                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
                 break;
         case SPRN_HID2:
-                vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[2];
+                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
                 break;
         case SPRN_HID4:
-                vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[4];
+                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
                 break;
         case SPRN_HID5:
-                vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[5];
+                kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
                 break;
         case SPRN_THRM1:
         case SPRN_THRM2:
         case SPRN_THRM3:
         case SPRN_CTRLF:
         case SPRN_CTRLT:
-                vcpu->arch.gpr[rt] = 0;
+                kvmppc_set_gpr(vcpu, rt, 0);
                 break;
         default:
                 printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);