diff options
author | Hollis Blanchard <hollisb@us.ibm.com> | 2009-01-03 17:23:05 -0500 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2009-03-24 05:02:57 -0400 |
commit | cea5d8c9de669e30ed6d60930318376d5cc42e9e (patch) | |
tree | 35645c4bb1144268ad4b8f18f151ca9e3c2305e9 /arch/powerpc/kvm | |
parent | f44353610b584fcbc31e363f35594796c6446d63 (diff) |
KVM: ppc: use macros instead of hardcoded literals for instruction decoding
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r-- | arch/powerpc/kvm/emulate.c | 93 |
1 file changed, 63 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index d1d38daa93fb..a561d6e8da1c 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -30,6 +30,39 @@ | |||
30 | #include <asm/disassemble.h> | 30 | #include <asm/disassemble.h> |
31 | #include "timing.h" | 31 | #include "timing.h" |
32 | 32 | ||
33 | #define OP_TRAP 3 | ||
34 | |||
35 | #define OP_31_XOP_LWZX 23 | ||
36 | #define OP_31_XOP_LBZX 87 | ||
37 | #define OP_31_XOP_STWX 151 | ||
38 | #define OP_31_XOP_STBX 215 | ||
39 | #define OP_31_XOP_STBUX 247 | ||
40 | #define OP_31_XOP_LHZX 279 | ||
41 | #define OP_31_XOP_LHZUX 311 | ||
42 | #define OP_31_XOP_MFSPR 339 | ||
43 | #define OP_31_XOP_STHX 407 | ||
44 | #define OP_31_XOP_STHUX 439 | ||
45 | #define OP_31_XOP_MTSPR 467 | ||
46 | #define OP_31_XOP_DCBI 470 | ||
47 | #define OP_31_XOP_LWBRX 534 | ||
48 | #define OP_31_XOP_TLBSYNC 566 | ||
49 | #define OP_31_XOP_STWBRX 662 | ||
50 | #define OP_31_XOP_LHBRX 790 | ||
51 | #define OP_31_XOP_STHBRX 918 | ||
52 | |||
53 | #define OP_LWZ 32 | ||
54 | #define OP_LWZU 33 | ||
55 | #define OP_LBZ 34 | ||
56 | #define OP_LBZU 35 | ||
57 | #define OP_STW 36 | ||
58 | #define OP_STWU 37 | ||
59 | #define OP_STB 38 | ||
60 | #define OP_STBU 39 | ||
61 | #define OP_LHZ 40 | ||
62 | #define OP_LHZU 41 | ||
63 | #define OP_STH 44 | ||
64 | #define OP_STHU 45 | ||
65 | |||
33 | void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | 66 | void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) |
34 | { | 67 | { |
35 | if (vcpu->arch.tcr & TCR_DIE) { | 68 | if (vcpu->arch.tcr & TCR_DIE) { |
@@ -78,7 +111,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
78 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | 111 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); |
79 | 112 | ||
80 | switch (get_op(inst)) { | 113 | switch (get_op(inst)) { |
81 | case 3: /* trap */ | 114 | case OP_TRAP: |
82 | vcpu->arch.esr |= ESR_PTR; | 115 | vcpu->arch.esr |= ESR_PTR; |
83 | kvmppc_core_queue_program(vcpu); | 116 | kvmppc_core_queue_program(vcpu); |
84 | advance = 0; | 117 | advance = 0; |
@@ -87,31 +120,31 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
87 | case 31: | 120 | case 31: |
88 | switch (get_xop(inst)) { | 121 | switch (get_xop(inst)) { |
89 | 122 | ||
90 | case 23: /* lwzx */ | 123 | case OP_31_XOP_LWZX: |
91 | rt = get_rt(inst); | 124 | rt = get_rt(inst); |
92 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 125 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
93 | break; | 126 | break; |
94 | 127 | ||
95 | case 87: /* lbzx */ | 128 | case OP_31_XOP_LBZX: |
96 | rt = get_rt(inst); | 129 | rt = get_rt(inst); |
97 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 130 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
98 | break; | 131 | break; |
99 | 132 | ||
100 | case 151: /* stwx */ | 133 | case OP_31_XOP_STWX: |
101 | rs = get_rs(inst); | 134 | rs = get_rs(inst); |
102 | emulated = kvmppc_handle_store(run, vcpu, | 135 | emulated = kvmppc_handle_store(run, vcpu, |
103 | vcpu->arch.gpr[rs], | 136 | vcpu->arch.gpr[rs], |
104 | 4, 1); | 137 | 4, 1); |
105 | break; | 138 | break; |
106 | 139 | ||
107 | case 215: /* stbx */ | 140 | case OP_31_XOP_STBX: |
108 | rs = get_rs(inst); | 141 | rs = get_rs(inst); |
109 | emulated = kvmppc_handle_store(run, vcpu, | 142 | emulated = kvmppc_handle_store(run, vcpu, |
110 | vcpu->arch.gpr[rs], | 143 | vcpu->arch.gpr[rs], |
111 | 1, 1); | 144 | 1, 1); |
112 | break; | 145 | break; |
113 | 146 | ||
114 | case 247: /* stbux */ | 147 | case OP_31_XOP_STBUX: |
115 | rs = get_rs(inst); | 148 | rs = get_rs(inst); |
116 | ra = get_ra(inst); | 149 | ra = get_ra(inst); |
117 | rb = get_rb(inst); | 150 | rb = get_rb(inst); |
@@ -126,12 +159,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
126 | vcpu->arch.gpr[rs] = ea; | 159 | vcpu->arch.gpr[rs] = ea; |
127 | break; | 160 | break; |
128 | 161 | ||
129 | case 279: /* lhzx */ | 162 | case OP_31_XOP_LHZX: |
130 | rt = get_rt(inst); | 163 | rt = get_rt(inst); |
131 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 164 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
132 | break; | 165 | break; |
133 | 166 | ||
134 | case 311: /* lhzux */ | 167 | case OP_31_XOP_LHZUX: |
135 | rt = get_rt(inst); | 168 | rt = get_rt(inst); |
136 | ra = get_ra(inst); | 169 | ra = get_ra(inst); |
137 | rb = get_rb(inst); | 170 | rb = get_rb(inst); |
@@ -144,7 +177,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
144 | vcpu->arch.gpr[ra] = ea; | 177 | vcpu->arch.gpr[ra] = ea; |
145 | break; | 178 | break; |
146 | 179 | ||
147 | case 339: /* mfspr */ | 180 | case OP_31_XOP_MFSPR: |
148 | sprn = get_sprn(inst); | 181 | sprn = get_sprn(inst); |
149 | rt = get_rt(inst); | 182 | rt = get_rt(inst); |
150 | 183 | ||
@@ -185,7 +218,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
185 | } | 218 | } |
186 | break; | 219 | break; |
187 | 220 | ||
188 | case 407: /* sthx */ | 221 | case OP_31_XOP_STHX: |
189 | rs = get_rs(inst); | 222 | rs = get_rs(inst); |
190 | ra = get_ra(inst); | 223 | ra = get_ra(inst); |
191 | rb = get_rb(inst); | 224 | rb = get_rb(inst); |
@@ -195,7 +228,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
195 | 2, 1); | 228 | 2, 1); |
196 | break; | 229 | break; |
197 | 230 | ||
198 | case 439: /* sthux */ | 231 | case OP_31_XOP_STHUX: |
199 | rs = get_rs(inst); | 232 | rs = get_rs(inst); |
200 | ra = get_ra(inst); | 233 | ra = get_ra(inst); |
201 | rb = get_rb(inst); | 234 | rb = get_rb(inst); |
@@ -210,7 +243,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
210 | vcpu->arch.gpr[ra] = ea; | 243 | vcpu->arch.gpr[ra] = ea; |
211 | break; | 244 | break; |
212 | 245 | ||
213 | case 467: /* mtspr */ | 246 | case OP_31_XOP_MTSPR: |
214 | sprn = get_sprn(inst); | 247 | sprn = get_sprn(inst); |
215 | rs = get_rs(inst); | 248 | rs = get_rs(inst); |
216 | switch (sprn) { | 249 | switch (sprn) { |
@@ -246,7 +279,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
246 | } | 279 | } |
247 | break; | 280 | break; |
248 | 281 | ||
249 | case 470: /* dcbi */ | 282 | case OP_31_XOP_DCBI: |
250 | /* Do nothing. The guest is performing dcbi because | 283 | /* Do nothing. The guest is performing dcbi because |
251 | * hardware DMA is not snooped by the dcache, but | 284 | * hardware DMA is not snooped by the dcache, but |
252 | * emulated DMA either goes through the dcache as | 285 | * emulated DMA either goes through the dcache as |
@@ -254,15 +287,15 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
254 | * coherence. */ | 287 | * coherence. */ |
255 | break; | 288 | break; |
256 | 289 | ||
257 | case 534: /* lwbrx */ | 290 | case OP_31_XOP_LWBRX: |
258 | rt = get_rt(inst); | 291 | rt = get_rt(inst); |
259 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); | 292 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); |
260 | break; | 293 | break; |
261 | 294 | ||
262 | case 566: /* tlbsync */ | 295 | case OP_31_XOP_TLBSYNC: |
263 | break; | 296 | break; |
264 | 297 | ||
265 | case 662: /* stwbrx */ | 298 | case OP_31_XOP_STWBRX: |
266 | rs = get_rs(inst); | 299 | rs = get_rs(inst); |
267 | ra = get_ra(inst); | 300 | ra = get_ra(inst); |
268 | rb = get_rb(inst); | 301 | rb = get_rb(inst); |
@@ -272,12 +305,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
272 | 4, 0); | 305 | 4, 0); |
273 | break; | 306 | break; |
274 | 307 | ||
275 | case 790: /* lhbrx */ | 308 | case OP_31_XOP_LHBRX: |
276 | rt = get_rt(inst); | 309 | rt = get_rt(inst); |
277 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); | 310 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); |
278 | break; | 311 | break; |
279 | 312 | ||
280 | case 918: /* sthbrx */ | 313 | case OP_31_XOP_STHBRX: |
281 | rs = get_rs(inst); | 314 | rs = get_rs(inst); |
282 | ra = get_ra(inst); | 315 | ra = get_ra(inst); |
283 | rb = get_rb(inst); | 316 | rb = get_rb(inst); |
@@ -293,37 +326,37 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
293 | } | 326 | } |
294 | break; | 327 | break; |
295 | 328 | ||
296 | case 32: /* lwz */ | 329 | case OP_LWZ: |
297 | rt = get_rt(inst); | 330 | rt = get_rt(inst); |
298 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 331 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
299 | break; | 332 | break; |
300 | 333 | ||
301 | case 33: /* lwzu */ | 334 | case OP_LWZU: |
302 | ra = get_ra(inst); | 335 | ra = get_ra(inst); |
303 | rt = get_rt(inst); | 336 | rt = get_rt(inst); |
304 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 337 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
305 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 338 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; |
306 | break; | 339 | break; |
307 | 340 | ||
308 | case 34: /* lbz */ | 341 | case OP_LBZ: |
309 | rt = get_rt(inst); | 342 | rt = get_rt(inst); |
310 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 343 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
311 | break; | 344 | break; |
312 | 345 | ||
313 | case 35: /* lbzu */ | 346 | case OP_LBZU: |
314 | ra = get_ra(inst); | 347 | ra = get_ra(inst); |
315 | rt = get_rt(inst); | 348 | rt = get_rt(inst); |
316 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 349 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
317 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 350 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; |
318 | break; | 351 | break; |
319 | 352 | ||
320 | case 36: /* stw */ | 353 | case OP_STW: |
321 | rs = get_rs(inst); | 354 | rs = get_rs(inst); |
322 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 355 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], |
323 | 4, 1); | 356 | 4, 1); |
324 | break; | 357 | break; |
325 | 358 | ||
326 | case 37: /* stwu */ | 359 | case OP_STWU: |
327 | ra = get_ra(inst); | 360 | ra = get_ra(inst); |
328 | rs = get_rs(inst); | 361 | rs = get_rs(inst); |
329 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 362 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], |
@@ -331,13 +364,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
331 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 364 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; |
332 | break; | 365 | break; |
333 | 366 | ||
334 | case 38: /* stb */ | 367 | case OP_STB: |
335 | rs = get_rs(inst); | 368 | rs = get_rs(inst); |
336 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 369 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], |
337 | 1, 1); | 370 | 1, 1); |
338 | break; | 371 | break; |
339 | 372 | ||
340 | case 39: /* stbu */ | 373 | case OP_STBU: |
341 | ra = get_ra(inst); | 374 | ra = get_ra(inst); |
342 | rs = get_rs(inst); | 375 | rs = get_rs(inst); |
343 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 376 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], |
@@ -345,25 +378,25 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
345 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 378 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; |
346 | break; | 379 | break; |
347 | 380 | ||
348 | case 40: /* lhz */ | 381 | case OP_LHZ: |
349 | rt = get_rt(inst); | 382 | rt = get_rt(inst); |
350 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 383 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
351 | break; | 384 | break; |
352 | 385 | ||
353 | case 41: /* lhzu */ | 386 | case OP_LHZU: |
354 | ra = get_ra(inst); | 387 | ra = get_ra(inst); |
355 | rt = get_rt(inst); | 388 | rt = get_rt(inst); |
356 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 389 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
357 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 390 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; |
358 | break; | 391 | break; |
359 | 392 | ||
360 | case 44: /* sth */ | 393 | case OP_STH: |
361 | rs = get_rs(inst); | 394 | rs = get_rs(inst); |
362 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 395 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], |
363 | 2, 1); | 396 | 2, 1); |
364 | break; | 397 | break; |
365 | 398 | ||
366 | case 45: /* sthu */ | 399 | case OP_STHU: |
367 | ra = get_ra(inst); | 400 | ra = get_ra(inst); |
368 | rs = get_rs(inst); | 401 | rs = get_rs(inst); |
369 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 402 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], |