author     Xiantao Zhang <xiantao.zhang@intel.com>    2008-09-12 08:23:11 -0400
committer  Avi Kivity <avi@redhat.com>                2008-10-15 08:25:12 -0400
commit     81aec5227eedf9035e8544d8021ca6b8fb7c357a (patch)
tree       3e2166dd2b54cb433b2492c052fa5637e10d38fd /arch
parent     271b05281f7f485a0be8764860687ebb98459b80 (diff)
KVM: ia64: Implement a uniform vps interface
A uniform entry point, kvm_vps_entry, is added for
vps_sync_write/read and vps_resume_handler/guest;
it branches to the different PAL services according to the offset.
Signed-off-by: Anthony Xu <anthony.xu@intel.com>
Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
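The dispatch scheme is simple: every PAL VPS service sits at a fixed offset from the per-vcpu VSA base, so the uniform entry only has to add the requested offset to that base and branch there, while the named wrappers merely select the offset. The C sketch below illustrates that idea; it is not kernel code, and the struct, function names, and offset value are hypothetical stand-ins for the vsa_base field and the PAL_VPS_* offsets used in the assembly.

/*
 * Illustrative sketch only (not kernel code): each PAL VPS service lives
 * at a fixed offset from the per-vcpu VSA base.  A single uniform entry
 * adds the requested offset to that base and jumps there; the wrappers
 * only pick the offset.  All names and the offset value are hypothetical.
 */
typedef void (*vps_service_t)(void);      /* assumed service signature */

struct vcpu_arch_sketch {
	unsigned long vsa_base;           /* stand-in for the vcpu's VSA base pointer */
};

/* analogue of kvm_vps_entry: branch to vsa_base + offset */
static void vps_entry(struct vcpu_arch_sketch *arch, unsigned long offset)
{
	vps_service_t svc = (vps_service_t)(arch->vsa_base + offset);
	svc();                            /* "mov b0 = r29; br.sptk.many b0" in the patch */
}

/* analogue of kvm_vps_sync_read: select the offset, reuse the entry */
static void vps_sync_read(struct vcpu_arch_sketch *arch)
{
	vps_entry(arch, 0x100 /* stand-in for PAL_VPS_SYNC_READ */);
}

In the patch itself the same pattern is written in assembly: kvm_vps_sync_read and its siblings load the service offset into r30 and branch to kvm_vps_entry, which adds it to the VSA base and jumps through b0.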
Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/kvm/kvm_minstate.h | 23
-rw-r--r--  arch/ia64/kvm/optvfault.S    | 69
-rw-r--r--  arch/ia64/kvm/process.c     |  4
-rw-r--r--  arch/ia64/kvm/vmm_ivt.S     | 39
4 files changed, 84 insertions, 51 deletions
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
index 13980d9b8bcf..2cc41d17cf99 100644
--- a/arch/ia64/kvm/kvm_minstate.h
+++ b/arch/ia64/kvm/kvm_minstate.h
@@ -50,27 +50,18 @@
 
 #define PAL_VSA_SYNC_READ                                             \
 	/* begin to call pal vps sync_read */                         \
+{.mii;                                                                \
 	add r25 = VMM_VPD_BASE_OFFSET, r21;                           \
-	adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21;  /* entry point */  \
+	nop 0x0;                                                      \
+	mov r24=ip;                                                   \
 	;;                                                            \
+}                                                                     \
+{.mmb                                                                 \
+	add r24=0x20, r24;                                            \
 	ld8 r25 = [r25];      /* read vpd base */                     \
-	ld8 r20 = [r20];                                              \
-	;;                                                            \
-	add r20 = PAL_VPS_SYNC_READ,r20;                              \
-	;;                                                            \
-{ .mii;                                                               \
-	nop 0x0;                                                      \
-	mov r24 = ip;                                                 \
-	mov b0 = r20;                                                 \
+	br.cond.sptk kvm_vps_sync_read;    /*call the service*/      \
 	;;                                                            \
 };                                                                    \
-{ .mmb;                                                               \
-	add r24 = 0x20, r24;                                          \
-	nop 0x0;                                                      \
-	br.cond.sptk b0;      /*  call the service */                 \
-	;;                                                            \
-};
-
 
 
 #define KVM_MINSTATE_GET_CURRENT(reg)   mov reg=r21
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
index e4f15d641b22..f0bf0a8efa3e 100644
--- a/arch/ia64/kvm/optvfault.S
+++ b/arch/ia64/kvm/optvfault.S
@@ -20,6 +20,75 @@
 #define ACCE_MOV_TO_PSR
 #define ACCE_THASH
 
+ENTRY(kvm_vps_entry)
+	adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
+	;;
+	ld8 r29 = [r29]
+	;;
+	add r29 = r29, r30
+	;;
+	mov b0 = r29
+	br.sptk.many b0
+END(kvm_vps_entry)
+
+/*
+ *	Inputs:
+ *	r24 : return address
+ *	r25 : vpd
+ *	r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_sync_read)
+	movl r30 = PAL_VPS_SYNC_READ
+	;;
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_sync_read)
+
+/*
+ *	Inputs:
+ *	r24 : return address
+ *	r25 : vpd
+ *	r29 : scratch
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_sync_write)
+	movl r30 = PAL_VPS_SYNC_WRITE
+	;;
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_sync_write)
+
+/*
+ *	Inputs:
+ *	r23 : pr
+ *	r24 : guest b0
+ *	r25 : vpd
+ *
+ */
+GLOBAL_ENTRY(kvm_vps_resume_normal)
+	movl r30 = PAL_VPS_RESUME_NORMAL
+	;;
+	mov pr=r23,-2
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_resume_normal)
+
+/*
+ *	Inputs:
+ *	r23 : pr
+ *	r24 : guest b0
+ *	r25 : vpd
+ *	r17 : isr
+ */
+GLOBAL_ENTRY(kvm_vps_resume_handler)
+	movl r30 = PAL_VPS_RESUME_HANDLER
+	;;
+	ld8 r27=[r25]
+	shr r17=r17,IA64_ISR_IR_BIT
+	;;
+	dep r27=r17,r27,63,1   // bit 63 of r27 indicate whether enable CFLE
+	mov pr=r23,-2
+	br.sptk.many kvm_vps_entry
+END(kvm_vps_resume_handler)
+
 //mov r1=ar3
 GLOBAL_ENTRY(kvm_asm_mov_from_ar)
 #ifndef ACCE_MOV_FROM_AR
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
index 5a33f7ed29a0..3417783ae164 100644
--- a/arch/ia64/kvm/process.c
+++ b/arch/ia64/kvm/process.c
@@ -962,9 +962,9 @@ static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
 void vmm_transition(struct kvm_vcpu *vcpu)
 {
 	ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
-						0, 0, 0, 0, 0, 0);
+						1, 0, 0, 0, 0, 0);
 	vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
 	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
-						0, 0, 0, 0, 0, 0);
+						1, 0, 0, 0, 0, 0);
 	kvm_do_resume_op(vcpu);
 }
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 3ee5f481c06d..c1d7251a1480 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -1261,11 +1261,6 @@ kvm_rse_clear_invalid:
 	adds r19=VMM_VPD_VPSR_OFFSET,r18
 	;;
 	ld8 r19=[r19]        //vpsr
-	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21
-	;;
-	ld8 r20=[r20]
-	;;
-//vsa_sync_write_start
 	mov r25=r18
 	adds r16= VMM_VCPU_GP_OFFSET,r21
 	;;
@@ -1274,10 +1269,7 @@ kvm_rse_clear_invalid:
 	;;
 	add r24=r24,r16
 	;;
-	add r16=PAL_VPS_SYNC_WRITE,r20
-	;;
-	mov b0=r16
-	br.cond.sptk b0         // call the service
+	br.sptk.many kvm_vps_sync_write   // call the service
 	;;
 END(ia64_leave_hypervisor)
 	// fall through
@@ -1288,28 +1280,15 @@ GLOBAL_ENTRY(ia64_vmm_entry)
  *	r17:cr.isr
  *	r18:vpd
  *	r19:vpsr
- *	r20:__vsa_base
  *	r22:b0
  *	r23:predicate
  */
 	mov r24=r22
 	mov r25=r18
 	tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT        // p1=vpsr.ic
+	(p1) br.cond.sptk.few kvm_vps_resume_normal
+	(p2) br.cond.sptk.many kvm_vps_resume_handler
 	;;
-	(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
-	(p1) br.sptk.many ia64_vmm_entry_out
-	;;
-	tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT        //p1=cr.isr.ir
-	;;
-	(p1) add r29=PAL_VPS_RESUME_NORMAL,r20
-	(p2) add r29=PAL_VPS_RESUME_HANDLER,r20
-	(p2) ld8 r26=[r25]
-	;;
-ia64_vmm_entry_out:
-	mov pr=r23,-2
-	mov b0=r29
-	;;
-	br.cond.sptk b0             // call pal service
 END(ia64_vmm_entry)
 
 
@@ -1376,6 +1355,9 @@ GLOBAL_ENTRY(vmm_reset_entry)
 	//set up ipsr, iip, vpd.vpsr, dcr
 	// For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
 	// For DCR: all bits 0
+	bsw.0
+	;;
+	mov r21 =r13
 	adds r14=-VMM_PT_REGS_SIZE, r12
 	;;
 	movl r6=0x501008826000      // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
@@ -1387,12 +1369,6 @@ GLOBAL_ENTRY(vmm_reset_entry)
 	;;
 	srlz.i
 	;;
-	bsw.0
-	;;
-	mov r21 =r13
-	;;
-	bsw.1
-	;;
 	mov ar.rsc = 0
 	;;
 	flushrs
@@ -1406,12 +1382,9 @@ GLOBAL_ENTRY(vmm_reset_entry)
 	ld8 r1 = [r20]
 	;;
 	mov cr.iip=r4
-	;;
 	adds r16=VMM_VPD_BASE_OFFSET,r13
-	adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13
 	;;
 	ld8 r18=[r16]
-	ld8 r20=[r20]
 	;;
 	adds r19=VMM_VPD_VPSR_OFFSET,r18
 	;;