author	Avi Kivity <avi@qumranet.com>	2006-12-13 03:33:45 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-13 12:05:46 -0500
commit	5aff458e9c90df55d6badabd89a1a063a80d9768 (patch)
tree	278b56e2f14128c4c73553d0037b385614421e9b /drivers/kvm
parent	fd24dc4af6387d820159e3affdfb1e9d3bdce1f8 (diff)
[PATCH] KVM: Clean up AMD SVM debug registers load and unload
By letting gcc choose the temporary register for us, we lose the arch dependency and some ugliness. Conceivably gcc will also generate marginally better code.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers/kvm')
-rw-r--r--	drivers/kvm/svm.c	51
1 file changed, 8 insertions(+), 43 deletions(-)
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index a33a89c68138..d6042eed7a78 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -1345,53 +1345,18 @@ static void kvm_reput_irq(struct kvm_vcpu *vcpu)
 
 static void save_db_regs(unsigned long *db_regs)
 {
-#ifdef __x86_64__
-	asm ("mov %%dr0, %%rax \n\t"
-	     "mov %%rax, %[dr0] \n\t"
-	     "mov %%dr1, %%rax \n\t"
-	     "mov %%rax, %[dr1] \n\t"
-	     "mov %%dr2, %%rax \n\t"
-	     "mov %%rax, %[dr2] \n\t"
-	     "mov %%dr3, %%rax \n\t"
-	     "mov %%rax, %[dr3] \n\t"
-	     : [dr0] "=m"(db_regs[0]),
-	       [dr1] "=m"(db_regs[1]),
-	       [dr2] "=m"(db_regs[2]),
-	       [dr3] "=m"(db_regs[3])
-	     : : "rax");
-#else
-	asm ("mov %%dr0, %%eax \n\t"
-	     "mov %%eax, %[dr0] \n\t"
-	     "mov %%dr1, %%eax \n\t"
-	     "mov %%eax, %[dr1] \n\t"
-	     "mov %%dr2, %%eax \n\t"
-	     "mov %%eax, %[dr2] \n\t"
-	     "mov %%dr3, %%eax \n\t"
-	     "mov %%eax, %[dr3] \n\t"
-	     : [dr0] "=m"(db_regs[0]),
-	       [dr1] "=m"(db_regs[1]),
-	       [dr2] "=m"(db_regs[2]),
-	       [dr3] "=m"(db_regs[3])
-	     : : "eax");
-#endif
+	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
+	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
+	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
+	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
 }
 
 static void load_db_regs(unsigned long *db_regs)
 {
-	asm volatile ("mov %[dr0], %%dr0 \n\t"
-		      "mov %[dr1], %%dr1 \n\t"
-		      "mov %[dr2], %%dr2 \n\t"
-		      "mov %[dr3], %%dr3 \n\t"
-		      :
-		      : [dr0] "r"(db_regs[0]),
-			[dr1] "r"(db_regs[1]),
-			[dr2] "r"(db_regs[2]),
-			[dr3] "r"(db_regs[3])
-#ifdef __x86_64__
-		      : "rax");
-#else
-		      : "eax");
-#endif
+	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
+	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
+	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
+	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
 }
 
 static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
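
As a minimal standalone sketch of the constraint style this patch adopts (the helper names below are illustrative, not part of the kernel code; debug registers are only accessible at CPL 0, so this compiles but cannot run unprivileged), an "=r" output constraint lets gcc pick the temporary register on both i386 and x86_64, which is what removes the #ifdef and the explicit rax/eax clobber:

/* sketch only: hypothetical helpers, not drivers/kvm/svm.c */
static inline unsigned long read_dr0(void)
{
	unsigned long val;

	/* gcc chooses whichever general-purpose register satisfies "=r",
	 * so the same asm works for 32-bit and 64-bit builds and no
	 * clobber list is needed. */
	asm volatile ("mov %%dr0, %0" : "=r"(val));
	return val;
}

static inline void write_dr0(unsigned long val)
{
	/* "r" input constraint: gcc places val in a register of its choice. */
	asm volatile ("mov %0, %%dr0" : : "r"(val));
}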