author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /arch/powerpc/kvm
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/powerpc/kvm')
24 files changed, 4098 insertions, 237 deletions
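Two themes dominate this merge. First, the 44x and e500 emulation paths stop dereferencing `vcpu->arch.gpr[]` directly and go through the `kvmppc_get_gpr()`/`kvmppc_set_gpr()` accessors instead; second, a new Book3S (64-bit server PowerPC) target is added, with its own Kconfig options, Makefile rules, and core implementation in book3s.c. The sketch below shows the shape of the accessor indirection on simplified, hypothetical types — the real helpers live in arch/powerpc/include/asm/kvm_ppc.h and are not part of this diff. The value of the indirection is that later patches can relocate the register file (for example into shadow state shared with assembly handlers) without touching every emulation call site.

```c
/* Minimal sketch of the accessor indirection; struct vcpu_regs and
 * these helpers are simplified stand-ins, not the kernel's types. */
struct vcpu_regs {
	unsigned long gpr[32]; /* general-purpose register file */
};

static inline unsigned long get_gpr(const struct vcpu_regs *r, int num)
{
	return r->gpr[num];
}

static inline void set_gpr(struct vcpu_regs *r, int num, unsigned long val)
{
	r->gpr[num] = val;
}
```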
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index f4d1b55aa70b..689a57c2ac80 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -18,6 +18,7 @@
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/kvm_host.h> | 20 | #include <linux/kvm_host.h> |
21 | #include <linux/slab.h> | ||
21 | #include <linux/err.h> | 22 | #include <linux/err.h> |
22 | 23 | ||
23 | #include <asm/reg.h> | 24 | #include <asm/reg.h> |
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 61af58fcecee..65ea083a5b27 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -65,13 +65,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
65 | */ | 65 | */ |
66 | switch (dcrn) { | 66 | switch (dcrn) { |
67 | case DCRN_CPR0_CONFIG_ADDR: | 67 | case DCRN_CPR0_CONFIG_ADDR: |
68 | vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr; | 68 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr); |
69 | break; | 69 | break; |
70 | case DCRN_CPR0_CONFIG_DATA: | 70 | case DCRN_CPR0_CONFIG_DATA: |
71 | local_irq_disable(); | 71 | local_irq_disable(); |
72 | mtdcr(DCRN_CPR0_CONFIG_ADDR, | 72 | mtdcr(DCRN_CPR0_CONFIG_ADDR, |
73 | vcpu->arch.cpr0_cfgaddr); | 73 | vcpu->arch.cpr0_cfgaddr); |
74 | vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA); | 74 | kvmppc_set_gpr(vcpu, rt, |
75 | mfdcr(DCRN_CPR0_CONFIG_DATA)); | ||
75 | local_irq_enable(); | 76 | local_irq_enable(); |
76 | break; | 77 | break; |
77 | default: | 78 | default: |
@@ -93,11 +94,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
93 | /* emulate some access in kernel */ | 94 | /* emulate some access in kernel */ |
94 | switch (dcrn) { | 95 | switch (dcrn) { |
95 | case DCRN_CPR0_CONFIG_ADDR: | 96 | case DCRN_CPR0_CONFIG_ADDR: |
96 | vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs]; | 97 | vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs); |
97 | break; | 98 | break; |
98 | default: | 99 | default: |
99 | run->dcr.dcrn = dcrn; | 100 | run->dcr.dcrn = dcrn; |
100 | run->dcr.data = vcpu->arch.gpr[rs]; | 101 | run->dcr.data = kvmppc_get_gpr(vcpu, rs); |
101 | run->dcr.is_write = 1; | 102 | run->dcr.is_write = 1; |
102 | vcpu->arch.dcr_needed = 1; | 103 | vcpu->arch.dcr_needed = 1; |
103 | kvmppc_account_exit(vcpu, DCR_EXITS); | 104 | kvmppc_account_exit(vcpu, DCR_EXITS); |
@@ -146,13 +147,13 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
146 | 147 | ||
147 | switch (sprn) { | 148 | switch (sprn) { |
148 | case SPRN_PID: | 149 | case SPRN_PID: |
149 | kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break; | 150 | kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break; |
150 | case SPRN_MMUCR: | 151 | case SPRN_MMUCR: |
151 | vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break; | 152 | vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break; |
152 | case SPRN_CCR0: | 153 | case SPRN_CCR0: |
153 | vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break; | 154 | vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break; |
154 | case SPRN_CCR1: | 155 | case SPRN_CCR1: |
155 | vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break; | 156 | vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break; |
156 | default: | 157 | default: |
157 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); | 158 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); |
158 | } | 159 | } |
@@ -167,13 +168,13 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
167 | 168 | ||
168 | switch (sprn) { | 169 | switch (sprn) { |
169 | case SPRN_PID: | 170 | case SPRN_PID: |
170 | vcpu->arch.gpr[rt] = vcpu->arch.pid; break; | 171 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break; |
171 | case SPRN_MMUCR: | 172 | case SPRN_MMUCR: |
172 | vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break; | 173 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break; |
173 | case SPRN_CCR0: | 174 | case SPRN_CCR0: |
174 | vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break; | 175 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break; |
175 | case SPRN_CCR1: | 176 | case SPRN_CCR1: |
176 | vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break; | 177 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break; |
177 | default: | 178 | default: |
178 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); | 179 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); |
179 | } | 180 | } |
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ff3cb63b8117..812312542e50 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -439,8 +439,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
439 | struct kvmppc_44x_tlbe *tlbe; | 439 | struct kvmppc_44x_tlbe *tlbe; |
440 | unsigned int gtlb_index; | 440 | unsigned int gtlb_index; |
441 | 441 | ||
442 | gtlb_index = vcpu->arch.gpr[ra]; | 442 | gtlb_index = kvmppc_get_gpr(vcpu, ra); |
443 | if (gtlb_index > KVM44x_GUEST_TLB_SIZE) { | 443 | if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) { |
444 | printk("%s: index %d\n", __func__, gtlb_index); | 444 | printk("%s: index %d\n", __func__, gtlb_index); |
445 | kvmppc_dump_vcpu(vcpu); | 445 | kvmppc_dump_vcpu(vcpu); |
446 | return EMULATE_FAIL; | 446 | return EMULATE_FAIL; |
@@ -455,15 +455,15 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
455 | switch (ws) { | 455 | switch (ws) { |
456 | case PPC44x_TLB_PAGEID: | 456 | case PPC44x_TLB_PAGEID: |
457 | tlbe->tid = get_mmucr_stid(vcpu); | 457 | tlbe->tid = get_mmucr_stid(vcpu); |
458 | tlbe->word0 = vcpu->arch.gpr[rs]; | 458 | tlbe->word0 = kvmppc_get_gpr(vcpu, rs); |
459 | break; | 459 | break; |
460 | 460 | ||
461 | case PPC44x_TLB_XLAT: | 461 | case PPC44x_TLB_XLAT: |
462 | tlbe->word1 = vcpu->arch.gpr[rs]; | 462 | tlbe->word1 = kvmppc_get_gpr(vcpu, rs); |
463 | break; | 463 | break; |
464 | 464 | ||
465 | case PPC44x_TLB_ATTRIB: | 465 | case PPC44x_TLB_ATTRIB: |
466 | tlbe->word2 = vcpu->arch.gpr[rs]; | 466 | tlbe->word2 = kvmppc_get_gpr(vcpu, rs); |
467 | break; | 467 | break; |
468 | 468 | ||
469 | default: | 469 | default: |
@@ -500,18 +500,20 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
500 | unsigned int as = get_mmucr_sts(vcpu); | 500 | unsigned int as = get_mmucr_sts(vcpu); |
501 | unsigned int pid = get_mmucr_stid(vcpu); | 501 | unsigned int pid = get_mmucr_stid(vcpu); |
502 | 502 | ||
503 | ea = vcpu->arch.gpr[rb]; | 503 | ea = kvmppc_get_gpr(vcpu, rb); |
504 | if (ra) | 504 | if (ra) |
505 | ea += vcpu->arch.gpr[ra]; | 505 | ea += kvmppc_get_gpr(vcpu, ra); |
506 | 506 | ||
507 | gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as); | 507 | gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as); |
508 | if (rc) { | 508 | if (rc) { |
509 | u32 cr = kvmppc_get_cr(vcpu); | ||
510 | |||
509 | if (gtlb_index < 0) | 511 | if (gtlb_index < 0) |
510 | vcpu->arch.cr &= ~0x20000000; | 512 | kvmppc_set_cr(vcpu, cr & ~0x20000000); |
511 | else | 513 | else |
512 | vcpu->arch.cr |= 0x20000000; | 514 | kvmppc_set_cr(vcpu, cr | 0x20000000); |
513 | } | 515 | } |
514 | vcpu->arch.gpr[rt] = gtlb_index; | 516 | kvmppc_set_gpr(vcpu, rt, gtlb_index); |
515 | 517 | ||
516 | kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); | 518 | kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); |
517 | return EMULATE_DONE; | 519 | return EMULATE_DONE; |
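Besides the accessor conversion, the 44x_tlb.c hunks above carry two behavioral fixes. The tlbwe bounds check tightens from `>` to `>=`: valid guest TLB indices run from 0 to KVM44x_GUEST_TLB_SIZE - 1, so the old test wrongly accepted an index equal to the table size. And tlbsx now reads the condition register once via kvmppc_get_cr() and writes it back with kvmppc_set_cr() rather than poking vcpu->arch.cr in place; the 0x20000000 mask is CR0[EQ], which tlbsx. sets on a TLB hit and clears on a miss. A self-contained sketch of the corrected guard, with an illustrative table size:

```c
#include <stdbool.h>

#define GUEST_TLB_SIZE 64 /* illustrative stand-in for KVM44x_GUEST_TLB_SIZE */

/* Valid indices are 0 .. GUEST_TLB_SIZE - 1, so the guard must reject
 * idx >= GUEST_TLB_SIZE; the old "idx > GUEST_TLB_SIZE" let
 * idx == GUEST_TLB_SIZE through, one slot past the end of the array. */
static bool gtlb_index_valid(unsigned int idx)
{
	return idx < GUEST_TLB_SIZE;
}
```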
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index c29926846613..60624cc9f4d4 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,6 +20,24 @@ config KVM
20 | bool | 20 | bool |
21 | select PREEMPT_NOTIFIERS | 21 | select PREEMPT_NOTIFIERS |
22 | select ANON_INODES | 22 | select ANON_INODES |
23 | select KVM_MMIO | ||
24 | |||
25 | config KVM_BOOK3S_64_HANDLER | ||
26 | bool | ||
27 | |||
28 | config KVM_BOOK3S_64 | ||
29 | tristate "KVM support for PowerPC book3s_64 processors" | ||
30 | depends on EXPERIMENTAL && PPC64 | ||
31 | select KVM | ||
32 | select KVM_BOOK3S_64_HANDLER | ||
33 | ---help--- | ||
34 | Support running unmodified book3s_64 and book3s_32 guest kernels | ||
35 | in virtual machines on book3s_64 host processors. | ||
36 | |||
37 | This module provides access to the hardware capabilities through | ||
38 | a character device node named /dev/kvm. | ||
39 | |||
40 | If unsure, say N. | ||
23 | 41 | ||
24 | config KVM_440 | 42 | config KVM_440 |
25 | bool "KVM support for PowerPC 440 processors" | 43 | bool "KVM support for PowerPC 440 processors" |
@@ -36,7 +54,7 @@ config KVM_440
36 | 54 | ||
37 | config KVM_EXIT_TIMING | 55 | config KVM_EXIT_TIMING |
38 | bool "Detailed exit timing" | 56 | bool "Detailed exit timing" |
39 | depends on KVM | 57 | depends on KVM_440 || KVM_E500 |
40 | ---help--- | 58 | ---help--- |
41 | Calculate elapsed time for every exit/enter cycle. A per-vcpu | 59 | Calculate elapsed time for every exit/enter cycle. A per-vcpu |
42 | report is available in debugfs kvm/vm#_vcpu#_timing. | 60 | report is available in debugfs kvm/vm#_vcpu#_timing. |
@@ -58,6 +76,7 @@ config KVM_E500
58 | 76 | ||
59 | If unsure, say N. | 77 | If unsure, say N. |
60 | 78 | ||
79 | source drivers/vhost/Kconfig | ||
61 | source drivers/virtio/Kconfig | 80 | source drivers/virtio/Kconfig |
62 | 81 | ||
63 | endif # VIRTUALIZATION | 82 | endif # VIRTUALIZATION |
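Two details in the Kconfig hunks are worth calling out. KVM_EXIT_TIMING's dependency narrows from KVM to KVM_440 || KVM_E500, which keeps the exit-timing instrumentation — whose accounting hooks live in the Book E code paths — from being enabled with the new Book3S target. And KVM_BOOK3S_64 is a tristate that selects the bool KVM_BOOK3S_64_HANDLER, so the low-level handler code (book3s_64_exports.o in the Makefile below) can be built into the kernel image even when the rest of the target is built as a module.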
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 37655fe19f2f..56484d652377 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -12,26 +12,45 @@ CFLAGS_44x_tlb.o := -I.
12 | CFLAGS_e500_tlb.o := -I. | 12 | CFLAGS_e500_tlb.o := -I. |
13 | CFLAGS_emulate.o := -I. | 13 | CFLAGS_emulate.o := -I. |
14 | 14 | ||
15 | kvm-objs := $(common-objs-y) powerpc.o emulate.o | 15 | common-objs-y += powerpc.o emulate.o |
16 | obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o | 16 | obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o |
17 | obj-$(CONFIG_KVM) += kvm.o | 17 | obj-$(CONFIG_KVM_BOOK3S_64_HANDLER) += book3s_64_exports.o |
18 | 18 | ||
19 | AFLAGS_booke_interrupts.o := -I$(obj) | 19 | AFLAGS_booke_interrupts.o := -I$(obj) |
20 | 20 | ||
21 | kvm-440-objs := \ | 21 | kvm-440-objs := \ |
22 | $(common-objs-y) \ | ||
22 | booke.o \ | 23 | booke.o \ |
23 | booke_emulate.o \ | 24 | booke_emulate.o \ |
24 | booke_interrupts.o \ | 25 | booke_interrupts.o \ |
25 | 44x.o \ | 26 | 44x.o \ |
26 | 44x_tlb.o \ | 27 | 44x_tlb.o \ |
27 | 44x_emulate.o | 28 | 44x_emulate.o |
28 | obj-$(CONFIG_KVM_440) += kvm-440.o | 29 | kvm-objs-$(CONFIG_KVM_440) := $(kvm-440-objs) |
29 | 30 | ||
30 | kvm-e500-objs := \ | 31 | kvm-e500-objs := \ |
32 | $(common-objs-y) \ | ||
31 | booke.o \ | 33 | booke.o \ |
32 | booke_emulate.o \ | 34 | booke_emulate.o \ |
33 | booke_interrupts.o \ | 35 | booke_interrupts.o \ |
34 | e500.o \ | 36 | e500.o \ |
35 | e500_tlb.o \ | 37 | e500_tlb.o \ |
36 | e500_emulate.o | 38 | e500_emulate.o |
37 | obj-$(CONFIG_KVM_E500) += kvm-e500.o | 39 | kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs) |
40 | |||
41 | kvm-book3s_64-objs := \ | ||
42 | $(common-objs-y) \ | ||
43 | book3s.o \ | ||
44 | book3s_64_emulate.o \ | ||
45 | book3s_64_interrupts.o \ | ||
46 | book3s_64_mmu_host.o \ | ||
47 | book3s_64_mmu.o \ | ||
48 | book3s_32_mmu.o | ||
49 | kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-objs) | ||
50 | |||
51 | kvm-objs := $(kvm-objs-m) $(kvm-objs-y) | ||
52 | |||
53 | obj-$(CONFIG_KVM_440) += kvm.o | ||
54 | obj-$(CONFIG_KVM_E500) += kvm.o | ||
55 | obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o | ||
56 | |||
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
new file mode 100644
index 000000000000..604af29b71ed
--- /dev/null
+++ b/arch/powerpc/kvm/book3s.c
@@ -0,0 +1,1191 @@
1 | /* | ||
2 | * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved. | ||
3 | * | ||
4 | * Authors: | ||
5 | * Alexander Graf <agraf@suse.de> | ||
6 | * Kevin Wolf <mail@kevin-wolf.de> | ||
7 | * | ||
8 | * Description: | ||
9 | * This file is derived from arch/powerpc/kvm/44x.c, | ||
10 | * by Hollis Blanchard <hollisb@us.ibm.com>. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License, version 2, as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <linux/kvm_host.h> | ||
18 | #include <linux/err.h> | ||
19 | |||
20 | #include <asm/reg.h> | ||
21 | #include <asm/cputable.h> | ||
22 | #include <asm/cacheflush.h> | ||
23 | #include <asm/tlbflush.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <asm/kvm_ppc.h> | ||
27 | #include <asm/kvm_book3s.h> | ||
28 | #include <asm/mmu_context.h> | ||
29 | #include <linux/gfp.h> | ||
30 | #include <linux/sched.h> | ||
31 | #include <linux/vmalloc.h> | ||
32 | |||
33 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | ||
34 | |||
35 | /* #define EXIT_DEBUG */ | ||
36 | /* #define EXIT_DEBUG_SIMPLE */ | ||
37 | /* #define DEBUG_EXT */ | ||
38 | |||
39 | static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); | ||
40 | |||
41 | struct kvm_stats_debugfs_item debugfs_entries[] = { | ||
42 | { "exits", VCPU_STAT(sum_exits) }, | ||
43 | { "mmio", VCPU_STAT(mmio_exits) }, | ||
44 | { "sig", VCPU_STAT(signal_exits) }, | ||
45 | { "sysc", VCPU_STAT(syscall_exits) }, | ||
46 | { "inst_emu", VCPU_STAT(emulated_inst_exits) }, | ||
47 | { "dec", VCPU_STAT(dec_exits) }, | ||
48 | { "ext_intr", VCPU_STAT(ext_intr_exits) }, | ||
49 | { "queue_intr", VCPU_STAT(queue_intr) }, | ||
50 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | ||
51 | { "pf_storage", VCPU_STAT(pf_storage) }, | ||
52 | { "sp_storage", VCPU_STAT(sp_storage) }, | ||
53 | { "pf_instruc", VCPU_STAT(pf_instruc) }, | ||
54 | { "sp_instruc", VCPU_STAT(sp_instruc) }, | ||
55 | { "ld", VCPU_STAT(ld) }, | ||
56 | { "ld_slow", VCPU_STAT(ld_slow) }, | ||
57 | { "st", VCPU_STAT(st) }, | ||
58 | { "st_slow", VCPU_STAT(st_slow) }, | ||
59 | { NULL } | ||
60 | }; | ||
61 | |||
62 | void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) | ||
63 | { | ||
64 | } | ||
65 | |||
66 | void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) | ||
67 | { | ||
68 | } | ||
69 | |||
70 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
71 | { | ||
72 | memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb)); | ||
73 | memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu, | ||
74 | sizeof(get_paca()->shadow_vcpu)); | ||
75 | get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max; | ||
76 | } | ||
77 | |||
78 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | ||
79 | { | ||
80 | memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb)); | ||
81 | memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, | ||
82 | sizeof(get_paca()->shadow_vcpu)); | ||
83 | to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max; | ||
84 | |||
85 | kvmppc_giveup_ext(vcpu, MSR_FP); | ||
86 | kvmppc_giveup_ext(vcpu, MSR_VEC); | ||
87 | kvmppc_giveup_ext(vcpu, MSR_VSX); | ||
88 | } | ||
89 | |||
90 | #if defined(EXIT_DEBUG) | ||
91 | static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu) | ||
92 | { | ||
93 | u64 jd = mftb() - vcpu->arch.dec_jiffies; | ||
94 | return vcpu->arch.dec - jd; | ||
95 | } | ||
96 | #endif | ||
97 | |||
98 | static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) | ||
99 | { | ||
100 | vcpu->arch.shadow_msr = vcpu->arch.msr; | ||
101 | /* Guest MSR values */ | ||
102 | vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | | ||
103 | MSR_BE | MSR_DE; | ||
104 | /* Process MSR values */ | ||
105 | vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | | ||
106 | MSR_EE; | ||
107 | /* External providers the guest reserved */ | ||
108 | vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext); | ||
109 | /* 64-bit Process MSR values */ | ||
110 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
111 | vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV; | ||
112 | #endif | ||
113 | } | ||
114 | |||
115 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | ||
116 | { | ||
117 | ulong old_msr = vcpu->arch.msr; | ||
118 | |||
119 | #ifdef EXIT_DEBUG | ||
120 | printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); | ||
121 | #endif | ||
122 | |||
123 | msr &= to_book3s(vcpu)->msr_mask; | ||
124 | vcpu->arch.msr = msr; | ||
125 | kvmppc_recalc_shadow_msr(vcpu); | ||
126 | |||
127 | if (msr & (MSR_WE|MSR_POW)) { | ||
128 | if (!vcpu->arch.pending_exceptions) { | ||
129 | kvm_vcpu_block(vcpu); | ||
130 | vcpu->stat.halt_wakeup++; | ||
131 | } | ||
132 | } | ||
133 | |||
134 | if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) || | ||
135 | (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) { | ||
136 | kvmppc_mmu_flush_segments(vcpu); | ||
137 | kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc); | ||
138 | } | ||
139 | } | ||
140 | |||
141 | void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) | ||
142 | { | ||
143 | vcpu->arch.srr0 = vcpu->arch.pc; | ||
144 | vcpu->arch.srr1 = vcpu->arch.msr | flags; | ||
145 | vcpu->arch.pc = to_book3s(vcpu)->hior + vec; | ||
146 | vcpu->arch.mmu.reset_msr(vcpu); | ||
147 | } | ||
148 | |||
149 | static int kvmppc_book3s_vec2irqprio(unsigned int vec) | ||
150 | { | ||
151 | unsigned int prio; | ||
152 | |||
153 | switch (vec) { | ||
154 | case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break; | ||
155 | case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break; | ||
156 | case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break; | ||
157 | case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break; | ||
158 | case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break; | ||
159 | case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break; | ||
160 | case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break; | ||
161 | case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break; | ||
162 | case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break; | ||
163 | case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break; | ||
164 | case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break; | ||
165 | case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break; | ||
166 | case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break; | ||
167 | case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break; | ||
168 | case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break; | ||
169 | default: prio = BOOK3S_IRQPRIO_MAX; break; | ||
170 | } | ||
171 | |||
172 | return prio; | ||
173 | } | ||
174 | |||
175 | static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, | ||
176 | unsigned int vec) | ||
177 | { | ||
178 | clear_bit(kvmppc_book3s_vec2irqprio(vec), | ||
179 | &vcpu->arch.pending_exceptions); | ||
180 | } | ||
181 | |||
182 | void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) | ||
183 | { | ||
184 | vcpu->stat.queue_intr++; | ||
185 | |||
186 | set_bit(kvmppc_book3s_vec2irqprio(vec), | ||
187 | &vcpu->arch.pending_exceptions); | ||
188 | #ifdef EXIT_DEBUG | ||
189 | printk(KERN_INFO "Queueing interrupt %x\n", vec); | ||
190 | #endif | ||
191 | } | ||
192 | |||
193 | |||
194 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags) | ||
195 | { | ||
196 | to_book3s(vcpu)->prog_flags = flags; | ||
197 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM); | ||
198 | } | ||
199 | |||
200 | void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) | ||
201 | { | ||
202 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); | ||
203 | } | ||
204 | |||
205 | int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) | ||
206 | { | ||
207 | return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions); | ||
208 | } | ||
209 | |||
210 | void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) | ||
211 | { | ||
212 | kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER); | ||
213 | } | ||
214 | |||
215 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, | ||
216 | struct kvm_interrupt *irq) | ||
217 | { | ||
218 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); | ||
219 | } | ||
220 | |||
221 | int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) | ||
222 | { | ||
223 | int deliver = 1; | ||
224 | int vec = 0; | ||
225 | ulong flags = 0ULL; | ||
226 | |||
227 | switch (priority) { | ||
228 | case BOOK3S_IRQPRIO_DECREMENTER: | ||
229 | deliver = vcpu->arch.msr & MSR_EE; | ||
230 | vec = BOOK3S_INTERRUPT_DECREMENTER; | ||
231 | break; | ||
232 | case BOOK3S_IRQPRIO_EXTERNAL: | ||
233 | deliver = vcpu->arch.msr & MSR_EE; | ||
234 | vec = BOOK3S_INTERRUPT_EXTERNAL; | ||
235 | break; | ||
236 | case BOOK3S_IRQPRIO_SYSTEM_RESET: | ||
237 | vec = BOOK3S_INTERRUPT_SYSTEM_RESET; | ||
238 | break; | ||
239 | case BOOK3S_IRQPRIO_MACHINE_CHECK: | ||
240 | vec = BOOK3S_INTERRUPT_MACHINE_CHECK; | ||
241 | break; | ||
242 | case BOOK3S_IRQPRIO_DATA_STORAGE: | ||
243 | vec = BOOK3S_INTERRUPT_DATA_STORAGE; | ||
244 | break; | ||
245 | case BOOK3S_IRQPRIO_INST_STORAGE: | ||
246 | vec = BOOK3S_INTERRUPT_INST_STORAGE; | ||
247 | break; | ||
248 | case BOOK3S_IRQPRIO_DATA_SEGMENT: | ||
249 | vec = BOOK3S_INTERRUPT_DATA_SEGMENT; | ||
250 | break; | ||
251 | case BOOK3S_IRQPRIO_INST_SEGMENT: | ||
252 | vec = BOOK3S_INTERRUPT_INST_SEGMENT; | ||
253 | break; | ||
254 | case BOOK3S_IRQPRIO_ALIGNMENT: | ||
255 | vec = BOOK3S_INTERRUPT_ALIGNMENT; | ||
256 | break; | ||
257 | case BOOK3S_IRQPRIO_PROGRAM: | ||
258 | vec = BOOK3S_INTERRUPT_PROGRAM; | ||
259 | flags = to_book3s(vcpu)->prog_flags; | ||
260 | break; | ||
261 | case BOOK3S_IRQPRIO_VSX: | ||
262 | vec = BOOK3S_INTERRUPT_VSX; | ||
263 | break; | ||
264 | case BOOK3S_IRQPRIO_ALTIVEC: | ||
265 | vec = BOOK3S_INTERRUPT_ALTIVEC; | ||
266 | break; | ||
267 | case BOOK3S_IRQPRIO_FP_UNAVAIL: | ||
268 | vec = BOOK3S_INTERRUPT_FP_UNAVAIL; | ||
269 | break; | ||
270 | case BOOK3S_IRQPRIO_SYSCALL: | ||
271 | vec = BOOK3S_INTERRUPT_SYSCALL; | ||
272 | break; | ||
273 | case BOOK3S_IRQPRIO_DEBUG: | ||
274 | vec = BOOK3S_INTERRUPT_TRACE; | ||
275 | break; | ||
276 | case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR: | ||
277 | vec = BOOK3S_INTERRUPT_PERFMON; | ||
278 | break; | ||
279 | default: | ||
280 | deliver = 0; | ||
281 | printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority); | ||
282 | break; | ||
283 | } | ||
284 | |||
285 | #if 0 | ||
286 | printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver); | ||
287 | #endif | ||
288 | |||
289 | if (deliver) | ||
290 | kvmppc_inject_interrupt(vcpu, vec, flags); | ||
291 | |||
292 | return deliver; | ||
293 | } | ||
294 | |||
295 | void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) | ||
296 | { | ||
297 | unsigned long *pending = &vcpu->arch.pending_exceptions; | ||
298 | unsigned int priority; | ||
299 | |||
300 | #ifdef EXIT_DEBUG | ||
301 | if (vcpu->arch.pending_exceptions) | ||
302 | printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions); | ||
303 | #endif | ||
304 | priority = __ffs(*pending); | ||
305 | while (priority <= (sizeof(unsigned int) * 8)) { | ||
306 | if (kvmppc_book3s_irqprio_deliver(vcpu, priority) && | ||
307 | (priority != BOOK3S_IRQPRIO_DECREMENTER)) { | ||
308 | /* DEC interrupts get cleared by mtdec */ | ||
309 | clear_bit(priority, &vcpu->arch.pending_exceptions); | ||
310 | break; | ||
311 | } | ||
312 | |||
313 | priority = find_next_bit(pending, | ||
314 | BITS_PER_BYTE * sizeof(*pending), | ||
315 | priority + 1); | ||
316 | } | ||
317 | } | ||
318 | |||
319 | void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) | ||
320 | { | ||
321 | vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB; | ||
322 | vcpu->arch.pvr = pvr; | ||
323 | if ((pvr >= 0x330000) && (pvr < 0x70330000)) { | ||
324 | kvmppc_mmu_book3s_64_init(vcpu); | ||
325 | to_book3s(vcpu)->hior = 0xfff00000; | ||
326 | to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; | ||
327 | } else { | ||
328 | kvmppc_mmu_book3s_32_init(vcpu); | ||
329 | to_book3s(vcpu)->hior = 0; | ||
330 | to_book3s(vcpu)->msr_mask = 0xffffffffULL; | ||
331 | } | ||
332 | |||
333 | /* If we are in hypervisor level on 970, we can tell the CPU to | ||
334 | * treat DCBZ as 32 bytes store */ | ||
335 | vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32; | ||
336 | if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) && | ||
337 | !strcmp(cur_cpu_spec->platform, "ppc970")) | ||
338 | vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; | ||
339 | |||
340 | } | ||
341 | |||
342 | /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To | ||
343 | * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to | ||
344 | * emulate 32 bytes dcbz length. | ||
345 | * | ||
346 | * The Book3s_64 inventors also realized this case and implemented a special bit | ||
347 | * in the HID5 register, which is a hypervisor resource. Thus we can't use it. ||
348 | * | ||
349 | * My approach here is to patch the dcbz instruction on executing pages. | ||
350 | */ | ||
351 | static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) | ||
352 | { | ||
353 | bool touched = false; | ||
354 | hva_t hpage; | ||
355 | u32 *page; | ||
356 | int i; | ||
357 | |||
358 | hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT); | ||
359 | if (kvm_is_error_hva(hpage)) | ||
360 | return; | ||
361 | |||
362 | hpage |= pte->raddr & ~PAGE_MASK; | ||
363 | hpage &= ~0xFFFULL; | ||
364 | |||
365 | page = vmalloc(HW_PAGE_SIZE); | ||
366 | |||
367 | if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE)) | ||
368 | goto out; | ||
369 | |||
370 | for (i=0; i < HW_PAGE_SIZE / 4; i++) | ||
371 | if ((page[i] & 0xff0007ff) == INS_DCBZ) { | ||
372 | page[i] &= 0xfffffff7; // reserved instruction, so we trap | ||
373 | touched = true; | ||
374 | } | ||
375 | |||
376 | if (touched) | ||
377 | copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE); | ||
378 | |||
379 | out: | ||
380 | vfree(page); | ||
381 | } | ||
382 | |||
383 | static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, | ||
384 | struct kvmppc_pte *pte) | ||
385 | { | ||
386 | int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR)); | ||
387 | int r; | ||
388 | |||
389 | if (relocated) { | ||
390 | r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data); | ||
391 | } else { | ||
392 | pte->eaddr = eaddr; | ||
393 | pte->raddr = eaddr & 0xffffffff; | ||
394 | pte->vpage = eaddr >> 12; | ||
395 | switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { | ||
396 | case 0: | ||
397 | pte->vpage |= VSID_REAL; | ||
398 | case MSR_DR: | ||
399 | pte->vpage |= VSID_REAL_DR; | ||
400 | case MSR_IR: | ||
401 | pte->vpage |= VSID_REAL_IR; | ||
402 | } | ||
403 | pte->may_read = true; | ||
404 | pte->may_write = true; | ||
405 | pte->may_execute = true; | ||
406 | r = 0; | ||
407 | } | ||
408 | |||
409 | return r; | ||
410 | } | ||
411 | |||
412 | static hva_t kvmppc_bad_hva(void) | ||
413 | { | ||
414 | return PAGE_OFFSET; | ||
415 | } | ||
416 | |||
417 | static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte, | ||
418 | bool read) | ||
419 | { | ||
420 | hva_t hpage; | ||
421 | |||
422 | if (read && !pte->may_read) | ||
423 | goto err; | ||
424 | |||
425 | if (!read && !pte->may_write) | ||
426 | goto err; | ||
427 | |||
428 | hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT); | ||
429 | if (kvm_is_error_hva(hpage)) | ||
430 | goto err; | ||
431 | |||
432 | return hpage | (pte->raddr & ~PAGE_MASK); | ||
433 | err: | ||
434 | return kvmppc_bad_hva(); | ||
435 | } | ||
436 | |||
437 | int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr) | ||
438 | { | ||
439 | struct kvmppc_pte pte; | ||
440 | hva_t hva = eaddr; | ||
441 | |||
442 | vcpu->stat.st++; | ||
443 | |||
444 | if (kvmppc_xlate(vcpu, eaddr, false, &pte)) | ||
445 | goto err; | ||
446 | |||
447 | hva = kvmppc_pte_to_hva(vcpu, &pte, false); | ||
448 | if (kvm_is_error_hva(hva)) | ||
449 | goto err; | ||
450 | |||
451 | if (copy_to_user((void __user *)hva, ptr, size)) { | ||
452 | printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva); | ||
453 | goto err; | ||
454 | } | ||
455 | |||
456 | return 0; | ||
457 | |||
458 | err: | ||
459 | return -ENOENT; | ||
460 | } | ||
461 | |||
462 | int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr, | ||
463 | bool data) | ||
464 | { | ||
465 | struct kvmppc_pte pte; | ||
466 | hva_t hva = eaddr; | ||
467 | |||
468 | vcpu->stat.ld++; | ||
469 | |||
470 | if (kvmppc_xlate(vcpu, eaddr, data, &pte)) | ||
471 | goto err; | ||
472 | |||
473 | hva = kvmppc_pte_to_hva(vcpu, &pte, true); | ||
474 | if (kvm_is_error_hva(hva)) | ||
475 | goto err; | ||
476 | |||
477 | if (copy_from_user(ptr, (void __user *)hva, size)) { | ||
478 | printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva); | ||
479 | goto err; | ||
480 | } | ||
481 | |||
482 | return 0; | ||
483 | |||
484 | err: | ||
485 | return -ENOENT; | ||
486 | } | ||
487 | |||
488 | static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) | ||
489 | { | ||
490 | return kvm_is_visible_gfn(vcpu->kvm, gfn); | ||
491 | } | ||
492 | |||
493 | int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
494 | ulong eaddr, int vec) | ||
495 | { | ||
496 | bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE); | ||
497 | int r = RESUME_GUEST; | ||
498 | int relocated; | ||
499 | int page_found = 0; | ||
500 | struct kvmppc_pte pte; | ||
501 | bool is_mmio = false; | ||
502 | |||
503 | if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) { | ||
504 | relocated = (vcpu->arch.msr & MSR_DR); | ||
505 | } else { | ||
506 | relocated = (vcpu->arch.msr & MSR_IR); | ||
507 | } | ||
508 | |||
509 | /* Resolve real address if translation turned on */ | ||
510 | if (relocated) { | ||
511 | page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data); | ||
512 | } else { | ||
513 | pte.may_execute = true; | ||
514 | pte.may_read = true; | ||
515 | pte.may_write = true; | ||
516 | pte.raddr = eaddr & 0xffffffff; | ||
517 | pte.eaddr = eaddr; | ||
518 | pte.vpage = eaddr >> 12; | ||
519 | switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { | ||
520 | case 0: | ||
521 | pte.vpage |= VSID_REAL; | ||
522 | case MSR_DR: | ||
523 | pte.vpage |= VSID_REAL_DR; | ||
524 | case MSR_IR: | ||
525 | pte.vpage |= VSID_REAL_IR; | ||
526 | } | ||
527 | } | ||
528 | |||
529 | if (vcpu->arch.mmu.is_dcbz32(vcpu) && | ||
530 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { | ||
531 | /* | ||
532 | * If we do the dcbz hack, we have to NX on every execution, | ||
533 | * so we can patch the executing code. This renders our guest | ||
534 | * NX-less. | ||
535 | */ | ||
536 | pte.may_execute = !data; | ||
537 | } | ||
538 | |||
539 | if (page_found == -ENOENT) { | ||
540 | /* Page not found in guest PTE entries */ | ||
541 | vcpu->arch.dear = vcpu->arch.fault_dear; | ||
542 | to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr; | ||
543 | vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL); | ||
544 | kvmppc_book3s_queue_irqprio(vcpu, vec); | ||
545 | } else if (page_found == -EPERM) { | ||
546 | /* Storage protection */ | ||
547 | vcpu->arch.dear = vcpu->arch.fault_dear; | ||
548 | to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE; | ||
549 | to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT; | ||
550 | vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL); | ||
551 | kvmppc_book3s_queue_irqprio(vcpu, vec); | ||
552 | } else if (page_found == -EINVAL) { | ||
553 | /* Page not found in guest SLB */ | ||
554 | vcpu->arch.dear = vcpu->arch.fault_dear; | ||
555 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); | ||
556 | } else if (!is_mmio && | ||
557 | kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { | ||
558 | /* The guest's PTE is not mapped yet. Map on the host */ | ||
559 | kvmppc_mmu_map_page(vcpu, &pte); | ||
560 | if (data) | ||
561 | vcpu->stat.sp_storage++; | ||
562 | else if (vcpu->arch.mmu.is_dcbz32(vcpu) && | ||
563 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) | ||
564 | kvmppc_patch_dcbz(vcpu, &pte); | ||
565 | } else { | ||
566 | /* MMIO */ | ||
567 | vcpu->stat.mmio_exits++; | ||
568 | vcpu->arch.paddr_accessed = pte.raddr; | ||
569 | r = kvmppc_emulate_mmio(run, vcpu); | ||
570 | if ( r == RESUME_HOST_NV ) | ||
571 | r = RESUME_HOST; | ||
572 | } | ||
573 | |||
574 | return r; | ||
575 | } | ||
576 | |||
577 | static inline int get_fpr_index(int i) | ||
578 | { | ||
579 | #ifdef CONFIG_VSX | ||
580 | i *= 2; | ||
581 | #endif | ||
582 | return i; | ||
583 | } | ||
584 | |||
585 | /* Give up external provider (FPU, Altivec, VSX) */ | ||
586 | static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) | ||
587 | { | ||
588 | struct thread_struct *t = ¤t->thread; | ||
589 | u64 *vcpu_fpr = vcpu->arch.fpr; | ||
590 | u64 *vcpu_vsx = vcpu->arch.vsr; | ||
591 | u64 *thread_fpr = (u64*)t->fpr; | ||
592 | int i; | ||
593 | |||
594 | if (!(vcpu->arch.guest_owned_ext & msr)) | ||
595 | return; | ||
596 | |||
597 | #ifdef DEBUG_EXT | ||
598 | printk(KERN_INFO "Giving up ext 0x%lx\n", msr); | ||
599 | #endif | ||
600 | |||
601 | switch (msr) { | ||
602 | case MSR_FP: | ||
603 | giveup_fpu(current); | ||
604 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) | ||
605 | vcpu_fpr[i] = thread_fpr[get_fpr_index(i)]; | ||
606 | |||
607 | vcpu->arch.fpscr = t->fpscr.val; | ||
608 | break; | ||
609 | case MSR_VEC: | ||
610 | #ifdef CONFIG_ALTIVEC | ||
611 | giveup_altivec(current); | ||
612 | memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr)); | ||
613 | vcpu->arch.vscr = t->vscr; | ||
614 | #endif | ||
615 | break; | ||
616 | case MSR_VSX: | ||
617 | #ifdef CONFIG_VSX | ||
618 | __giveup_vsx(current); | ||
619 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++) | ||
620 | vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1]; | ||
621 | #endif | ||
622 | break; | ||
623 | default: | ||
624 | BUG(); | ||
625 | } | ||
626 | |||
627 | vcpu->arch.guest_owned_ext &= ~msr; | ||
628 | current->thread.regs->msr &= ~msr; | ||
629 | kvmppc_recalc_shadow_msr(vcpu); | ||
630 | } | ||
631 | |||
632 | /* Handle external providers (FPU, Altivec, VSX) */ | ||
633 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | ||
634 | ulong msr) | ||
635 | { | ||
636 | struct thread_struct *t = ¤t->thread; | ||
637 | u64 *vcpu_fpr = vcpu->arch.fpr; | ||
638 | u64 *vcpu_vsx = vcpu->arch.vsr; | ||
639 | u64 *thread_fpr = (u64*)t->fpr; | ||
640 | int i; | ||
641 | |||
642 | if (!(vcpu->arch.msr & msr)) { | ||
643 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | ||
644 | return RESUME_GUEST; | ||
645 | } | ||
646 | |||
647 | #ifdef DEBUG_EXT | ||
648 | printk(KERN_INFO "Loading up ext 0x%lx\n", msr); | ||
649 | #endif | ||
650 | |||
651 | current->thread.regs->msr |= msr; | ||
652 | |||
653 | switch (msr) { | ||
654 | case MSR_FP: | ||
655 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) | ||
656 | thread_fpr[get_fpr_index(i)] = vcpu_fpr[i]; | ||
657 | |||
658 | t->fpscr.val = vcpu->arch.fpscr; | ||
659 | t->fpexc_mode = 0; | ||
660 | kvmppc_load_up_fpu(); | ||
661 | break; | ||
662 | case MSR_VEC: | ||
663 | #ifdef CONFIG_ALTIVEC | ||
664 | memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); | ||
665 | t->vscr = vcpu->arch.vscr; | ||
666 | t->vrsave = -1; | ||
667 | kvmppc_load_up_altivec(); | ||
668 | #endif | ||
669 | break; | ||
670 | case MSR_VSX: | ||
671 | #ifdef CONFIG_VSX | ||
672 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++) | ||
673 | thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i]; | ||
674 | kvmppc_load_up_vsx(); | ||
675 | #endif | ||
676 | break; | ||
677 | default: | ||
678 | BUG(); | ||
679 | } | ||
680 | |||
681 | vcpu->arch.guest_owned_ext |= msr; | ||
682 | |||
683 | kvmppc_recalc_shadow_msr(vcpu); | ||
684 | |||
685 | return RESUME_GUEST; | ||
686 | } | ||
687 | |||
688 | int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
689 | unsigned int exit_nr) | ||
690 | { | ||
691 | int r = RESUME_HOST; | ||
692 | |||
693 | vcpu->stat.sum_exits++; | ||
694 | |||
695 | run->exit_reason = KVM_EXIT_UNKNOWN; | ||
696 | run->ready_for_interrupt_injection = 1; | ||
697 | #ifdef EXIT_DEBUG | ||
698 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n", | ||
699 | exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear, | ||
700 | kvmppc_get_dec(vcpu), vcpu->arch.msr); | ||
701 | #elif defined (EXIT_DEBUG_SIMPLE) | ||
702 | if ((exit_nr != 0x900) && (exit_nr != 0x500)) | ||
703 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n", | ||
704 | exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear, | ||
705 | vcpu->arch.msr); | ||
706 | #endif | ||
707 | kvm_resched(vcpu); | ||
708 | switch (exit_nr) { | ||
709 | case BOOK3S_INTERRUPT_INST_STORAGE: | ||
710 | vcpu->stat.pf_instruc++; | ||
711 | /* only care about PTEG not found errors, but leave NX alone */ | ||
712 | if (vcpu->arch.shadow_srr1 & 0x40000000) { | ||
713 | r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr); | ||
714 | vcpu->stat.sp_instruc++; | ||
715 | } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && | ||
716 | (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) { | ||
717 | /* | ||
718 | * XXX If we do the dcbz hack we use the NX bit to flush&patch the page, | ||
719 | * so we can't use the NX bit inside the guest. Let's cross our fingers, | ||
720 | * that no guest that needs the dcbz hack does NX. | ||
721 | */ | ||
722 | kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL); | ||
723 | } else { | ||
724 | vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000; | ||
725 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | ||
726 | kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL); | ||
727 | r = RESUME_GUEST; | ||
728 | } | ||
729 | break; | ||
730 | case BOOK3S_INTERRUPT_DATA_STORAGE: | ||
731 | vcpu->stat.pf_storage++; | ||
732 | /* The only case we need to handle is missing shadow PTEs */ | ||
733 | if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) { | ||
734 | r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr); | ||
735 | } else { | ||
736 | vcpu->arch.dear = vcpu->arch.fault_dear; | ||
737 | to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr; | ||
738 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | ||
739 | kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL); | ||
740 | r = RESUME_GUEST; | ||
741 | } | ||
742 | break; | ||
743 | case BOOK3S_INTERRUPT_DATA_SEGMENT: | ||
744 | if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) { | ||
745 | vcpu->arch.dear = vcpu->arch.fault_dear; | ||
746 | kvmppc_book3s_queue_irqprio(vcpu, | ||
747 | BOOK3S_INTERRUPT_DATA_SEGMENT); | ||
748 | } | ||
749 | r = RESUME_GUEST; | ||
750 | break; | ||
751 | case BOOK3S_INTERRUPT_INST_SEGMENT: | ||
752 | if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) { | ||
753 | kvmppc_book3s_queue_irqprio(vcpu, | ||
754 | BOOK3S_INTERRUPT_INST_SEGMENT); | ||
755 | } | ||
756 | r = RESUME_GUEST; | ||
757 | break; | ||
758 | /* We're good on these - the host merely wanted to get our attention */ | ||
759 | case BOOK3S_INTERRUPT_DECREMENTER: | ||
760 | vcpu->stat.dec_exits++; | ||
761 | r = RESUME_GUEST; | ||
762 | break; | ||
763 | case BOOK3S_INTERRUPT_EXTERNAL: | ||
764 | vcpu->stat.ext_intr_exits++; | ||
765 | r = RESUME_GUEST; | ||
766 | break; | ||
767 | case BOOK3S_INTERRUPT_PROGRAM: | ||
768 | { | ||
769 | enum emulation_result er; | ||
770 | ulong flags; | ||
771 | |||
772 | flags = vcpu->arch.shadow_srr1 & 0x1f0000ull; | ||
773 | |||
774 | if (vcpu->arch.msr & MSR_PR) { | ||
775 | #ifdef EXIT_DEBUG | ||
776 | printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst); | ||
777 | #endif | ||
778 | if ((vcpu->arch.last_inst & 0xff0007ff) != | ||
779 | (INS_DCBZ & 0xfffffff7)) { | ||
780 | kvmppc_core_queue_program(vcpu, flags); | ||
781 | r = RESUME_GUEST; | ||
782 | break; | ||
783 | } | ||
784 | } | ||
785 | |||
786 | vcpu->stat.emulated_inst_exits++; | ||
787 | er = kvmppc_emulate_instruction(run, vcpu); | ||
788 | switch (er) { | ||
789 | case EMULATE_DONE: | ||
790 | r = RESUME_GUEST_NV; | ||
791 | break; | ||
792 | case EMULATE_FAIL: | ||
793 | printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n", | ||
794 | __func__, vcpu->arch.pc, vcpu->arch.last_inst); | ||
795 | kvmppc_core_queue_program(vcpu, flags); | ||
796 | r = RESUME_GUEST; | ||
797 | break; | ||
798 | default: | ||
799 | BUG(); | ||
800 | } | ||
801 | break; | ||
802 | } | ||
803 | case BOOK3S_INTERRUPT_SYSCALL: | ||
804 | #ifdef EXIT_DEBUG | ||
805 | printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0)); | ||
806 | #endif | ||
807 | vcpu->stat.syscall_exits++; | ||
808 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | ||
809 | r = RESUME_GUEST; | ||
810 | break; | ||
811 | case BOOK3S_INTERRUPT_FP_UNAVAIL: | ||
812 | r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP); | ||
813 | break; | ||
814 | case BOOK3S_INTERRUPT_ALTIVEC: | ||
815 | r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC); | ||
816 | break; | ||
817 | case BOOK3S_INTERRUPT_VSX: | ||
818 | r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX); | ||
819 | break; | ||
820 | case BOOK3S_INTERRUPT_MACHINE_CHECK: | ||
821 | case BOOK3S_INTERRUPT_TRACE: | ||
822 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | ||
823 | r = RESUME_GUEST; | ||
824 | break; | ||
825 | default: | ||
826 | /* Ugh - bork here! What did we get? */ | ||
827 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", | ||
828 | exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1); | ||
829 | r = RESUME_HOST; | ||
830 | BUG(); | ||
831 | break; | ||
832 | } | ||
833 | |||
834 | |||
835 | if (!(r & RESUME_HOST)) { | ||
836 | /* To avoid clobbering exit_reason, only check for signals if | ||
837 | * we aren't already exiting to userspace for some other | ||
838 | * reason. */ | ||
839 | if (signal_pending(current)) { | ||
840 | #ifdef EXIT_DEBUG | ||
841 | printk(KERN_EMERG "KVM: Going back to host\n"); | ||
842 | #endif | ||
843 | vcpu->stat.signal_exits++; | ||
844 | run->exit_reason = KVM_EXIT_INTR; | ||
845 | r = -EINTR; | ||
846 | } else { | ||
847 | /* In case an interrupt came in that was triggered | ||
848 | * from userspace (like DEC), we need to check what | ||
849 | * to inject now! */ | ||
850 | kvmppc_core_deliver_interrupts(vcpu); | ||
851 | } | ||
852 | } | ||
853 | |||
854 | #ifdef EXIT_DEBUG | ||
855 | printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r); | ||
856 | #endif | ||
857 | |||
858 | return r; | ||
859 | } | ||
860 | |||
861 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | ||
862 | { | ||
863 | return 0; | ||
864 | } | ||
865 | |||
866 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
867 | { | ||
868 | int i; | ||
869 | |||
870 | regs->pc = vcpu->arch.pc; | ||
871 | regs->cr = kvmppc_get_cr(vcpu); | ||
872 | regs->ctr = vcpu->arch.ctr; | ||
873 | regs->lr = vcpu->arch.lr; | ||
874 | regs->xer = kvmppc_get_xer(vcpu); | ||
875 | regs->msr = vcpu->arch.msr; | ||
876 | regs->srr0 = vcpu->arch.srr0; | ||
877 | regs->srr1 = vcpu->arch.srr1; | ||
878 | regs->pid = vcpu->arch.pid; | ||
879 | regs->sprg0 = vcpu->arch.sprg0; | ||
880 | regs->sprg1 = vcpu->arch.sprg1; | ||
881 | regs->sprg2 = vcpu->arch.sprg2; | ||
882 | regs->sprg3 = vcpu->arch.sprg3; | ||
883 | regs->sprg5 = vcpu->arch.sprg4; | ||
884 | regs->sprg6 = vcpu->arch.sprg5; | ||
885 | regs->sprg7 = vcpu->arch.sprg6; | ||
886 | |||
887 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | ||
888 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); | ||
889 | |||
890 | return 0; | ||
891 | } | ||
892 | |||
893 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
894 | { | ||
895 | int i; | ||
896 | |||
897 | vcpu->arch.pc = regs->pc; | ||
898 | kvmppc_set_cr(vcpu, regs->cr); | ||
899 | vcpu->arch.ctr = regs->ctr; | ||
900 | vcpu->arch.lr = regs->lr; | ||
901 | kvmppc_set_xer(vcpu, regs->xer); | ||
902 | kvmppc_set_msr(vcpu, regs->msr); | ||
903 | vcpu->arch.srr0 = regs->srr0; | ||
904 | vcpu->arch.srr1 = regs->srr1; | ||
905 | vcpu->arch.sprg0 = regs->sprg0; | ||
906 | vcpu->arch.sprg1 = regs->sprg1; | ||
907 | vcpu->arch.sprg2 = regs->sprg2; | ||
908 | vcpu->arch.sprg3 = regs->sprg3; | ||
909 | vcpu->arch.sprg5 = regs->sprg4; | ||
910 | vcpu->arch.sprg6 = regs->sprg5; | ||
911 | vcpu->arch.sprg7 = regs->sprg6; | ||
912 | |||
913 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | ||
914 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); | ||
915 | |||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | ||
920 | struct kvm_sregs *sregs) | ||
921 | { | ||
922 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | ||
923 | int i; | ||
924 | |||
925 | sregs->pvr = vcpu->arch.pvr; | ||
926 | |||
927 | sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; | ||
928 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { | ||
929 | for (i = 0; i < 64; i++) { | ||
930 | sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i; | ||
931 | sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv; | ||
932 | } | ||
933 | } else { | ||
934 | for (i = 0; i < 16; i++) { | ||
935 | sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw; | ||
936 | sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw; | ||
937 | } | ||
938 | for (i = 0; i < 8; i++) { | ||
939 | sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; | ||
940 | sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; | ||
941 | } | ||
942 | } | ||
943 | return 0; | ||
944 | } | ||
945 | |||
946 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | ||
947 | struct kvm_sregs *sregs) | ||
948 | { | ||
949 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | ||
950 | int i; | ||
951 | |||
952 | kvmppc_set_pvr(vcpu, sregs->pvr); | ||
953 | |||
954 | vcpu3s->sdr1 = sregs->u.s.sdr1; | ||
955 | if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) { | ||
956 | for (i = 0; i < 64; i++) { | ||
957 | vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv, | ||
958 | sregs->u.s.ppc64.slb[i].slbe); | ||
959 | } | ||
960 | } else { | ||
961 | for (i = 0; i < 16; i++) { | ||
962 | vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]); | ||
963 | } | ||
964 | for (i = 0; i < 8; i++) { | ||
965 | kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false, | ||
966 | (u32)sregs->u.s.ppc32.ibat[i]); | ||
967 | kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true, | ||
968 | (u32)(sregs->u.s.ppc32.ibat[i] >> 32)); | ||
969 | kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false, | ||
970 | (u32)sregs->u.s.ppc32.dbat[i]); | ||
971 | kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true, | ||
972 | (u32)(sregs->u.s.ppc32.dbat[i] >> 32)); | ||
973 | } | ||
974 | } | ||
975 | |||
976 | /* Flush the MMU after messing with the segments */ | ||
977 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | ||
978 | return 0; | ||
979 | } | ||
980 | |||
981 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
982 | { | ||
983 | return -ENOTSUPP; | ||
984 | } | ||
985 | |||
986 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
987 | { | ||
988 | return -ENOTSUPP; | ||
989 | } | ||
990 | |||
991 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | ||
992 | struct kvm_translation *tr) | ||
993 | { | ||
994 | return 0; | ||
995 | } | ||
996 | |||
997 | /* | ||
998 | * Get (and clear) the dirty memory log for a memory slot. | ||
999 | */ | ||
1000 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | ||
1001 | struct kvm_dirty_log *log) | ||
1002 | { | ||
1003 | struct kvm_memory_slot *memslot; | ||
1004 | struct kvm_vcpu *vcpu; | ||
1005 | ulong ga, ga_end; | ||
1006 | int is_dirty = 0; | ||
1007 | int r; | ||
1008 | unsigned long n; | ||
1009 | |||
1010 | mutex_lock(&kvm->slots_lock); | ||
1011 | |||
1012 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | ||
1013 | if (r) | ||
1014 | goto out; | ||
1015 | |||
1016 | /* If nothing is dirty, don't bother messing with page tables. */ | ||
1017 | if (is_dirty) { | ||
1018 | memslot = &kvm->memslots->memslots[log->slot]; | ||
1019 | |||
1020 | ga = memslot->base_gfn << PAGE_SHIFT; | ||
1021 | ga_end = ga + (memslot->npages << PAGE_SHIFT); | ||
1022 | |||
1023 | kvm_for_each_vcpu(n, vcpu, kvm) | ||
1024 | kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); | ||
1025 | |||
1026 | n = kvm_dirty_bitmap_bytes(memslot); | ||
1027 | memset(memslot->dirty_bitmap, 0, n); | ||
1028 | } | ||
1029 | |||
1030 | r = 0; | ||
1031 | out: | ||
1032 | mutex_unlock(&kvm->slots_lock); | ||
1033 | return r; | ||
1034 | } | ||
1035 | |||
1036 | int kvmppc_core_check_processor_compat(void) | ||
1037 | { | ||
1038 | return 0; | ||
1039 | } | ||
1040 | |||
1041 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | ||
1042 | { | ||
1043 | struct kvmppc_vcpu_book3s *vcpu_book3s; | ||
1044 | struct kvm_vcpu *vcpu; | ||
1045 | int err; | ||
1046 | |||
1047 | vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages( GFP_KERNEL | __GFP_ZERO, | ||
1048 | get_order(sizeof(struct kvmppc_vcpu_book3s))); | ||
1049 | if (!vcpu_book3s) { | ||
1050 | err = -ENOMEM; | ||
1051 | goto out; | ||
1052 | } | ||
1053 | |||
1054 | vcpu = &vcpu_book3s->vcpu; | ||
1055 | err = kvm_vcpu_init(vcpu, kvm, id); | ||
1056 | if (err) | ||
1057 | goto free_vcpu; | ||
1058 | |||
1059 | vcpu->arch.host_retip = kvm_return_point; | ||
1060 | vcpu->arch.host_msr = mfmsr(); | ||
1061 | /* default to book3s_64 (970fx) */ | ||
1062 | vcpu->arch.pvr = 0x3C0301; | ||
1063 | kvmppc_set_pvr(vcpu, vcpu->arch.pvr); | ||
1064 | vcpu_book3s->slb_nr = 64; | ||
1065 | |||
1066 | /* remember where some real-mode handlers are */ | ||
1067 | vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem; | ||
1068 | vcpu->arch.trampoline_enter = kvmppc_trampoline_enter; | ||
1069 | vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem; | ||
1070 | vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall; | ||
1071 | |||
1072 | vcpu->arch.shadow_msr = MSR_USER64; | ||
1073 | |||
1074 | err = __init_new_context(); | ||
1075 | if (err < 0) | ||
1076 | goto free_vcpu; | ||
1077 | vcpu_book3s->context_id = err; | ||
1078 | |||
1079 | vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1; | ||
1080 | vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS; | ||
1081 | vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; | ||
1082 | |||
1083 | return vcpu; | ||
1084 | |||
1085 | free_vcpu: | ||
1086 | free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s))); | ||
1087 | out: | ||
1088 | return ERR_PTR(err); | ||
1089 | } | ||
1090 | |||
1091 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | ||
1092 | { | ||
1093 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | ||
1094 | |||
1095 | __destroy_context(vcpu_book3s->context_id); | ||
1096 | kvm_vcpu_uninit(vcpu); | ||
1097 | free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s))); | ||
1098 | } | ||
1099 | |||
1100 | extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); | ||
1101 | int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | ||
1102 | { | ||
1103 | int ret; | ||
1104 | struct thread_struct ext_bkp; | ||
1105 | bool save_vec = current->thread.used_vr; | ||
1106 | bool save_vsx = current->thread.used_vsr; | ||
1107 | ulong ext_msr; | ||
1108 | |||
1109 | /* No need to go into the guest when all we do is going out */ | ||
1110 | if (signal_pending(current)) { | ||
1111 | kvm_run->exit_reason = KVM_EXIT_INTR; | ||
1112 | return -EINTR; | ||
1113 | } | ||
1114 | |||
1115 | /* Save FPU state in stack */ | ||
1116 | if (current->thread.regs->msr & MSR_FP) | ||
1117 | giveup_fpu(current); | ||
1118 | memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr)); | ||
1119 | ext_bkp.fpscr = current->thread.fpscr; | ||
1120 | ext_bkp.fpexc_mode = current->thread.fpexc_mode; | ||
1121 | |||
1122 | #ifdef CONFIG_ALTIVEC | ||
1123 | /* Save Altivec state in stack */ | ||
1124 | if (save_vec) { | ||
1125 | if (current->thread.regs->msr & MSR_VEC) | ||
1126 | giveup_altivec(current); | ||
1127 | memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr)); | ||
1128 | ext_bkp.vscr = current->thread.vscr; | ||
1129 | ext_bkp.vrsave = current->thread.vrsave; | ||
1130 | } | ||
1131 | ext_bkp.used_vr = current->thread.used_vr; | ||
1132 | #endif | ||
1133 | |||
1134 | #ifdef CONFIG_VSX | ||
1135 | /* Save VSX state in stack */ | ||
1136 | if (save_vsx && (current->thread.regs->msr & MSR_VSX)) | ||
1137 | __giveup_vsx(current); | ||
1138 | ext_bkp.used_vsr = current->thread.used_vsr; | ||
1139 | #endif | ||
1140 | |||
1141 | /* Remember the MSR with disabled extensions */ | ||
1142 | ext_msr = current->thread.regs->msr; | ||
1143 | |||
1144 | /* XXX we get called with irq disabled - change that! */ | ||
1145 | local_irq_enable(); | ||
1146 | |||
1147 | ret = __kvmppc_vcpu_entry(kvm_run, vcpu); | ||
1148 | |||
1149 | local_irq_disable(); | ||
1150 | |||
1151 | current->thread.regs->msr = ext_msr; | ||
1152 | |||
1153 | /* Make sure we save the guest FPU/Altivec/VSX state */ | ||
1154 | kvmppc_giveup_ext(vcpu, MSR_FP); | ||
1155 | kvmppc_giveup_ext(vcpu, MSR_VEC); | ||
1156 | kvmppc_giveup_ext(vcpu, MSR_VSX); | ||
1157 | |||
1158 | /* Restore FPU state from stack */ | ||
1159 | memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr)); | ||
1160 | current->thread.fpscr = ext_bkp.fpscr; | ||
1161 | current->thread.fpexc_mode = ext_bkp.fpexc_mode; | ||
1162 | |||
1163 | #ifdef CONFIG_ALTIVEC | ||
1164 | /* Restore Altivec state from stack */ | ||
1165 | if (save_vec && current->thread.used_vr) { | ||
1166 | memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr)); | ||
1167 | current->thread.vscr = ext_bkp.vscr; | ||
1168 | current->thread.vrsave= ext_bkp.vrsave; | ||
1169 | } | ||
1170 | current->thread.used_vr = ext_bkp.used_vr; | ||
1171 | #endif | ||
1172 | |||
1173 | #ifdef CONFIG_VSX | ||
1174 | current->thread.used_vsr = ext_bkp.used_vsr; | ||
1175 | #endif | ||
1176 | |||
1177 | return ret; | ||
1178 | } | ||
1179 | |||
1180 | static int kvmppc_book3s_init(void) | ||
1181 | { | ||
1182 | return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE); | ||
1183 | } | ||
1184 | |||
1185 | static void kvmppc_book3s_exit(void) | ||
1186 | { | ||
1187 | kvm_exit(); | ||
1188 | } | ||
1189 | |||
1190 | module_init(kvmppc_book3s_init); | ||
1191 | module_exit(kvmppc_book3s_exit); | ||
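The dcbz workaround in book3s.c hinges on two masks that also appear in the program-interrupt path of kvmppc_handle_exit(): `insn & 0xff0007ff` isolates the opcode bits common to every dcbz encoding, and `insn &= 0xfffffff7` clears a reserved bit so the patched instruction traps as illegal. When the trap arrives, the handler recognizes the patched form — the `(last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)` test — and emulates dcbz with a 32-byte cache line. Below is a self-contained sketch of the scan-and-patch loop mirroring kvmppc_patch_dcbz(); the INS_DCBZ value is written out here as an assumption rather than taken from the kernel headers.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define INS_DCBZ 0x7c0007ecU /* dcbz opcode pattern; assumed value */

/* Scan one page of guest instructions and rewrite every dcbz into a
 * reserved encoding so that executing it traps to the host, which can
 * then emulate a 32-byte dcbz. Returns true if anything was patched. */
static bool patch_dcbz_page(uint32_t *page, size_t nwords)
{
	bool touched = false;

	for (size_t i = 0; i < nwords; i++) {
		if ((page[i] & 0xff0007ff) == INS_DCBZ) {
			page[i] &= 0xfffffff7; /* clear a reserved bit */
			touched = true;
		}
	}
	return touched;
}
```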
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
new file mode 100644
index 000000000000..faf99f20d993
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -0,0 +1,372 @@
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright SUSE Linux Products GmbH 2009 | ||
16 | * | ||
17 | * Authors: Alexander Graf <agraf@suse.de> | ||
18 | */ | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/kvm.h> | ||
23 | #include <linux/kvm_host.h> | ||
24 | #include <linux/highmem.h> | ||
25 | |||
26 | #include <asm/tlbflush.h> | ||
27 | #include <asm/kvm_ppc.h> | ||
28 | #include <asm/kvm_book3s.h> | ||
29 | |||
30 | /* #define DEBUG_MMU */ | ||
31 | /* #define DEBUG_MMU_PTE */ | ||
32 | /* #define DEBUG_MMU_PTE_IP 0xfff14c40 */ | ||
33 | |||
34 | #ifdef DEBUG_MMU | ||
35 | #define dprintk(X...) printk(KERN_INFO X) | ||
36 | #else | ||
37 | #define dprintk(X...) do { } while(0) | ||
38 | #endif | ||
39 | |||
40 | #ifdef DEBUG_PTE | ||
41 | #define dprintk_pte(X...) printk(KERN_INFO X) | ||
42 | #else | ||
43 | #define dprintk_pte(X...) do { } while(0) | ||
44 | #endif | ||
45 | |||
46 | #define PTEG_FLAG_ACCESSED 0x00000100 | ||
47 | #define PTEG_FLAG_DIRTY 0x00000080 | ||
48 | |||
49 | static inline bool check_debug_ip(struct kvm_vcpu *vcpu) | ||
50 | { | ||
51 | #ifdef DEBUG_MMU_PTE_IP | ||
52 | return vcpu->arch.pc == DEBUG_MMU_PTE_IP; | ||
53 | #else | ||
54 | return true; | ||
55 | #endif | ||
56 | } | ||
57 | |||
58 | static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, | ||
59 | struct kvmppc_pte *pte, bool data); | ||
60 | |||
61 | static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr) | ||
62 | { | ||
63 | return &vcpu_book3s->sr[(eaddr >> 28) & 0xf]; | ||
64 | } | ||
65 | |||
66 | static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, | ||
67 | bool data) | ||
68 | { | ||
69 | struct kvmppc_sr *sre = find_sr(to_book3s(vcpu), eaddr); | ||
70 | struct kvmppc_pte pte; | ||
71 | |||
72 | if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data)) | ||
73 | return pte.vpage; | ||
74 | |||
75 | return (((u64)eaddr >> 12) & 0xffff) | (((u64)sre->vsid) << 16); | ||
76 | } | ||
77 | |||
78 | static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu) | ||
79 | { | ||
80 | kvmppc_set_msr(vcpu, 0); | ||
81 | } | ||
82 | |||
83 | static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s, | ||
84 | struct kvmppc_sr *sre, gva_t eaddr, | ||
85 | bool primary) | ||
86 | { | ||
87 | u32 page, hash, pteg, htabmask; | ||
88 | hva_t r; | ||
89 | |||
90 | page = (eaddr & 0x0FFFFFFF) >> 12; | ||
91 | htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0; | ||
92 | |||
93 | hash = ((sre->vsid ^ page) << 6); | ||
94 | if (!primary) | ||
95 | hash = ~hash; | ||
96 | hash &= htabmask; | ||
97 | |||
98 | pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash; | ||
99 | |||
100 | dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n", | ||
101 | vcpu_book3s->vcpu.arch.pc, eaddr, vcpu_book3s->sdr1, pteg, | ||
102 | sre->vsid); | ||
103 | |||
104 | r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); | ||
105 | if (kvm_is_error_hva(r)) | ||
106 | return r; | ||
107 | return r | (pteg & ~PAGE_MASK); | ||
108 | } | ||
109 | |||
110 | static u32 kvmppc_mmu_book3s_32_get_ptem(struct kvmppc_sr *sre, gva_t eaddr, | ||
111 | bool primary) | ||
112 | { | ||
113 | return ((eaddr & 0x0fffffff) >> 22) | (sre->vsid << 7) | | ||
114 | (primary ? 0 : 0x40) | 0x80000000; | ||
115 | } | ||
116 | |||
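The two helpers above implement the classic 32-bit hashed page table walk: SDR1 supplies the HTAB base and mask, the primary hash is VSID xor page index (shifted into PTEG granularity), and the secondary hash is its complement. As a quick sanity check, here is the same arithmetic as a standalone sketch with invented SDR1/VSID/eaddr values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented inputs; masks and shifts copied from
	 * kvmppc_mmu_book3s_32_get_pteg()/_get_ptem() above. */
	uint32_t sdr1  = 0x00fe0007;
	uint32_t vsid  = 0x00123456;
	uint32_t eaddr = 0x1234d000;

	uint32_t page = (eaddr & 0x0FFFFFFF) >> 12;
	uint32_t htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;

	uint32_t hash  = (vsid ^ page) << 6;		/* primary */
	uint32_t pteg1 = (sdr1 & 0xffff0000) | (hash & htabmask);
	uint32_t pteg2 = (sdr1 & 0xffff0000) | (~hash & htabmask);

	/* PTE first word we expect to match in that group (primary) */
	uint32_t ptem = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) |
			0x80000000;

	printf("PTEG 0x%08x / 0x%08x, ptem 0x%08x\n", pteg1, pteg2, ptem);
	return 0;
}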
117 | static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, | ||
118 | struct kvmppc_pte *pte, bool data) | ||
119 | { | ||
120 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | ||
121 | struct kvmppc_bat *bat; | ||
122 | int i; | ||
123 | |||
124 | for (i = 0; i < 8; i++) { | ||
125 | if (data) | ||
126 | bat = &vcpu_book3s->dbat[i]; | ||
127 | else | ||
128 | bat = &vcpu_book3s->ibat[i]; | ||
129 | |||
130 | if (vcpu->arch.msr & MSR_PR) { | ||
131 | if (!bat->vp) | ||
132 | continue; | ||
133 | } else { | ||
134 | if (!bat->vs) | ||
135 | continue; | ||
136 | } | ||
137 | |||
138 | if (check_debug_ip(vcpu)) | ||
139 | { | ||
140 | dprintk_pte("%cBAT %02d: 0x%lx - 0x%x (0x%x)\n", | ||
141 | data ? 'd' : 'i', i, eaddr, bat->bepi, | ||
142 | bat->bepi_mask); | ||
143 | } | ||
144 | if ((eaddr & bat->bepi_mask) == bat->bepi) { | ||
145 | pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask); | ||
146 | pte->vpage = (eaddr >> 12) | VSID_BAT; | ||
147 | pte->may_read = bat->pp; | ||
148 | pte->may_write = bat->pp > 1; | ||
149 | pte->may_execute = true; | ||
150 | if (!pte->may_read) { | ||
151 | printk(KERN_INFO "BAT is not readable!\n"); | ||
152 | continue; | ||
153 | } | ||
154 | if (!pte->may_write) { | ||
155 | /* let's treat r/o BATs as not-readable for now */ | ||
156 | dprintk_pte("BAT is read-only!\n"); | ||
157 | continue; | ||
158 | } | ||
159 | |||
160 | return 0; | ||
161 | } | ||
162 | } | ||
163 | |||
164 | return -ENOENT; | ||
165 | } | ||
166 | |||
167 | static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, | ||
168 | struct kvmppc_pte *pte, bool data, | ||
169 | bool primary) | ||
170 | { | ||
171 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | ||
172 | struct kvmppc_sr *sre; | ||
173 | hva_t ptegp; | ||
174 | u32 pteg[16]; | ||
175 | u64 ptem = 0; | ||
176 | int i; | ||
177 | int found = 0; | ||
178 | |||
179 | sre = find_sr(vcpu_book3s, eaddr); | ||
180 | |||
181 | dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28, | ||
182 | sre->vsid, sre->raw); | ||
183 | |||
184 | pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); | ||
185 | |||
186 | ptegp = kvmppc_mmu_book3s_32_get_pteg(vcpu_book3s, sre, eaddr, primary); | ||
187 | if (kvm_is_error_hva(ptegp)) { | ||
188 | printk(KERN_INFO "KVM: Invalid PTEG!\n"); | ||
189 | goto no_page_found; | ||
190 | } | ||
191 | |||
192 | ptem = kvmppc_mmu_book3s_32_get_ptem(sre, eaddr, primary); | ||
193 | |||
194 | if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) { | ||
195 | printk(KERN_ERR "KVM: Can't copy data from 0x%lx!\n", ptegp); | ||
196 | goto no_page_found; | ||
197 | } | ||
198 | |||
199 | for (i = 0; i < 16; i += 2) { | ||
200 | if (ptem == pteg[i]) { | ||
201 | u8 pp; | ||
202 | |||
203 | pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF); | ||
204 | pp = pteg[i+1] & 3; | ||
205 | |||
206 | if ((sre->Kp && (vcpu->arch.msr & MSR_PR)) || | ||
207 | (sre->Ks && !(vcpu->arch.msr & MSR_PR))) | ||
208 | pp |= 4; | ||
209 | |||
210 | pte->may_write = false; | ||
211 | pte->may_read = false; | ||
212 | pte->may_execute = true; | ||
213 | switch (pp) { | ||
214 | case 0: | ||
215 | case 1: | ||
216 | case 2: | ||
217 | case 6: | ||
218 | pte->may_write = true; /* fall through */ | ||
219 | case 3: | ||
220 | case 5: | ||
221 | case 7: | ||
222 | pte->may_read = true; | ||
223 | break; | ||
224 | } | ||
225 | |||
226 | if (!pte->may_read) | ||
227 | continue; | ||
228 | |||
229 | dprintk_pte("MMU: Found PTE -> %x %x - %x\n", | ||
230 | pteg[i], pteg[i+1], pp); | ||
231 | found = 1; | ||
232 | break; | ||
233 | } | ||
234 | } | ||
235 | |||
236 | /* Update PTE C and A bits, so the guest's swapper knows we used the | ||
237 | page */ | ||
238 | if (found) { | ||
239 | u32 oldpte = pteg[i+1]; | ||
240 | |||
241 | if (pte->may_read) | ||
242 | pteg[i+1] |= PTEG_FLAG_ACCESSED; | ||
243 | if (pte->may_write) | ||
244 | pteg[i+1] |= PTEG_FLAG_DIRTY; | ||
245 | else | ||
246 | dprintk_pte("KVM: Mapping read-only page!\n"); | ||
247 | |||
248 | /* Write back into the PTEG */ | ||
249 | if (pteg[i+1] != oldpte) | ||
250 | copy_to_user((void __user *)ptegp, pteg, sizeof(pteg)); | ||
251 | |||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | no_page_found: | ||
256 | |||
257 | if (check_debug_ip(vcpu)) { | ||
258 | dprintk_pte("KVM MMU: No PTE found (sdr1=0x%llx ptegp=0x%lx)\n", | ||
259 | to_book3s(vcpu)->sdr1, ptegp); | ||
260 | for (i = 0; i < 16; i += 2) { | ||
261 | dprintk_pte(" %02d: 0x%x - 0x%x (0x%llx)\n", | ||
262 | i, pteg[i], pteg[i+1], ptem); | ||
263 | } | ||
264 | } | ||
265 | |||
266 | return -ENOENT; | ||
267 | } | ||
268 | |||
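The pp switch in xlate_pte() above is easier to audit as a table: pp values 0, 1, 2 and 6 are writable (and therefore readable via the fall-through), 3, 5 and 7 are read-only, and 4 (pp 0 with the Ks/Kp key OR'd in) denies everything. A small program reproducing the decoding:

#include <stdio.h>

/* Same decoding as the switch above; pp is the low two PTE bits,
 * with 4 OR'd in when the Ks/Kp supervisor/user key applies. */
static void decode_pp(unsigned int pp, int *r, int *w)
{
	*r = *w = 0;
	switch (pp) {
	case 0: case 1: case 2: case 6:
		*w = 1;
		/* fall through */
	case 3: case 5: case 7:
		*r = 1;
		break;
	}
}

int main(void)
{
	unsigned int pp;
	int r, w;

	for (pp = 0; pp < 8; pp++) {
		decode_pp(pp, &r, &w);
		printf("pp=%u read=%d write=%d\n", pp, r, w);
	}
	return 0;
}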
269 | static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | ||
270 | struct kvmppc_pte *pte, bool data) | ||
271 | { | ||
272 | int r; | ||
273 | |||
274 | pte->eaddr = eaddr; | ||
275 | r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data); | ||
276 | if (r < 0) | ||
277 | r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true); | ||
278 | if (r < 0) | ||
279 | r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, false); | ||
280 | |||
281 | return r; | ||
282 | } | ||
283 | |||
284 | |||
285 | static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum) | ||
286 | { | ||
287 | return to_book3s(vcpu)->sr[srnum].raw; | ||
288 | } | ||
289 | |||
290 | static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, | ||
291 | ulong value) | ||
292 | { | ||
293 | struct kvmppc_sr *sre; | ||
294 | |||
295 | sre = &to_book3s(vcpu)->sr[srnum]; | ||
296 | |||
297 | /* Flush any left-over shadows from the previous SR */ | ||
298 | |||
299 | /* XXX Not necessary? */ | ||
300 | /* kvmppc_mmu_pte_flush(vcpu, ((u64)sre->vsid) << 28, 0xf0000000ULL); */ | ||
301 | |||
302 | /* And then put in the new SR */ | ||
303 | sre->raw = value; | ||
304 | sre->vsid = (value & 0x0fffffff); | ||
305 | sre->Ks = (value & 0x40000000) ? true : false; | ||
306 | sre->Kp = (value & 0x20000000) ? true : false; | ||
307 | sre->nx = (value & 0x10000000) ? true : false; | ||
308 | |||
309 | /* Map the new segment */ | ||
310 | kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT); | ||
311 | } | ||
312 | |||
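For reference, the segment register image decoded by mtsrin() above carries the key and no-execute flags in the top nibble and the VSID in the low 28 bits. A standalone decoder using the same masks, with a made-up SR value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t value = 0x60123456;	/* made-up SR image */

	/* Masks copied from kvmppc_mmu_book3s_32_mtsrin() above */
	uint32_t vsid = value & 0x0fffffff;
	int ks = !!(value & 0x40000000);
	int kp = !!(value & 0x20000000);
	int nx = !!(value & 0x10000000);

	printf("vsid=0x%07x Ks=%d Kp=%d N=%d\n", vsid, ks, kp, nx);
	return 0;
}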
313 | static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large) | ||
314 | { | ||
315 | kvmppc_mmu_pte_flush(vcpu, ea, ~0xFFFULL); | ||
316 | } | ||
317 | |||
318 | static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid, | ||
319 | u64 *vsid) | ||
320 | { | ||
321 | /* In case we only have one of MSR_IR or MSR_DR set, let's put | ||
322 | that in the real-mode context (and hope RM doesn't access | ||
323 | high memory) */ | ||
324 | switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { | ||
325 | case 0: | ||
326 | *vsid = (VSID_REAL >> 16) | esid; | ||
327 | break; | ||
328 | case MSR_IR: | ||
329 | *vsid = (VSID_REAL_IR >> 16) | esid; | ||
330 | break; | ||
331 | case MSR_DR: | ||
332 | *vsid = (VSID_REAL_DR >> 16) | esid; | ||
333 | break; | ||
334 | case MSR_DR|MSR_IR: | ||
335 | { | ||
336 | ulong ea; | ||
337 | ea = esid << SID_SHIFT; | ||
338 | *vsid = find_sr(to_book3s(vcpu), ea)->vsid; | ||
339 | break; | ||
340 | } | ||
341 | default: | ||
342 | BUG(); | ||
343 | } | ||
344 | |||
345 | return 0; | ||
346 | } | ||
347 | |||
348 | static bool kvmppc_mmu_book3s_32_is_dcbz32(struct kvm_vcpu *vcpu) | ||
349 | { | ||
350 | return true; | ||
351 | } | ||
352 | |||
353 | |||
354 | void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu) | ||
355 | { | ||
356 | struct kvmppc_mmu *mmu = &vcpu->arch.mmu; | ||
357 | |||
358 | mmu->mtsrin = kvmppc_mmu_book3s_32_mtsrin; | ||
359 | mmu->mfsrin = kvmppc_mmu_book3s_32_mfsrin; | ||
360 | mmu->xlate = kvmppc_mmu_book3s_32_xlate; | ||
361 | mmu->reset_msr = kvmppc_mmu_book3s_32_reset_msr; | ||
362 | mmu->tlbie = kvmppc_mmu_book3s_32_tlbie; | ||
363 | mmu->esid_to_vsid = kvmppc_mmu_book3s_32_esid_to_vsid; | ||
364 | mmu->ea_to_vp = kvmppc_mmu_book3s_32_ea_to_vp; | ||
365 | mmu->is_dcbz32 = kvmppc_mmu_book3s_32_is_dcbz32; | ||
366 | |||
367 | mmu->slbmte = NULL; | ||
368 | mmu->slbmfee = NULL; | ||
369 | mmu->slbmfev = NULL; | ||
370 | mmu->slbie = NULL; | ||
371 | mmu->slbia = NULL; | ||
372 | } | ||
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c new file mode 100644 index 000000000000..2b0ee7e040c9 --- /dev/null +++ b/arch/powerpc/kvm/book3s_64_emulate.c | |||
@@ -0,0 +1,348 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright SUSE Linux Products GmbH 2009 | ||
16 | * | ||
17 | * Authors: Alexander Graf <agraf@suse.de> | ||
18 | */ | ||
19 | |||
20 | #include <asm/kvm_ppc.h> | ||
21 | #include <asm/disassemble.h> | ||
22 | #include <asm/kvm_book3s.h> | ||
23 | #include <asm/reg.h> | ||
24 | |||
25 | #define OP_19_XOP_RFID 18 | ||
26 | #define OP_19_XOP_RFI 50 | ||
27 | |||
28 | #define OP_31_XOP_MFMSR 83 | ||
29 | #define OP_31_XOP_MTMSR 146 | ||
30 | #define OP_31_XOP_MTMSRD 178 | ||
31 | #define OP_31_XOP_MTSRIN 242 | ||
32 | #define OP_31_XOP_TLBIEL 274 | ||
33 | #define OP_31_XOP_TLBIE 306 | ||
34 | #define OP_31_XOP_SLBMTE 402 | ||
35 | #define OP_31_XOP_SLBIE 434 | ||
36 | #define OP_31_XOP_SLBIA 498 | ||
37 | #define OP_31_XOP_MFSRIN 659 | ||
38 | #define OP_31_XOP_SLBMFEV 851 | ||
39 | #define OP_31_XOP_EIOIO 854 | ||
40 | #define OP_31_XOP_SLBMFEE 915 | ||
41 | |||
42 | /* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */ | ||
43 | #define OP_31_XOP_DCBZ 1010 | ||
44 | |||
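The 1014 -> 1010 patching mentioned above only has to rewrite the 10-bit extended-opcode field, the same field get_xop() reads, i.e. bits 1..10 of the instruction word (assuming the usual (inst >> 1) & 0x3ff layout). A sketch of that rewrite:

#include <stdint.h>
#include <stdio.h>

/* Replace the X-form extended opcode; field position assumed to
 * match get_xop() in asm/disassemble.h. */
static uint32_t patch_xop(uint32_t inst, uint32_t new_xop)
{
	return (inst & ~(0x3ffu << 1)) | ((new_xop & 0x3ff) << 1);
}

int main(void)
{
	uint32_t dcbz = 0x7c0027ec;	/* dcbz 0,r4: op 31, xop 1014 */

	printf("patched: 0x%08x\n", patch_xop(dcbz, 1010));
	return 0;
}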
45 | int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | ||
46 | unsigned int inst, int *advance) | ||
47 | { | ||
48 | int emulated = EMULATE_DONE; | ||
49 | |||
50 | switch (get_op(inst)) { | ||
51 | case 19: | ||
52 | switch (get_xop(inst)) { | ||
53 | case OP_19_XOP_RFID: | ||
54 | case OP_19_XOP_RFI: | ||
55 | vcpu->arch.pc = vcpu->arch.srr0; | ||
56 | kvmppc_set_msr(vcpu, vcpu->arch.srr1); | ||
57 | *advance = 0; | ||
58 | break; | ||
59 | |||
60 | default: | ||
61 | emulated = EMULATE_FAIL; | ||
62 | break; | ||
63 | } | ||
64 | break; | ||
65 | case 31: | ||
66 | switch (get_xop(inst)) { | ||
67 | case OP_31_XOP_MFMSR: | ||
68 | kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr); | ||
69 | break; | ||
70 | case OP_31_XOP_MTMSRD: | ||
71 | { | ||
72 | ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst)); | ||
73 | if (inst & 0x10000) { | ||
74 | vcpu->arch.msr &= ~(MSR_RI | MSR_EE); | ||
75 | vcpu->arch.msr |= rs & (MSR_RI | MSR_EE); | ||
76 | } else | ||
77 | kvmppc_set_msr(vcpu, rs); | ||
78 | break; | ||
79 | } | ||
80 | case OP_31_XOP_MTMSR: | ||
81 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst))); | ||
82 | break; | ||
83 | case OP_31_XOP_MFSRIN: | ||
84 | { | ||
85 | int srnum; | ||
86 | |||
87 | srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf; | ||
88 | if (vcpu->arch.mmu.mfsrin) { | ||
89 | u32 sr; | ||
90 | sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); | ||
91 | kvmppc_set_gpr(vcpu, get_rt(inst), sr); | ||
92 | } | ||
93 | break; | ||
94 | } | ||
95 | case OP_31_XOP_MTSRIN: | ||
96 | vcpu->arch.mmu.mtsrin(vcpu, | ||
97 | (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf, | ||
98 | kvmppc_get_gpr(vcpu, get_rs(inst))); | ||
99 | break; | ||
100 | case OP_31_XOP_TLBIE: | ||
101 | case OP_31_XOP_TLBIEL: | ||
102 | { | ||
103 | bool large = (inst & 0x00200000) ? true : false; | ||
104 | ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst)); | ||
105 | vcpu->arch.mmu.tlbie(vcpu, addr, large); | ||
106 | break; | ||
107 | } | ||
108 | case OP_31_XOP_EIOIO: | ||
109 | break; | ||
110 | case OP_31_XOP_SLBMTE: | ||
111 | if (!vcpu->arch.mmu.slbmte) | ||
112 | return EMULATE_FAIL; | ||
113 | |||
114 | vcpu->arch.mmu.slbmte(vcpu, | ||
115 | kvmppc_get_gpr(vcpu, get_rs(inst)), | ||
116 | kvmppc_get_gpr(vcpu, get_rb(inst))); | ||
117 | break; | ||
118 | case OP_31_XOP_SLBIE: | ||
119 | if (!vcpu->arch.mmu.slbie) | ||
120 | return EMULATE_FAIL; | ||
121 | |||
122 | vcpu->arch.mmu.slbie(vcpu, | ||
123 | kvmppc_get_gpr(vcpu, get_rb(inst))); | ||
124 | break; | ||
125 | case OP_31_XOP_SLBIA: | ||
126 | if (!vcpu->arch.mmu.slbia) | ||
127 | return EMULATE_FAIL; | ||
128 | |||
129 | vcpu->arch.mmu.slbia(vcpu); | ||
130 | break; | ||
131 | case OP_31_XOP_SLBMFEE: | ||
132 | if (!vcpu->arch.mmu.slbmfee) { | ||
133 | emulated = EMULATE_FAIL; | ||
134 | } else { | ||
135 | ulong t, rb; | ||
136 | |||
137 | rb = kvmppc_get_gpr(vcpu, get_rb(inst)); | ||
138 | t = vcpu->arch.mmu.slbmfee(vcpu, rb); | ||
139 | kvmppc_set_gpr(vcpu, get_rt(inst), t); | ||
140 | } | ||
141 | break; | ||
142 | case OP_31_XOP_SLBMFEV: | ||
143 | if (!vcpu->arch.mmu.slbmfev) { | ||
144 | emulated = EMULATE_FAIL; | ||
145 | } else { | ||
146 | ulong t, rb; | ||
147 | |||
148 | rb = kvmppc_get_gpr(vcpu, get_rb(inst)); | ||
149 | t = vcpu->arch.mmu.slbmfev(vcpu, rb); | ||
150 | kvmppc_set_gpr(vcpu, get_rt(inst), t); | ||
151 | } | ||
152 | break; | ||
153 | case OP_31_XOP_DCBZ: | ||
154 | { | ||
155 | ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst)); | ||
156 | ulong ra = 0; | ||
157 | ulong addr; | ||
158 | u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; | ||
159 | |||
160 | if (get_ra(inst)) | ||
161 | ra = kvmppc_get_gpr(vcpu, get_ra(inst)); | ||
162 | |||
163 | addr = (ra + rb) & ~31ULL; | ||
164 | if (!(vcpu->arch.msr & MSR_SF)) | ||
165 | addr &= 0xffffffff; | ||
166 | |||
167 | if (kvmppc_st(vcpu, addr, 32, zeros)) { | ||
168 | vcpu->arch.dear = addr; | ||
169 | vcpu->arch.fault_dear = addr; | ||
170 | to_book3s(vcpu)->dsisr = DSISR_PROTFAULT | | ||
171 | DSISR_ISSTORE; | ||
172 | kvmppc_book3s_queue_irqprio(vcpu, | ||
173 | BOOK3S_INTERRUPT_DATA_STORAGE); | ||
174 | kvmppc_mmu_pte_flush(vcpu, addr, ~0xFFFULL); | ||
175 | } | ||
176 | |||
177 | break; | ||
178 | } | ||
179 | default: | ||
180 | emulated = EMULATE_FAIL; | ||
181 | } | ||
182 | break; | ||
183 | default: | ||
184 | emulated = EMULATE_FAIL; | ||
185 | } | ||
186 | |||
187 | return emulated; | ||
188 | } | ||
189 | |||
190 | void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, | ||
191 | u32 val) | ||
192 | { | ||
193 | if (upper) { | ||
194 | /* Upper BAT */ | ||
195 | u32 bl = (val >> 2) & 0x7ff; | ||
196 | bat->bepi_mask = (~bl << 17); | ||
197 | bat->bepi = val & 0xfffe0000; | ||
198 | bat->vs = (val & 2) ? 1 : 0; | ||
199 | bat->vp = (val & 1) ? 1 : 0; | ||
200 | bat->raw = (bat->raw & 0xffffffff00000000ULL) | val; | ||
201 | } else { | ||
202 | /* Lower BAT */ | ||
203 | bat->brpn = val & 0xfffe0000; | ||
204 | bat->wimg = (val >> 3) & 0xf; | ||
205 | bat->pp = val & 3; | ||
206 | bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32); | ||
207 | } | ||
208 | } | ||
209 | |||
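kvmppc_set_bat() above packs both halves of the classic BAT layout; the only non-obvious step is deriving bepi_mask from the 11-bit block-length field via ~bl << 17. A worked decode of an arbitrary 256MB BAT pair:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Arbitrary pair; field extraction copied from
	 * kvmppc_set_bat() above. */
	uint32_t upper = 0x00001ffe;	/* BEPI=0, BL=0x7ff, Vs=1 */
	uint32_t lower = 0x10000002;	/* BRPN=0x10000000, PP=2 */

	uint32_t bl = (upper >> 2) & 0x7ff;
	uint32_t bepi_mask = ~bl << 17;		/* 0xf0000000 for 256MB */
	uint32_t bepi = upper & 0xfffe0000;
	uint32_t brpn = lower & 0xfffe0000;

	printf("bepi=0x%08x mask=0x%08x brpn=0x%08x pp=%u\n",
	       bepi, bepi_mask, brpn, lower & 3);
	return 0;
}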
210 | static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val) | ||
211 | { | ||
212 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | ||
213 | struct kvmppc_bat *bat; | ||
214 | |||
215 | switch (sprn) { | ||
216 | case SPRN_IBAT0U ... SPRN_IBAT3L: | ||
217 | bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2]; | ||
218 | break; | ||
219 | case SPRN_IBAT4U ... SPRN_IBAT7L: | ||
220 | bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT4U) / 2]; | ||
221 | break; | ||
222 | case SPRN_DBAT0U ... SPRN_DBAT3L: | ||
223 | bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2]; | ||
224 | break; | ||
225 | case SPRN_DBAT4U ... SPRN_DBAT7L: | ||
226 | bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT4U) / 2]; | ||
227 | break; | ||
228 | default: | ||
229 | BUG(); | ||
230 | } | ||
231 | |||
232 | kvmppc_set_bat(vcpu, bat, !(sprn % 2), val); | ||
233 | } | ||
234 | |||
235 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | ||
236 | { | ||
237 | int emulated = EMULATE_DONE; | ||
238 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | ||
239 | |||
240 | switch (sprn) { | ||
241 | case SPRN_SDR1: | ||
242 | to_book3s(vcpu)->sdr1 = spr_val; | ||
243 | break; | ||
244 | case SPRN_DSISR: | ||
245 | to_book3s(vcpu)->dsisr = spr_val; | ||
246 | break; | ||
247 | case SPRN_DAR: | ||
248 | vcpu->arch.dear = spr_val; | ||
249 | break; | ||
250 | case SPRN_HIOR: | ||
251 | to_book3s(vcpu)->hior = spr_val; | ||
252 | break; | ||
253 | case SPRN_IBAT0U ... SPRN_IBAT3L: | ||
254 | case SPRN_IBAT4U ... SPRN_IBAT7L: | ||
255 | case SPRN_DBAT0U ... SPRN_DBAT3L: | ||
256 | case SPRN_DBAT4U ... SPRN_DBAT7L: | ||
257 | kvmppc_write_bat(vcpu, sprn, (u32)spr_val); | ||
258 | /* BAT writes happen so rarely that we're ok to flush | ||
259 | * everything here */ | ||
260 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | ||
261 | break; | ||
262 | case SPRN_HID0: | ||
263 | to_book3s(vcpu)->hid[0] = spr_val; | ||
264 | break; | ||
265 | case SPRN_HID1: | ||
266 | to_book3s(vcpu)->hid[1] = spr_val; | ||
267 | break; | ||
268 | case SPRN_HID2: | ||
269 | to_book3s(vcpu)->hid[2] = spr_val; | ||
270 | break; | ||
271 | case SPRN_HID4: | ||
272 | to_book3s(vcpu)->hid[4] = spr_val; | ||
273 | break; | ||
274 | case SPRN_HID5: | ||
275 | to_book3s(vcpu)->hid[5] = spr_val; | ||
276 | /* guest HID5 set can change is_dcbz32 */ | ||
277 | if (vcpu->arch.mmu.is_dcbz32(vcpu) && | ||
278 | (mfmsr() & MSR_HV)) | ||
279 | vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32; | ||
280 | break; | ||
281 | case SPRN_ICTC: | ||
282 | case SPRN_THRM1: | ||
283 | case SPRN_THRM2: | ||
284 | case SPRN_THRM3: | ||
285 | case SPRN_CTRLF: | ||
286 | case SPRN_CTRLT: | ||
287 | break; | ||
288 | default: | ||
289 | printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn); | ||
290 | #ifndef DEBUG_SPR | ||
291 | emulated = EMULATE_FAIL; | ||
292 | #endif | ||
293 | break; | ||
294 | } | ||
295 | |||
296 | return emulated; | ||
297 | } | ||
298 | |||
299 | int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | ||
300 | { | ||
301 | int emulated = EMULATE_DONE; | ||
302 | |||
303 | switch (sprn) { | ||
304 | case SPRN_SDR1: | ||
305 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); | ||
306 | break; | ||
307 | case SPRN_DSISR: | ||
308 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr); | ||
309 | break; | ||
310 | case SPRN_DAR: | ||
311 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); | ||
312 | break; | ||
313 | case SPRN_HIOR: | ||
314 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior); | ||
315 | break; | ||
316 | case SPRN_HID0: | ||
317 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]); | ||
318 | break; | ||
319 | case SPRN_HID1: | ||
320 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]); | ||
321 | break; | ||
322 | case SPRN_HID2: | ||
323 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]); | ||
324 | break; | ||
325 | case SPRN_HID4: | ||
326 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]); | ||
327 | break; | ||
328 | case SPRN_HID5: | ||
329 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]); | ||
330 | break; | ||
331 | case SPRN_THRM1: | ||
332 | case SPRN_THRM2: | ||
333 | case SPRN_THRM3: | ||
334 | case SPRN_CTRLF: | ||
335 | case SPRN_CTRLT: | ||
336 | kvmppc_set_gpr(vcpu, rt, 0); | ||
337 | break; | ||
338 | default: | ||
339 | printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn); | ||
340 | #ifndef DEBUG_SPR | ||
341 | emulated = EMULATE_FAIL; | ||
342 | #endif | ||
343 | break; | ||
344 | } | ||
345 | |||
346 | return emulated; | ||
347 | } | ||
348 | |||
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_64_exports.c new file mode 100644 index 000000000000..1dd5a1ddfd0d --- /dev/null +++ b/arch/powerpc/kvm/book3s_64_exports.c | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright SUSE Linux Products GmbH 2009 | ||
16 | * | ||
17 | * Authors: Alexander Graf <agraf@suse.de> | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <asm/kvm_book3s.h> | ||
22 | |||
23 | EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter); | ||
24 | EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem); | ||
25 | EXPORT_SYMBOL_GPL(kvmppc_rmcall); | ||
26 | EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); | ||
27 | #ifdef CONFIG_ALTIVEC | ||
28 | EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec); | ||
29 | #endif | ||
30 | #ifdef CONFIG_VSX | ||
31 | EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx); | ||
32 | #endif | ||
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S new file mode 100644 index 000000000000..c1584d0cbce8 --- /dev/null +++ b/arch/powerpc/kvm/book3s_64_interrupts.S | |||
@@ -0,0 +1,318 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright SUSE Linux Products GmbH 2009 | ||
16 | * | ||
17 | * Authors: Alexander Graf <agraf@suse.de> | ||
18 | */ | ||
19 | |||
20 | #include <asm/ppc_asm.h> | ||
21 | #include <asm/kvm_asm.h> | ||
22 | #include <asm/reg.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <asm/asm-offsets.h> | ||
25 | #include <asm/exception-64s.h> | ||
26 | |||
27 | #define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit | ||
28 | #define ULONG_SIZE 8 | ||
29 | #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE)) | ||
30 | |||
31 | .macro DISABLE_INTERRUPTS | ||
32 | mfmsr r0 | ||
33 | rldicl r0,r0,48,1 | ||
34 | rotldi r0,r0,16 | ||
35 | mtmsrd r0,1 | ||
36 | .endm | ||
37 | |||
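The rotate pair in DISABLE_INTERRUPTS is the usual mask-register-free way to clear MSR_EE (IBM bit 48, value 0x8000): rotating left by 48 parks EE in the high bit, where rldicl's mask clears it, and the remaining rotate by 16 restores the layout. The net effect, checked in plain C:

#include <stdint.h>
#include <assert.h>

#define MSR_EE 0x8000ULL

static uint64_t rotl64(uint64_t v, unsigned int n)
{
	return (v << n) | (v >> (64 - n));
}

int main(void)
{
	uint64_t msr = 0x9000000000009032ULL;	/* arbitrary sample MSR */
	uint64_t r;

	r = rotl64(msr, 48);			/* rldicl r0,r0,48,1 ... */
	r &= 0x7fffffffffffffffULL;		/* ... clears IBM bit 0 */
	r = rotl64(r, 16);			/* rotldi r0,r0,16 */

	assert(r == (msr & ~MSR_EE));
	return 0;
}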
38 | #define VCPU_LOAD_NVGPRS(vcpu) \ | ||
39 | ld r14, VCPU_GPR(r14)(vcpu); \ | ||
40 | ld r15, VCPU_GPR(r15)(vcpu); \ | ||
41 | ld r16, VCPU_GPR(r16)(vcpu); \ | ||
42 | ld r17, VCPU_GPR(r17)(vcpu); \ | ||
43 | ld r18, VCPU_GPR(r18)(vcpu); \ | ||
44 | ld r19, VCPU_GPR(r19)(vcpu); \ | ||
45 | ld r20, VCPU_GPR(r20)(vcpu); \ | ||
46 | ld r21, VCPU_GPR(r21)(vcpu); \ | ||
47 | ld r22, VCPU_GPR(r22)(vcpu); \ | ||
48 | ld r23, VCPU_GPR(r23)(vcpu); \ | ||
49 | ld r24, VCPU_GPR(r24)(vcpu); \ | ||
50 | ld r25, VCPU_GPR(r25)(vcpu); \ | ||
51 | ld r26, VCPU_GPR(r26)(vcpu); \ | ||
52 | ld r27, VCPU_GPR(r27)(vcpu); \ | ||
53 | ld r28, VCPU_GPR(r28)(vcpu); \ | ||
54 | ld r29, VCPU_GPR(r29)(vcpu); \ | ||
55 | ld r30, VCPU_GPR(r30)(vcpu); \ | ||
56 | ld r31, VCPU_GPR(r31)(vcpu); \ | ||
57 | |||
58 | /***************************************************************************** | ||
59 | * * | ||
60 | * Guest entry / exit code that is in kernel module memory (highmem) * | ||
61 | * * | ||
62 | ****************************************************************************/ | ||
63 | |||
64 | /* Registers: | ||
65 | * r3: kvm_run pointer | ||
66 | * r4: vcpu pointer | ||
67 | */ | ||
68 | _GLOBAL(__kvmppc_vcpu_entry) | ||
69 | |||
70 | kvm_start_entry: | ||
71 | /* Write correct stack frame */ | ||
72 | mflr r0 | ||
73 | std r0,16(r1) | ||
74 | |||
75 | /* Save host state to the stack */ | ||
76 | stdu r1, -SWITCH_FRAME_SIZE(r1) | ||
77 | |||
78 | /* Save r3 (kvm_run) and r4 (vcpu) */ | ||
79 | SAVE_2GPRS(3, r1) | ||
80 | |||
81 | /* Save non-volatile registers (r14 - r31) */ | ||
82 | SAVE_NVGPRS(r1) | ||
83 | |||
84 | /* Save LR */ | ||
85 | std r0, _LINK(r1) | ||
86 | |||
87 | /* Load non-volatile guest state from the vcpu */ | ||
88 | VCPU_LOAD_NVGPRS(r4) | ||
89 | |||
90 | /* Save R1/R2 in the PACA */ | ||
91 | std r1, PACA_KVM_HOST_R1(r13) | ||
92 | std r2, PACA_KVM_HOST_R2(r13) | ||
93 | |||
94 | /* XXX swap in/out on load? */ | ||
95 | ld r3, VCPU_HIGHMEM_HANDLER(r4) | ||
96 | std r3, PACA_KVM_VMHANDLER(r13) | ||
97 | |||
98 | kvm_start_lightweight: | ||
99 | |||
100 | ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */ | ||
101 | ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */ | ||
102 | |||
103 | /* Load some guest state in the respective registers */ | ||
104 | ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */ | ||
105 | /* will be swapped in by rmcall */ | ||
106 | |||
107 | ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */ | ||
108 | mtlr r3 /* LR = r3 */ | ||
109 | |||
110 | DISABLE_INTERRUPTS | ||
111 | |||
112 | /* Some guests may need to have dcbz set to 32 byte length. | ||
113 | * | ||
114 | * Usually we ensure that by patching the guest's instructions | ||
115 | * to trap on dcbz and emulate it in the hypervisor. | ||
116 | * | ||
117 | * If we can, we should tell the CPU to use 32 byte dcbz though, | ||
118 | * because that's a lot faster. | ||
119 | */ | ||
120 | |||
121 | ld r3, VCPU_HFLAGS(r4) | ||
122 | rldicl. r3, r3, 0, 63 /* CR = ((r3 & 1) == 0) */ | ||
123 | beq no_dcbz32_on | ||
124 | |||
125 | mfspr r3,SPRN_HID5 | ||
126 | ori r3, r3, 0x80 /* XXX HID5_dcbz32 = 0x80 */ | ||
127 | mtspr SPRN_HID5,r3 | ||
128 | |||
129 | no_dcbz32_on: | ||
130 | |||
131 | ld r6, VCPU_RMCALL(r4) | ||
132 | mtctr r6 | ||
133 | |||
134 | ld r3, VCPU_TRAMPOLINE_ENTER(r4) | ||
135 | LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR)) | ||
136 | |||
137 | /* Jump to SLB patching handler and into our guest */ | ||
138 | bctr | ||
139 | |||
140 | /* | ||
141 | * This is the handler in module memory. It gets jumped at from the | ||
142 | * lowmem trampoline code, so it's basically the guest exit code. | ||
143 | * | ||
144 | */ | ||
145 | |||
146 | .global kvmppc_handler_highmem | ||
147 | kvmppc_handler_highmem: | ||
148 | |||
149 | /* | ||
150 | * Register usage at this point: | ||
151 | * | ||
152 | * R0 = guest last inst | ||
153 | * R1 = host R1 | ||
154 | * R2 = host R2 | ||
155 | * R3 = guest PC | ||
156 | * R4 = guest MSR | ||
157 | * R5 = guest DAR | ||
158 | * R6 = guest DSISR | ||
159 | * R13 = PACA | ||
160 | * PACA.KVM.* = guest * | ||
161 | * | ||
162 | */ | ||
163 | |||
164 | /* R7 = vcpu */ | ||
165 | ld r7, GPR4(r1) | ||
166 | |||
167 | /* Now save the guest state */ | ||
168 | |||
169 | stw r0, VCPU_LAST_INST(r7) | ||
170 | |||
171 | std r3, VCPU_PC(r7) | ||
172 | std r4, VCPU_SHADOW_SRR1(r7) | ||
173 | std r5, VCPU_FAULT_DEAR(r7) | ||
174 | std r6, VCPU_FAULT_DSISR(r7) | ||
175 | |||
176 | ld r5, VCPU_HFLAGS(r7) | ||
177 | rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */ | ||
178 | beq no_dcbz32_off | ||
179 | |||
180 | li r4, 0 | ||
181 | mfspr r5,SPRN_HID5 | ||
182 | rldimi r5,r4,6,56 | ||
183 | mtspr SPRN_HID5,r5 | ||
184 | |||
185 | no_dcbz32_off: | ||
186 | |||
187 | std r14, VCPU_GPR(r14)(r7) | ||
188 | std r15, VCPU_GPR(r15)(r7) | ||
189 | std r16, VCPU_GPR(r16)(r7) | ||
190 | std r17, VCPU_GPR(r17)(r7) | ||
191 | std r18, VCPU_GPR(r18)(r7) | ||
192 | std r19, VCPU_GPR(r19)(r7) | ||
193 | std r20, VCPU_GPR(r20)(r7) | ||
194 | std r21, VCPU_GPR(r21)(r7) | ||
195 | std r22, VCPU_GPR(r22)(r7) | ||
196 | std r23, VCPU_GPR(r23)(r7) | ||
197 | std r24, VCPU_GPR(r24)(r7) | ||
198 | std r25, VCPU_GPR(r25)(r7) | ||
199 | std r26, VCPU_GPR(r26)(r7) | ||
200 | std r27, VCPU_GPR(r27)(r7) | ||
201 | std r28, VCPU_GPR(r28)(r7) | ||
202 | std r29, VCPU_GPR(r29)(r7) | ||
203 | std r30, VCPU_GPR(r30)(r7) | ||
204 | std r31, VCPU_GPR(r31)(r7) | ||
205 | |||
206 | /* Save guest CTR */ | ||
207 | mfctr r5 | ||
208 | std r5, VCPU_CTR(r7) | ||
209 | |||
210 | /* Save guest LR */ | ||
211 | mflr r5 | ||
212 | std r5, VCPU_LR(r7) | ||
213 | |||
214 | /* Restore host msr -> SRR1 */ | ||
215 | ld r6, VCPU_HOST_MSR(r7) | ||
216 | |||
217 | /* | ||
218 | * For some interrupts, we need to call the real Linux | ||
219 | * handler, so it can do work for us. This has to happen | ||
220 | * as if the interrupt arrived from the kernel though, | ||
221 | * so let's fake it here where most state is restored. | ||
222 | * | ||
223 | * Call Linux for hardware interrupts/decrementer | ||
224 | * r3 = address of interrupt handler (exit reason) | ||
225 | */ | ||
226 | |||
227 | cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL | ||
228 | beq call_linux_handler | ||
229 | cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER | ||
230 | beq call_linux_handler | ||
231 | |||
232 | /* Back to EE=1 */ | ||
233 | mtmsr r6 | ||
234 | b kvm_return_point | ||
235 | |||
236 | call_linux_handler: | ||
237 | |||
238 | /* | ||
239 | * If we land here we need to jump back to the handler we | ||
240 | * came from. | ||
241 | * | ||
242 | * We have a page that we can access from real mode, so let's | ||
243 | * jump back to that and use it as a trampoline to get back into the | ||
244 | * interrupt handler! | ||
245 | * | ||
246 | * R3 still contains the exit code, | ||
247 | * R5 VCPU_HOST_RETIP and | ||
248 | * R6 VCPU_HOST_MSR | ||
249 | */ | ||
250 | |||
251 | /* Restore host IP -> SRR0 */ | ||
252 | ld r5, VCPU_HOST_RETIP(r7) | ||
253 | |||
254 | /* XXX Better move to a safe function? | ||
255 | * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */ | ||
256 | |||
257 | mtlr r12 | ||
258 | |||
259 | ld r4, VCPU_TRAMPOLINE_LOWMEM(r7) | ||
260 | mtsrr0 r4 | ||
261 | LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)) | ||
262 | mtsrr1 r3 | ||
263 | |||
264 | RFI | ||
265 | |||
266 | .global kvm_return_point | ||
267 | kvm_return_point: | ||
268 | |||
269 | /* Jump back to lightweight entry if we're supposed to */ | ||
270 | /* go back into the guest */ | ||
271 | |||
272 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ | ||
273 | mr r5, r12 | ||
274 | |||
275 | /* Restore r3 (kvm_run) and r4 (vcpu) */ | ||
276 | REST_2GPRS(3, r1) | ||
277 | bl KVMPPC_HANDLE_EXIT | ||
278 | |||
279 | /* If RESUME_GUEST, get back in the loop */ | ||
280 | cmpwi r3, RESUME_GUEST | ||
281 | beq kvm_loop_lightweight | ||
282 | |||
283 | cmpwi r3, RESUME_GUEST_NV | ||
284 | beq kvm_loop_heavyweight | ||
285 | |||
286 | kvm_exit_loop: | ||
287 | |||
288 | ld r4, _LINK(r1) | ||
289 | mtlr r4 | ||
290 | |||
291 | /* Restore non-volatile host registers (r14 - r31) */ | ||
292 | REST_NVGPRS(r1) | ||
293 | |||
294 | addi r1, r1, SWITCH_FRAME_SIZE | ||
295 | blr | ||
296 | |||
297 | kvm_loop_heavyweight: | ||
298 | |||
299 | ld r4, _LINK(r1) | ||
300 | std r4, (16 + SWITCH_FRAME_SIZE)(r1) | ||
301 | |||
302 | /* Load vcpu and kvm_run */ | ||
303 | REST_2GPRS(3, r1) | ||
304 | |||
305 | /* Load non-volatile guest state from the vcpu */ | ||
306 | VCPU_LOAD_NVGPRS(r4) | ||
307 | |||
308 | /* Jump back into the beginning of this function */ | ||
309 | b kvm_start_lightweight | ||
310 | |||
311 | kvm_loop_lightweight: | ||
312 | |||
313 | /* We'll need the vcpu pointer */ | ||
314 | REST_GPR(4, r1) | ||
315 | |||
316 | /* Jump back into the beginning of this function */ | ||
317 | b kvm_start_lightweight | ||
318 | |||
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c new file mode 100644 index 000000000000..512dcff77554 --- /dev/null +++ b/arch/powerpc/kvm/book3s_64_mmu.c | |||
@@ -0,0 +1,500 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright SUSE Linux Products GmbH 2009 | ||
16 | * | ||
17 | * Authors: Alexander Graf <agraf@suse.de> | ||
18 | */ | ||
19 | |||
20 | #include <linux/types.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/kvm.h> | ||
23 | #include <linux/kvm_host.h> | ||
24 | #include <linux/highmem.h> | ||
25 | |||
26 | #include <asm/tlbflush.h> | ||
27 | #include <asm/kvm_ppc.h> | ||
28 | #include <asm/kvm_book3s.h> | ||
29 | |||
30 | /* #define DEBUG_MMU */ | ||
31 | |||
32 | #ifdef DEBUG_MMU | ||
33 | #define dprintk(X...) printk(KERN_INFO X) | ||
34 | #else | ||
35 | #define dprintk(X...) do { } while(0) | ||
36 | #endif | ||
37 | |||
38 | static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu) | ||
39 | { | ||
40 | kvmppc_set_msr(vcpu, MSR_SF); | ||
41 | } | ||
42 | |||
43 | static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( | ||
44 | struct kvmppc_vcpu_book3s *vcpu_book3s, | ||
45 | gva_t eaddr) | ||
46 | { | ||
47 | int i; | ||
48 | u64 esid = GET_ESID(eaddr); | ||
49 | u64 esid_1t = GET_ESID_1T(eaddr); | ||
50 | |||
51 | for (i = 0; i < vcpu_book3s->slb_nr; i++) { | ||
52 | u64 cmp_esid = esid; | ||
53 | |||
54 | if (!vcpu_book3s->slb[i].valid) | ||
55 | continue; | ||
56 | |||
57 | if (vcpu_book3s->slb[i].tb) | ||
58 | cmp_esid = esid_1t; | ||
59 | |||
60 | if (vcpu_book3s->slb[i].esid == cmp_esid) | ||
61 | return &vcpu_book3s->slb[i]; | ||
62 | } | ||
63 | |||
64 | dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n", | ||
65 | eaddr, esid, esid_1t); | ||
66 | for (i = 0; i < vcpu_book3s->slb_nr; i++) { | ||
67 | if (vcpu_book3s->slb[i].vsid) | ||
68 | dprintk(" %d: %c%c%c %llx %llx\n", i, | ||
69 | vcpu_book3s->slb[i].valid ? 'v' : ' ', | ||
70 | vcpu_book3s->slb[i].large ? 'l' : ' ', | ||
71 | vcpu_book3s->slb[i].tb ? 't' : ' ', | ||
72 | vcpu_book3s->slb[i].esid, | ||
73 | vcpu_book3s->slb[i].vsid); | ||
74 | } | ||
75 | |||
76 | return NULL; | ||
77 | } | ||
78 | |||
79 | static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, | ||
80 | bool data) | ||
81 | { | ||
82 | struct kvmppc_slb *slb; | ||
83 | |||
84 | slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), eaddr); | ||
85 | if (!slb) | ||
86 | return 0; | ||
87 | |||
88 | if (slb->tb) | ||
89 | return (((u64)eaddr >> 12) & 0xfffffff) | | ||
90 | (((u64)slb->vsid) << 28); | ||
91 | |||
92 | return (((u64)eaddr >> 12) & 0xffff) | (((u64)slb->vsid) << 16); | ||
93 | } | ||
94 | |||
95 | static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe) | ||
96 | { | ||
97 | return slbe->large ? 24 : 12; | ||
98 | } | ||
99 | |||
100 | static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr) | ||
101 | { | ||
102 | int p = kvmppc_mmu_book3s_64_get_pagesize(slbe); | ||
103 | return ((eaddr & 0xfffffff) >> p); | ||
104 | } | ||
105 | |||
106 | static hva_t kvmppc_mmu_book3s_64_get_pteg( | ||
107 | struct kvmppc_vcpu_book3s *vcpu_book3s, | ||
108 | struct kvmppc_slb *slbe, gva_t eaddr, | ||
109 | bool second) | ||
110 | { | ||
111 | u64 hash, pteg, htabsize; | ||
112 | u32 page; | ||
113 | hva_t r; | ||
114 | |||
115 | page = kvmppc_mmu_book3s_64_get_page(slbe, eaddr); | ||
116 | htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1); | ||
117 | |||
118 | hash = slbe->vsid ^ page; | ||
119 | if (second) | ||
120 | hash = ~hash; | ||
121 | hash &= ((1ULL << 39ULL) - 1ULL); | ||
122 | hash &= htabsize; | ||
123 | hash <<= 7ULL; | ||
124 | |||
125 | pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL; | ||
126 | pteg |= hash; | ||
127 | |||
128 | dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n", | ||
129 | page, vcpu_book3s->sdr1, pteg, slbe->vsid); | ||
130 | |||
131 | r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); | ||
132 | if (kvm_is_error_hva(r)) | ||
133 | return r; | ||
134 | return r | (pteg & ~PAGE_MASK); | ||
135 | } | ||
136 | |||
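Same shape as the 32-bit walk earlier in this diff, but with the 64-bit SDR1 layout: the low 5 bits give a PTEG-index mask of 2^(HTABSIZE+11) - 1, the hash is VSID xor page number truncated to 39 bits, and each PTEG is 128 bytes, hence the shift by 7. The arithmetic in isolation, with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Invented inputs; masks and shifts copied from
	 * kvmppc_mmu_book3s_64_get_pteg() above. */
	uint64_t sdr1 = 0x0000000001000003ULL;	/* HTAB @16MB, HTABSIZE=3 */
	uint64_t vsid = 0x123456ULL;
	uint64_t page = 0x0d0;		/* (eaddr & 0xfffffff) >> 12 */

	uint64_t htabmask = (1ULL << ((sdr1 & 0x1f) + 11)) - 1;
	uint64_t hash = (vsid ^ page) & ((1ULL << 39) - 1) & htabmask;
	uint64_t pteg = (sdr1 & 0xfffffffffffc0000ULL) | (hash << 7);

	printf("PTEG at 0x%016llx\n", (unsigned long long)pteg);
	return 0;
}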
137 | static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr) | ||
138 | { | ||
139 | int p = kvmppc_mmu_book3s_64_get_pagesize(slbe); | ||
140 | u64 avpn; | ||
141 | |||
142 | avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr); | ||
143 | avpn |= slbe->vsid << (28 - p); | ||
144 | |||
145 | if (p < 24) | ||
146 | avpn >>= ((80 - p) - 56) - 8; | ||
147 | else | ||
148 | avpn <<= 8; | ||
149 | |||
150 | return avpn; | ||
151 | } | ||
152 | |||
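The shift amounts in get_avpn() look opaque but collapse to constants once the page size is fixed: ((80 - p) - 56) - 8 is 4 for 4k pages (p = 12), so the vsid||page value is right-shifted by 4 to line up with the HPTE AVPN field, while 16M pages (p = 24) shift left by 8 instead. Lifted out for inspection:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Evaluates the get_avpn() arithmetic above for both page
	 * sizes it handles; vsid and eaddr are invented. */
	uint64_t vsid = 0x123456ULL;
	uint64_t eaddr = 0x0d0f123ULL;
	int p;

	for (p = 12; p <= 24; p += 12) {
		uint64_t page = (eaddr & 0xfffffff) >> p;
		uint64_t avpn = page | (vsid << (28 - p));

		if (p < 24)
			avpn >>= ((80 - p) - 56) - 8;	/* 4 for p == 12 */
		else
			avpn <<= 8;

		printf("p=%d avpn=0x%llx\n", p, (unsigned long long)avpn);
	}
	return 0;
}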
153 | static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | ||
154 | struct kvmppc_pte *gpte, bool data) | ||
155 | { | ||
156 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | ||
157 | struct kvmppc_slb *slbe; | ||
158 | hva_t ptegp; | ||
159 | u64 pteg[16]; | ||
160 | u64 avpn = 0; | ||
161 | int i; | ||
162 | u8 key = 0; | ||
163 | bool found = false; | ||
164 | bool perm_err = false; | ||
165 | int second = 0; | ||
166 | |||
167 | slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr); | ||
168 | if (!slbe) | ||
169 | goto no_seg_found; | ||
170 | |||
171 | do_second: | ||
172 | ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second); | ||
173 | if (kvm_is_error_hva(ptegp)) | ||
174 | goto no_page_found; | ||
175 | |||
176 | avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr); | ||
177 | |||
178 | if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) { | ||
179 | printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp); | ||
180 | goto no_page_found; | ||
181 | } | ||
182 | |||
183 | if ((vcpu->arch.msr & MSR_PR) && slbe->Kp) | ||
184 | key = 4; | ||
185 | else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks) | ||
186 | key = 4; | ||
187 | |||
188 | for (i = 0; i < 16; i += 2) { | ||
189 | u64 v = pteg[i]; | ||
190 | u64 r = pteg[i+1]; | ||
191 | |||
192 | /* Valid check */ | ||
193 | if (!(v & HPTE_V_VALID)) | ||
194 | continue; | ||
195 | /* Hash check */ | ||
196 | if ((v & HPTE_V_SECONDARY) != second) | ||
197 | continue; | ||
198 | |||
199 | /* AVPN compare */ | ||
200 | if (HPTE_V_AVPN_VAL(avpn) == HPTE_V_AVPN_VAL(v)) { | ||
201 | u8 pp = (r & HPTE_R_PP) | key; | ||
202 | int eaddr_mask = 0xFFF; | ||
203 | |||
204 | gpte->eaddr = eaddr; | ||
205 | gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, | ||
206 | eaddr, | ||
207 | data); | ||
208 | if (slbe->large) | ||
209 | eaddr_mask = 0xFFFFFF; | ||
210 | gpte->raddr = (r & HPTE_R_RPN) | (eaddr & eaddr_mask); | ||
211 | gpte->may_execute = ((r & HPTE_R_N) ? false : true); | ||
212 | gpte->may_read = false; | ||
213 | gpte->may_write = false; | ||
214 | |||
215 | switch (pp) { | ||
216 | case 0: | ||
217 | case 1: | ||
218 | case 2: | ||
219 | case 6: | ||
220 | gpte->may_write = true; | ||
221 | /* fall through */ | ||
222 | case 3: | ||
223 | case 5: | ||
224 | case 7: | ||
225 | gpte->may_read = true; | ||
226 | break; | ||
227 | } | ||
228 | |||
229 | if (!gpte->may_read) { | ||
230 | perm_err = true; | ||
231 | continue; | ||
232 | } | ||
233 | |||
234 | dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx " | ||
235 | "-> 0x%llx\n", | ||
236 | eaddr, avpn, gpte->vpage, gpte->raddr); | ||
237 | found = true; | ||
238 | break; | ||
239 | } | ||
240 | } | ||
241 | |||
242 | /* Update PTE R and C bits, so the guest's swapper knows we used the | ||
243 | * page */ | ||
244 | if (found) { | ||
245 | u64 oldr = pteg[i+1]; | ||
246 | |||
247 | if (gpte->may_read) { | ||
248 | /* Set the accessed flag */ | ||
249 | pteg[i+1] |= HPTE_R_R; | ||
250 | } | ||
251 | if (gpte->may_write) { | ||
252 | /* Set the dirty flag */ | ||
253 | pteg[i+1] |= HPTE_R_C; | ||
254 | } else { | ||
255 | dprintk("KVM: Mapping read-only page!\n"); | ||
256 | } | ||
257 | |||
258 | /* Write back into the PTEG */ | ||
259 | if (pteg[i+1] != oldr) | ||
260 | copy_to_user((void __user *)ptegp, pteg, sizeof(pteg)); | ||
261 | |||
262 | return 0; | ||
263 | } else { | ||
264 | dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx " | ||
265 | "ptegp=0x%lx)\n", | ||
266 | eaddr, to_book3s(vcpu)->sdr1, ptegp); | ||
267 | for (i = 0; i < 16; i += 2) | ||
268 | dprintk(" %02d: 0x%llx - 0x%llx (0x%llx)\n", | ||
269 | i, pteg[i], pteg[i+1], avpn); | ||
270 | |||
271 | if (!second) { | ||
272 | second = HPTE_V_SECONDARY; | ||
273 | goto do_second; | ||
274 | } | ||
275 | } | ||
276 | |||
277 | |||
278 | no_page_found: | ||
279 | |||
280 | |||
281 | if (perm_err) | ||
282 | return -EPERM; | ||
283 | |||
284 | return -ENOENT; | ||
285 | |||
286 | no_seg_found: | ||
287 | |||
288 | dprintk("KVM MMU: Trigger segment fault\n"); | ||
289 | return -EINVAL; | ||
290 | } | ||
291 | |||
292 | static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb) | ||
293 | { | ||
294 | struct kvmppc_vcpu_book3s *vcpu_book3s; | ||
295 | u64 esid, esid_1t; | ||
296 | int slb_nr; | ||
297 | struct kvmppc_slb *slbe; | ||
298 | |||
299 | dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb); | ||
300 | |||
301 | vcpu_book3s = to_book3s(vcpu); | ||
302 | |||
303 | esid = GET_ESID(rb); | ||
304 | esid_1t = GET_ESID_1T(rb); | ||
305 | slb_nr = rb & 0xfff; | ||
306 | |||
307 | if (slb_nr >= vcpu_book3s->slb_nr) | ||
308 | return; | ||
309 | |||
310 | slbe = &vcpu_book3s->slb[slb_nr]; | ||
311 | |||
312 | slbe->large = (rs & SLB_VSID_L) ? 1 : 0; | ||
313 | slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0; | ||
314 | slbe->esid = slbe->tb ? esid_1t : esid; | ||
315 | slbe->vsid = rs >> 12; | ||
316 | slbe->valid = (rb & SLB_ESID_V) ? 1 : 0; | ||
317 | slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0; | ||
318 | slbe->Kp = (rs & SLB_VSID_KP) ? 1 : 0; | ||
319 | slbe->nx = (rs & SLB_VSID_N) ? 1 : 0; | ||
320 | slbe->class = (rs & SLB_VSID_C) ? 1 : 0; | ||
321 | |||
322 | slbe->orige = rb & (ESID_MASK | SLB_ESID_V); | ||
323 | slbe->origv = rs; | ||
324 | |||
325 | /* Map the new segment */ | ||
326 | kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT); | ||
327 | } | ||
328 | |||
329 | static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr) | ||
330 | { | ||
331 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | ||
332 | struct kvmppc_slb *slbe; | ||
333 | |||
334 | if (slb_nr >= vcpu_book3s->slb_nr) | ||
335 | return 0; | ||
336 | |||
337 | slbe = &vcpu_book3s->slb[slb_nr]; | ||
338 | |||
339 | return slbe->orige; | ||
340 | } | ||
341 | |||
342 | static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr) | ||
343 | { | ||
344 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | ||
345 | struct kvmppc_slb *slbe; | ||
346 | |||
347 | if (slb_nr >= vcpu_book3s->slb_nr) | ||
348 | return 0; | ||
349 | |||
350 | slbe = &vcpu_book3s->slb[slb_nr]; | ||
351 | |||
352 | return slbe->origv; | ||
353 | } | ||
354 | |||
355 | static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea) | ||
356 | { | ||
357 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | ||
358 | struct kvmppc_slb *slbe; | ||
359 | |||
360 | dprintk("KVM MMU: slbie(0x%llx)\n", ea); | ||
361 | |||
362 | slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, ea); | ||
363 | |||
364 | if (!slbe) | ||
365 | return; | ||
366 | |||
367 | dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid); | ||
368 | |||
369 | slbe->valid = false; | ||
370 | |||
371 | kvmppc_mmu_map_segment(vcpu, ea); | ||
372 | } | ||
373 | |||
374 | static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) | ||
375 | { | ||
376 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | ||
377 | int i; | ||
378 | |||
379 | dprintk("KVM MMU: slbia()\n"); | ||
380 | |||
381 | for (i = 1; i < vcpu_book3s->slb_nr; i++) | ||
382 | vcpu_book3s->slb[i].valid = false; | ||
383 | |||
384 | if (vcpu->arch.msr & MSR_IR) { | ||
385 | kvmppc_mmu_flush_segments(vcpu); | ||
386 | kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc); | ||
387 | } | ||
388 | } | ||
389 | |||
390 | static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, | ||
391 | ulong value) | ||
392 | { | ||
393 | u64 rb = 0, rs = 0; | ||
394 | |||
395 | /* | ||
396 | * According to Book3 2.01 mtsrin is implemented as: | ||
397 | * | ||
398 | * The SLB entry specified by (RB)32:35 is loaded from register | ||
399 | * RS, as follows. | ||
400 | * | ||
401 | * SLBE Bit Source SLB Field | ||
402 | * | ||
403 | * 0:31 0x0000_0000 ESID-0:31 | ||
404 | * 32:35 (RB)32:35 ESID-32:35 | ||
405 | * 36 0b1 V | ||
406 | * 37:61 0x00_0000|| 0b0 VSID-0:24 | ||
407 | * 62:88 (RS)37:63 VSID-25:51 | ||
408 | * 89:91 (RS)33:35 Ks Kp N | ||
409 | * 92 (RS)36 L ((RS)36 must be 0b0) | ||
410 | * 93 0b0 C | ||
411 | */ | ||
412 | |||
413 | dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value); | ||
414 | |||
415 | /* ESID = srnum */ | ||
416 | rb |= (srnum & 0xf) << 28; | ||
417 | /* Set the valid bit */ | ||
418 | rb |= 1 << 27; | ||
419 | /* Index = ESID */ | ||
420 | rb |= srnum; | ||
421 | |||
422 | /* VSID = VSID */ | ||
423 | rs |= (value & 0xfffffff) << 12; | ||
424 | /* flags = flags */ | ||
425 | rs |= ((value >> 28) & 0x7) << 9; | ||
426 | |||
427 | kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb); | ||
428 | } | ||
429 | |||
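So the mtsrin bridge above simply re-encodes the 32-bit SR image as slbmte operands per the quoted Book3 table. A standalone run of the same packing, with a made-up srnum/value pair:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t srnum = 0x3;		/* made-up segment number */
	uint32_t value = 0x60123456;	/* made-up SR image */

	uint64_t rb = 0, rs = 0;

	/* Shifts copied from kvmppc_mmu_book3s_64_mtsrin() above */
	rb |= (uint64_t)(srnum & 0xf) << 28;		/* ESID */
	rb |= 1 << 27;					/* valid bit */
	rb |= srnum;					/* SLB index */

	rs |= (uint64_t)(value & 0xfffffff) << 12;	/* VSID */
	rs |= (uint64_t)((value >> 28) & 0x7) << 9;	/* Ks Kp N */

	printf("rb=0x%016llx rs=0x%016llx\n",
	       (unsigned long long)rb, (unsigned long long)rs);
	return 0;
}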
430 | static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va, | ||
431 | bool large) | ||
432 | { | ||
433 | u64 mask = 0xFFFFFFFFFULL; | ||
434 | |||
435 | dprintk("KVM MMU: tlbie(0x%lx)\n", va); | ||
436 | |||
437 | if (large) | ||
438 | mask = 0xFFFFFF000ULL; | ||
439 | kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask); | ||
440 | } | ||
441 | |||
442 | static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid, | ||
443 | u64 *vsid) | ||
444 | { | ||
445 | switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { | ||
446 | case 0: | ||
447 | *vsid = (VSID_REAL >> 16) | esid; | ||
448 | break; | ||
449 | case MSR_IR: | ||
450 | *vsid = (VSID_REAL_IR >> 16) | esid; | ||
451 | break; | ||
452 | case MSR_DR: | ||
453 | *vsid = (VSID_REAL_DR >> 16) | esid; | ||
454 | break; | ||
455 | case MSR_DR|MSR_IR: | ||
456 | { | ||
457 | ulong ea; | ||
458 | struct kvmppc_slb *slb; | ||
459 | ea = esid << SID_SHIFT; | ||
460 | slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea); | ||
461 | if (slb) | ||
462 | *vsid = slb->vsid; | ||
463 | else | ||
464 | return -ENOENT; | ||
465 | |||
466 | break; | ||
467 | } | ||
468 | default: | ||
469 | BUG(); | ||
470 | break; | ||
471 | } | ||
472 | |||
473 | return 0; | ||
474 | } | ||
475 | |||
476 | static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu) | ||
477 | { | ||
478 | return (to_book3s(vcpu)->hid[5] & 0x80); | ||
479 | } | ||
480 | |||
481 | void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu) | ||
482 | { | ||
483 | struct kvmppc_mmu *mmu = &vcpu->arch.mmu; | ||
484 | |||
485 | mmu->mfsrin = NULL; | ||
486 | mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin; | ||
487 | mmu->slbmte = kvmppc_mmu_book3s_64_slbmte; | ||
488 | mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee; | ||
489 | mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev; | ||
490 | mmu->slbie = kvmppc_mmu_book3s_64_slbie; | ||
491 | mmu->slbia = kvmppc_mmu_book3s_64_slbia; | ||
492 | mmu->xlate = kvmppc_mmu_book3s_64_xlate; | ||
493 | mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr; | ||
494 | mmu->tlbie = kvmppc_mmu_book3s_64_tlbie; | ||
495 | mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid; | ||
496 | mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp; | ||
497 | mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32; | ||
498 | |||
499 | vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; | ||
500 | } | ||
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c new file mode 100644 index 000000000000..f2899b297ffd --- /dev/null +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c | |||
@@ -0,0 +1,408 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved. | ||
3 | * | ||
4 | * Authors: | ||
5 | * Alexander Graf <agraf@suse.de> | ||
6 | * Kevin Wolf <mail@kevin-wolf.de> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License, version 2, as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/kvm_host.h> | ||
23 | |||
24 | #include <asm/kvm_ppc.h> | ||
25 | #include <asm/kvm_book3s.h> | ||
26 | #include <asm/mmu-hash64.h> | ||
27 | #include <asm/machdep.h> | ||
28 | #include <asm/mmu_context.h> | ||
29 | #include <asm/hw_irq.h> | ||
30 | |||
31 | #define PTE_SIZE 12 | ||
32 | #define VSID_ALL 0 | ||
33 | |||
34 | /* #define DEBUG_MMU */ | ||
35 | /* #define DEBUG_SLB */ | ||
36 | |||
37 | #ifdef DEBUG_MMU | ||
38 | #define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__) | ||
39 | #else | ||
40 | #define dprintk_mmu(a, ...) do { } while(0) | ||
41 | #endif | ||
42 | |||
43 | #ifdef DEBUG_SLB | ||
44 | #define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__) | ||
45 | #else | ||
46 | #define dprintk_slb(a, ...) do { } while(0) | ||
47 | #endif | ||
48 | |||
49 | static void invalidate_pte(struct hpte_cache *pte) | ||
50 | { | ||
51 | dprintk_mmu("KVM: Flushing SPT: 0x%llx (0x%llx) -> 0x%llx\n", | ||
52 | pte->pte.eaddr, pte->pte.vpage, pte->host_va); | ||
53 | |||
54 | ppc_md.hpte_invalidate(pte->slot, pte->host_va, | ||
55 | MMU_PAGE_4K, MMU_SEGSIZE_256M, | ||
56 | false); | ||
57 | pte->host_va = 0; | ||
58 | kvm_release_pfn_dirty(pte->pfn); | ||
59 | } | ||
60 | |||
61 | void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask) | ||
62 | { | ||
63 | int i; | ||
64 | |||
65 | dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%llx & 0x%llx\n", | ||
66 | vcpu->arch.hpte_cache_offset, guest_ea, ea_mask); | ||
67 | BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM); | ||
68 | |||
69 | guest_ea &= ea_mask; | ||
70 | for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) { | ||
71 | struct hpte_cache *pte; | ||
72 | |||
73 | pte = &vcpu->arch.hpte_cache[i]; | ||
74 | if (!pte->host_va) | ||
75 | continue; | ||
76 | |||
77 | if ((pte->pte.eaddr & ea_mask) == guest_ea) { | ||
78 | invalidate_pte(pte); | ||
79 | } | ||
80 | } | ||
81 | |||
82 | /* Doing a complete flush -> start from scratch */ | ||
83 | if (!ea_mask) | ||
84 | vcpu->arch.hpte_cache_offset = 0; | ||
85 | } | ||
86 | |||
87 | void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) | ||
88 | { | ||
89 | int i; | ||
90 | |||
91 | dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n", | ||
92 | vcpu->arch.hpte_cache_offset, guest_vp, vp_mask); | ||
93 | BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM); | ||
94 | |||
95 | guest_vp &= vp_mask; | ||
96 | for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) { | ||
97 | struct hpte_cache *pte; | ||
98 | |||
99 | pte = &vcpu->arch.hpte_cache[i]; | ||
100 | if (!pte->host_va) | ||
101 | continue; | ||
102 | |||
103 | if ((pte->pte.vpage & vp_mask) == guest_vp) { | ||
104 | invalidate_pte(pte); | ||
105 | } | ||
106 | } | ||
107 | } | ||
108 | |||
109 | void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end) | ||
110 | { | ||
111 | int i; | ||
112 | |||
113 | dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx - 0x%llx\n", | ||
114 | vcpu->arch.hpte_cache_offset, pa_start, pa_end); | ||
115 | BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM); | ||
116 | |||
117 | for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) { | ||
118 | struct hpte_cache *pte; | ||
119 | |||
120 | pte = &vcpu->arch.hpte_cache[i]; | ||
121 | if (!pte->host_va) | ||
122 | continue; | ||
123 | |||
124 | if ((pte->pte.raddr >= pa_start) && | ||
125 | (pte->pte.raddr < pa_end)) { | ||
126 | invalidate_pte(pte); | ||
127 | } | ||
128 | } | ||
129 | } | ||
130 | |||
131 | struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data) | ||
132 | { | ||
133 | int i; | ||
134 | u64 guest_vp; | ||
135 | |||
136 | guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false); | ||
137 | for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) { | ||
138 | struct hpte_cache *pte; | ||
139 | |||
140 | pte = &vcpu->arch.hpte_cache[i]; | ||
141 | if (!pte->host_va) | ||
142 | continue; | ||
143 | |||
144 | if (pte->pte.vpage == guest_vp) | ||
145 | return &pte->pte; | ||
146 | } | ||
147 | |||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu) | ||
152 | { | ||
153 | if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM) | ||
154 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | ||
155 | |||
156 | return vcpu->arch.hpte_cache_offset++; | ||
157 | } | ||
158 | |||
159 | /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using | ||
160 | * a hash, so we don't waste cycles on looping */ | ||
161 | static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) | ||
162 | { | ||
163 | return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^ | ||
164 | ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^ | ||
165 | ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^ | ||
166 | ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^ | ||
167 | ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^ | ||
168 | ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^ | ||
169 | ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^ | ||
170 | ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK)); | ||
171 | } | ||
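The fold compresses a 64-bit gvsid into one SID_MAP_BITS-wide index by XORing eight consecutive bit groups. A runnable userspace sketch of the same fold, assuming SID_MAP_BITS = 9 and SID_MAP_MASK = 0x1ff, consistent with the 512-entry map the comment mentions:

#include <stdio.h>
#include <stdint.h>

#define SID_MAP_BITS 9                      /* 2^9 = 512 entries */
#define SID_MAP_MASK ((1 << SID_MAP_BITS) - 1)

/* XOR-fold all eight 9-bit groups of the gvsid into one index */
static uint16_t sid_hash(uint64_t gvsid)
{
        uint16_t h = 0;
        int i;

        for (i = 0; i < 8; i++)
                h ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
        return h;
}

int main(void)
{
        /* Two gvsids differing only in high bits still spread out */
        printf("%#x\n", sid_hash(0x1234567890abcdefULL));
        printf("%#x\n", sid_hash(0x0000000000000001ULL));
        return 0;
}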
172 | |||
173 | |||
174 | static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) | ||
175 | { | ||
176 | struct kvmppc_sid_map *map; | ||
177 | u16 sid_map_mask; | ||
178 | |||
179 | if (vcpu->arch.msr & MSR_PR) | ||
180 | gvsid |= VSID_PR; | ||
181 | |||
182 | sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); | ||
183 | map = &to_book3s(vcpu)->sid_map[sid_map_mask]; | ||
184 | if (map->guest_vsid == gvsid) { | ||
185 | dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n", | ||
186 | gvsid, map->host_vsid); | ||
187 | return map; | ||
188 | } | ||
189 | |||
190 | map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; | ||
191 | if (map->guest_vsid == gvsid) { | ||
192 | dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n", | ||
193 | gvsid, map->host_vsid); | ||
194 | return map; | ||
195 | } | ||
196 | |||
197 | dprintk_slb("SLB: Searching 0x%llx -> not found\n", gvsid); | ||
198 | return NULL; | ||
199 | } | ||
200 | |||
201 | int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) | ||
202 | { | ||
203 | pfn_t hpaddr; | ||
204 | ulong hash, hpteg, va; | ||
205 | u64 vsid; | ||
206 | int ret; | ||
207 | int rflags = 0x192; /* HPTE_R_R | HPTE_R_C | HPTE_R_M | PP=0b10 */ | ||
208 | int vflags = 0; | ||
209 | int attempt = 0; | ||
210 | struct kvmppc_sid_map *map; | ||
211 | |||
212 | /* Get host physical address for gpa */ | ||
213 | hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); | ||
214 | if (kvm_is_error_hva(hpaddr)) { | ||
215 | printk(KERN_INFO "Couldn't get guest page for ea %llx!\n", orig_pte->eaddr); | ||
216 | return -EINVAL; | ||
217 | } | ||
218 | hpaddr <<= PAGE_SHIFT; | ||
219 | #if PAGE_SHIFT == 12 | ||
220 | #elif PAGE_SHIFT == 16 | ||
221 | hpaddr |= orig_pte->raddr & 0xf000; | ||
222 | #else | ||
223 | #error Unknown page size | ||
224 | #endif | ||
225 | |||
226 | /* and write the mapping ea -> hpa into the pt */ | ||
227 | vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); | ||
228 | map = find_sid_vsid(vcpu, vsid); | ||
229 | if (!map) { | ||
230 | kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr); | ||
231 | map = find_sid_vsid(vcpu, vsid); | ||
232 | } | ||
233 | BUG_ON(!map); | ||
234 | |||
235 | vsid = map->host_vsid; | ||
236 | va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M); | ||
237 | |||
238 | if (!orig_pte->may_write) | ||
239 | rflags |= HPTE_R_PP; | ||
240 | else | ||
241 | mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); | ||
242 | |||
243 | if (!orig_pte->may_execute) | ||
244 | rflags |= HPTE_R_N; | ||
245 | |||
246 | hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M); | ||
247 | |||
248 | map_again: | ||
249 | hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); | ||
250 | |||
251 | /* In case we tried normal mapping already, let's nuke old entries */ | ||
252 | if (attempt > 1) | ||
253 | if (ppc_md.hpte_remove(hpteg) < 0) | ||
254 | return -1; | ||
255 | |||
256 | ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M); | ||
257 | |||
258 | if (ret < 0) { | ||
259 | /* If we couldn't map a primary PTE, try a secondary */ | ||
260 | #ifdef USE_SECONDARY | ||
261 | hash = ~hash; | ||
262 | attempt++; | ||
263 | if (attempt % 2) | ||
264 | vflags = HPTE_V_SECONDARY; | ||
265 | else | ||
266 | vflags = 0; | ||
267 | #else | ||
268 | attempt = 2; | ||
269 | #endif | ||
270 | goto map_again; | ||
271 | } else { | ||
272 | int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu); | ||
273 | struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id]; | ||
274 | |||
275 | dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%lx (0x%llx) -> %lx\n", | ||
276 | ((rflags & HPTE_R_PP) == 3) ? '-' : 'w', | ||
277 | (rflags & HPTE_R_N) ? '-' : 'x', | ||
278 | orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr); | ||
279 | |||
280 | pte->slot = hpteg + (ret & 7); | ||
281 | pte->host_va = va; | ||
282 | pte->pte = *orig_pte; | ||
283 | pte->pfn = hpaddr >> PAGE_SHIFT; | ||
284 | } | ||
285 | |||
286 | return 0; | ||
287 | } | ||
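The map_again loop is a classic hashed-page-table insertion policy: try a slot in the primary group, flip to the secondary (bitwise-inverted) hash, and only then evict and retry. A toy, runnable sketch of the USE_SECONDARY variant of that control flow; hpte_insert()/hpte_remove() below are stand-ins for the real ppc_md hooks, and the flag value is assumed for illustration:

#include <stdio.h>

#define HPTE_V_SECONDARY 0x2UL  /* assumed flag value, for illustration */
#define GROUPS 8
#define SLOTS_PER_GROUP 8       /* HPTES_PER_GROUP */

static int slots_used[GROUPS];

/* Toy stand-ins for ppc_md.hpte_insert()/ppc_md.hpte_remove() */
static int hpte_insert(unsigned long group, unsigned long vflags)
{
        (void)vflags;
        if (slots_used[group] == SLOTS_PER_GROUP)
                return -1;              /* group full */
        return slots_used[group]++;
}

static int hpte_remove(unsigned long group)
{
        if (!slots_used[group])
                return -1;
        return --slots_used[group];
}

/* The map_again policy: primary hash first, then the secondary
 * (inverted) hash, then evict an old entry and retry. */
static int map_with_retry(unsigned long hash)
{
        unsigned long vflags = 0;
        int attempt = 0;

        for (;;) {
                unsigned long group = hash % GROUPS;
                int slot;

                if (attempt > 1 && hpte_remove(group) < 0)
                        return -1;      /* nothing left to evict */

                slot = hpte_insert(group, vflags);
                if (slot >= 0)
                        return slot;

                hash = ~hash;           /* flip to the other group */
                attempt++;
                vflags = (attempt % 2) ? HPTE_V_SECONDARY : 0;
        }
}

int main(void)
{
        printf("first: %d\n", map_with_retry(5));
        printf("second: %d\n", map_with_retry(5));
        return 0;
}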
288 | |||
289 | static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) | ||
290 | { | ||
291 | struct kvmppc_sid_map *map; | ||
292 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | ||
293 | u16 sid_map_mask; | ||
294 | static int backwards_map = 0; | ||
295 | |||
296 | if (vcpu->arch.msr & MSR_PR) | ||
297 | gvsid |= VSID_PR; | ||
298 | |||
299 | /* Colliding gvsids hash to the same slot every time, so let's | ||
300 | alternate between the two candidate slots */ | ||
301 | |||
302 | sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); | ||
303 | if (backwards_map) | ||
304 | sid_map_mask = SID_MAP_MASK - sid_map_mask; | ||
305 | |||
306 | map = &to_book3s(vcpu)->sid_map[sid_map_mask]; | ||
307 | |||
308 | /* Make sure we're taking the other map next time */ | ||
309 | backwards_map = !backwards_map; | ||
310 | |||
311 | /* Uh-oh ... out of mappings. Let's flush! */ | ||
312 | if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) { | ||
313 | vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; | ||
314 | memset(vcpu_book3s->sid_map, 0, | ||
315 | sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); | ||
316 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | ||
317 | kvmppc_mmu_flush_segments(vcpu); | ||
318 | } | ||
319 | map->host_vsid = vcpu_book3s->vsid_next++; | ||
320 | |||
321 | map->guest_vsid = gvsid; | ||
322 | map->valid = true; | ||
323 | |||
324 | return map; | ||
325 | } | ||
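find_sid_vsid() and create_sid_map() together form a two-choice hash table: lookup probes the hashed slot and its mirror (SID_MAP_MASK - hash), and creation alternates which of the two a collision overwrites, so colliding gvsids settle into different slots instead of endlessly evicting each other. A sketch of that policy, extending the hash sketch above (same constants and sid_hash(); toy types, not the real struct):

#define SID_MAP_NUM (SID_MAP_MASK + 1)

struct toy_sid_map { uint64_t guest_vsid; int valid; };

static struct toy_sid_map toy_map[SID_MAP_NUM];
static int backwards_map;       /* flip-flops between the two slots */

static struct toy_sid_map *find_or_create(uint64_t gvsid)
{
        uint16_t h = sid_hash(gvsid);   /* fold from the sketch above */
        uint16_t mirror = SID_MAP_MASK - h;
        struct toy_sid_map *m;

        /* find_sid_vsid(): probe the hashed slot, then its mirror */
        if (toy_map[h].valid && toy_map[h].guest_vsid == gvsid)
                return &toy_map[h];
        if (toy_map[mirror].valid && toy_map[mirror].guest_vsid == gvsid)
                return &toy_map[mirror];

        /* create_sid_map(): alternate which slot a collision evicts */
        m = &toy_map[backwards_map ? mirror : h];
        backwards_map = !backwards_map;
        m->guest_vsid = gvsid;
        m->valid = 1;
        return m;
}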
326 | |||
327 | static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) | ||
328 | { | ||
329 | int i; | ||
330 | int max_slb_size = 64; | ||
331 | int found_inval = -1; | ||
332 | int r; | ||
333 | |||
334 | if (!get_paca()->kvm_slb_max) | ||
335 | get_paca()->kvm_slb_max = 1; | ||
336 | |||
337 | /* Are we overwriting? */ | ||
338 | for (i = 1; i < get_paca()->kvm_slb_max; i++) { | ||
339 | if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V)) | ||
340 | found_inval = i; | ||
341 | else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid) | ||
342 | return i; | ||
343 | } | ||
344 | |||
345 | /* Found a spare entry that was invalidated before */ | ||
346 | if (found_inval > 0) | ||
347 | return found_inval; | ||
348 | |||
349 | /* No spare invalid entry, so create one */ | ||
350 | |||
351 | if (mmu_slb_size < 64) | ||
352 | max_slb_size = mmu_slb_size; | ||
353 | |||
354 | /* Overflowing -> purge */ | ||
355 | if ((get_paca()->kvm_slb_max) == max_slb_size) | ||
356 | kvmppc_mmu_flush_segments(vcpu); | ||
357 | |||
358 | r = get_paca()->kvm_slb_max; | ||
359 | get_paca()->kvm_slb_max++; | ||
360 | |||
361 | return r; | ||
362 | } | ||
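kvmppc_mmu_next_segment() picks a shadow-SLB slot in priority order: an existing entry for the same ESID, then any previously invalidated slot, then a fresh slot at the end, purging everything once the SLB is full; slot 0 is reserved, which is why the scan starts at 1 and found_inval > 0 is the validity test. A runnable toy version of the same policy (fixed size assumed; the real code caps at mmu_slb_size):

#include <stdio.h>

#define SLB_SLOTS 64    /* assumed fixed size */

struct toy_slb { unsigned long esid; int valid; };

static struct toy_slb slb[SLB_SLOTS];
static int slb_max = 1;                 /* slot 0 stays reserved */

static void flush_all(void)
{
        slb_max = 1;                    /* forget every guest segment */
}

static int next_segment(unsigned long esid)
{
        int found_inval = -1;
        int i;

        /* Reuse an existing mapping or remember an invalidated slot */
        for (i = 1; i < slb_max; i++) {
                if (!slb[i].valid)
                        found_inval = i;
                else if (slb[i].esid == esid)
                        return i;
        }

        if (found_inval > 0)            /* recycle an invalidated slot */
                return found_inval;

        if (slb_max == SLB_SLOTS)       /* full -> purge and start over */
                flush_all();

        return slb_max++;               /* append a fresh slot */
}

int main(void)
{
        slb[next_segment(0x10)] = (struct toy_slb){ 0x10, 1 };
        slb[next_segment(0x20)] = (struct toy_slb){ 0x20, 1 };
        printf("reuse: %d\n", next_segment(0x10));      /* slot 1 again */
        return 0;
}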
363 | |||
364 | int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) | ||
365 | { | ||
366 | u64 esid = eaddr >> SID_SHIFT; | ||
367 | u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V; | ||
368 | u64 slb_vsid = SLB_VSID_USER; | ||
369 | u64 gvsid; | ||
370 | int slb_index; | ||
371 | struct kvmppc_sid_map *map; | ||
372 | |||
373 | slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK); | ||
374 | |||
375 | if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { | ||
376 | /* Invalidate an entry */ | ||
377 | get_paca()->kvm_slb[slb_index].esid = 0; | ||
378 | return -ENOENT; | ||
379 | } | ||
380 | |||
381 | map = find_sid_vsid(vcpu, gvsid); | ||
382 | if (!map) | ||
383 | map = create_sid_map(vcpu, gvsid); | ||
384 | |||
385 | map->guest_esid = esid; | ||
386 | |||
387 | slb_vsid |= (map->host_vsid << 12); | ||
388 | slb_vsid &= ~SLB_VSID_KP; | ||
389 | slb_esid |= slb_index; | ||
390 | |||
391 | get_paca()->kvm_slb[slb_index].esid = slb_esid; | ||
392 | get_paca()->kvm_slb[slb_index].vsid = slb_vsid; | ||
393 | |||
394 | dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid); | ||
395 | |||
396 | return 0; | ||
397 | } | ||
398 | |||
399 | void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) | ||
400 | { | ||
401 | get_paca()->kvm_slb_max = 1; | ||
402 | get_paca()->kvm_slb[0].esid = 0; | ||
403 | } | ||
404 | |||
405 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) | ||
406 | { | ||
407 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | ||
408 | } | ||
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S new file mode 100644 index 000000000000..c83c60ad96c5 --- /dev/null +++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S | |||
@@ -0,0 +1,204 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright SUSE Linux Products GmbH 2009 | ||
16 | * | ||
17 | * Authors: Alexander Graf <agraf@suse.de> | ||
18 | */ | ||
19 | |||
20 | #include <asm/ppc_asm.h> | ||
21 | #include <asm/kvm_asm.h> | ||
22 | #include <asm/reg.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <asm/asm-offsets.h> | ||
25 | #include <asm/exception-64s.h> | ||
26 | |||
27 | /***************************************************************************** | ||
28 | * * | ||
29 | * Real Mode handlers that need to be in low physical memory * | ||
30 | * * | ||
31 | ****************************************************************************/ | ||
32 | |||
33 | |||
34 | .macro INTERRUPT_TRAMPOLINE intno | ||
35 | |||
36 | .global kvmppc_trampoline_\intno | ||
37 | kvmppc_trampoline_\intno: | ||
38 | |||
39 | mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */ | ||
40 | |||
41 | /* | ||
42 | * First thing to do is to find out if we're coming | ||
43 | * from a KVM guest or a Linux process. | ||
44 | * | ||
45 | * To distinguish, we check a magic byte in the PACA | ||
46 | */ | ||
47 | mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */ | ||
48 | std r12, PACA_KVM_SCRATCH0(r13) | ||
49 | mfcr r12 | ||
50 | stw r12, PACA_KVM_SCRATCH1(r13) | ||
51 | lbz r12, PACA_KVM_IN_GUEST(r13) | ||
52 | cmpwi r12, KVM_GUEST_MODE_NONE | ||
53 | bne ..kvmppc_handler_hasmagic_\intno | ||
54 | /* No KVM guest? Then jump back to the Linux handler! */ | ||
55 | lwz r12, PACA_KVM_SCRATCH1(r13) | ||
56 | mtcr r12 | ||
57 | ld r12, PACA_KVM_SCRATCH0(r13) | ||
58 | mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */ | ||
59 | b kvmppc_resume_\intno /* Get back original handler */ | ||
60 | |||
61 | /* Now we know we're handling a KVM guest */ | ||
62 | ..kvmppc_handler_hasmagic_\intno: | ||
63 | |||
64 | /* Should we just skip the faulting instruction? */ | ||
65 | cmpwi r12, KVM_GUEST_MODE_SKIP | ||
66 | beq kvmppc_handler_skip_ins | ||
67 | |||
68 | /* Let's store which interrupt we're handling */ | ||
69 | li r12, \intno | ||
70 | |||
71 | /* Jump into the SLB exit code that goes to the highmem handler */ | ||
72 | b kvmppc_handler_trampoline_exit | ||
73 | |||
74 | .endm | ||
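Each INTERRUPT_TRAMPOLINE instantiation emits one low-memory stub whose only job is to classify the trap via the PACA magic byte. A runnable C sketch of that three-way dispatch; helper names and constant values are hypothetical, and the real code does all of this with r12/r13 and the PACA scratch slots:

#include <stdio.h>

enum { KVM_GUEST_MODE_NONE, KVM_GUEST_MODE_GUEST, KVM_GUEST_MODE_SKIP };

static int kvm_in_guest = KVM_GUEST_MODE_NONE;

/* Hypothetical stand-ins for the real register/PACA juggling */
static void resume_linux_handler(int intno) { printf("linux %d\n", intno); }
static void skip_faulting_instruction(void) { printf("skip\n"); }
static void exit_to_kvm(int intno)          { printf("kvm %d\n", intno); }

static void trampoline(int intno)
{
        switch (kvm_in_guest) {
        case KVM_GUEST_MODE_NONE:       /* ordinary Linux trap */
                resume_linux_handler(intno);
                break;
        case KVM_GUEST_MODE_SKIP:       /* fault while touching guest memory */
                skip_faulting_instruction();
                break;
        default:                        /* trap taken while a guest ran */
                exit_to_kvm(intno);
        }
}

int main(void)
{
        trampoline(0x300);
        kvm_in_guest = KVM_GUEST_MODE_GUEST;
        trampoline(0x300);
        return 0;
}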
75 | |||
76 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSTEM_RESET | ||
77 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK | ||
78 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE | ||
79 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_SEGMENT | ||
80 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE | ||
81 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_SEGMENT | ||
82 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL | ||
83 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT | ||
84 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM | ||
85 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL | ||
86 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DECREMENTER | ||
87 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_SYSCALL | ||
88 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_TRACE | ||
89 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PERFMON | ||
90 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC | ||
91 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX | ||
92 | |||
93 | /* | ||
94 | * Bring us back to the faulting code, but skip the | ||
95 | * faulting instruction. | ||
96 | * | ||
97 | * This is a generic exit path from the interrupt | ||
98 | * trampolines above. | ||
99 | * | ||
100 | * Input Registers: | ||
101 | * | ||
102 | * R12 = free | ||
103 | * R13 = PACA | ||
104 | * PACA.KVM.SCRATCH0 = guest R12 | ||
105 | * PACA.KVM.SCRATCH1 = guest CR | ||
106 | * SPRG_SCRATCH0 = guest R13 | ||
107 | * | ||
108 | */ | ||
109 | kvmppc_handler_skip_ins: | ||
110 | |||
111 | /* Patch the IP to the next instruction */ | ||
112 | mfsrr0 r12 | ||
113 | addi r12, r12, 4 | ||
114 | mtsrr0 r12 | ||
115 | |||
116 | /* Clean up all state */ | ||
117 | lwz r12, PACA_KVM_SCRATCH1(r13) | ||
118 | mtcr r12 | ||
119 | ld r12, PACA_KVM_SCRATCH0(r13) | ||
120 | mfspr r13, SPRN_SPRG_SCRATCH0 | ||
121 | |||
122 | /* And get back into the code */ | ||
123 | RFI | ||
124 | |||
125 | /* | ||
126 | * This trampoline brings us back to a real mode handler | ||
127 | * | ||
128 | * Input Registers: | ||
129 | * | ||
130 | * R5 = SRR0 | ||
131 | * R6 = SRR1 | ||
132 | * LR = real-mode IP | ||
133 | * | ||
134 | */ | ||
135 | .global kvmppc_handler_lowmem_trampoline | ||
136 | kvmppc_handler_lowmem_trampoline: | ||
137 | |||
138 | mtsrr0 r5 | ||
139 | mtsrr1 r6 | ||
140 | blr | ||
141 | kvmppc_handler_lowmem_trampoline_end: | ||
142 | |||
143 | /* | ||
144 | * Call a function in real mode | ||
145 | * | ||
146 | * Input Registers: | ||
147 | * | ||
148 | * R3 = function | ||
149 | * R4 = MSR | ||
150 | * R5 = CTR | ||
151 | * | ||
152 | */ | ||
153 | _GLOBAL(kvmppc_rmcall) | ||
154 | mtmsr r4 /* Disable relocation, so the mtsrr0/1 | ||
155 | and RFI sequence doesn't get interrupted */ | ||
156 | mtctr r5 | ||
157 | mtsrr0 r3 | ||
158 | mtsrr1 r4 | ||
159 | RFI | ||
160 | |||
161 | /* | ||
162 | * Activate current's external feature (FPU/Altivec/VSX) | ||
163 | */ | ||
164 | #define define_load_up(what) \ | ||
165 | \ | ||
166 | _GLOBAL(kvmppc_load_up_ ## what); \ | ||
167 | subi r1, r1, INT_FRAME_SIZE; \ | ||
168 | mflr r3; \ | ||
169 | std r3, _LINK(r1); \ | ||
170 | mfmsr r4; \ | ||
171 | std r31, GPR3(r1); \ | ||
172 | mr r31, r4; \ | ||
173 | li r5, MSR_DR; \ | ||
174 | oris r5, r5, MSR_EE@h; \ | ||
175 | andc r4, r4, r5; \ | ||
176 | mtmsr r4; \ | ||
177 | \ | ||
178 | bl .load_up_ ## what; \ | ||
179 | \ | ||
180 | mtmsr r31; \ | ||
181 | ld r3, _LINK(r1); \ | ||
182 | ld r31, GPR3(r1); \ | ||
183 | addi r1, r1, INT_FRAME_SIZE; \ | ||
184 | mtlr r3; \ | ||
185 | blr | ||
186 | |||
187 | define_load_up(fpu) | ||
188 | #ifdef CONFIG_ALTIVEC | ||
189 | define_load_up(altivec) | ||
190 | #endif | ||
191 | #ifdef CONFIG_VSX | ||
192 | define_load_up(vsx) | ||
193 | #endif | ||
194 | |||
195 | .global kvmppc_trampoline_lowmem | ||
196 | kvmppc_trampoline_lowmem: | ||
197 | .long kvmppc_handler_lowmem_trampoline - _stext | ||
198 | |||
199 | .global kvmppc_trampoline_enter | ||
200 | kvmppc_trampoline_enter: | ||
201 | .long kvmppc_handler_trampoline_enter - _stext | ||
202 | |||
203 | #include "book3s_64_slb.S" | ||
204 | |||
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S new file mode 100644 index 000000000000..35b762722187 --- /dev/null +++ b/arch/powerpc/kvm/book3s_64_slb.S | |||
@@ -0,0 +1,302 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright SUSE Linux Products GmbH 2009 | ||
16 | * | ||
17 | * Authors: Alexander Graf <agraf@suse.de> | ||
18 | */ | ||
19 | |||
20 | #define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10)) | ||
21 | #define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8) | ||
22 | #define UNBOLT_SLB_ENTRY(num) \ | ||
23 | ld r9, SHADOW_SLB_ESID(num)(r12); \ | ||
24 | /* Invalid? Skip. */; \ | ||
25 | rldicl. r0, r9, 37, 63; \ | ||
26 | beq slb_entry_skip_ ## num; \ | ||
27 | xoris r9, r9, SLB_ESID_V@h; \ | ||
28 | std r9, SHADOW_SLB_ESID(num)(r12); \ | ||
29 | slb_entry_skip_ ## num: | ||
30 | |||
31 | #define REBOLT_SLB_ENTRY(num) \ | ||
32 | ld r10, SHADOW_SLB_ESID(num)(r11); \ | ||
33 | cmpdi r10, 0; \ | ||
34 | beq slb_exit_skip_ ## num; \ | ||
35 | oris r10, r10, SLB_ESID_V@h; \ | ||
36 | ld r9, SHADOW_SLB_VSID(num)(r11); \ | ||
37 | slbmte r9, r10; \ | ||
38 | std r10, SHADOW_SLB_ESID(num)(r11); \ | ||
39 | slb_exit_skip_ ## num: | ||
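Both macros are bit surgery on the shadow-SLB save area: UNBOLT clears SLB_ESID_V so the hypervisor stops re-installing the entry, and REBOLT sets it again before replaying the entry with slbmte. The same valid-bit handling as a runnable C sketch, with SLB_ESID_V assumed to sit at bit 27 (matching the rldicl. test above):

#include <stdio.h>
#include <stdint.h>

#define SLB_ESID_V (1UL << 27)  /* assumed valid-bit position */

/* UNBOLT: mark the shadow entry invalid unless it already is */
static void unbolt(uint64_t *esid)
{
        if (*esid & SLB_ESID_V)
                *esid &= ~SLB_ESID_V;   /* xoris in the real code */
}

/* REBOLT: revalidate a non-empty entry (slbmte would replay it) */
static void rebolt(uint64_t *esid)
{
        if (*esid)                      /* cmpdi r10, 0 */
                *esid |= SLB_ESID_V;    /* oris in the real code */
}

int main(void)
{
        uint64_t e = 0xc000000000000000UL | SLB_ESID_V;

        unbolt(&e);
        printf("%#lx\n", (unsigned long)e);
        rebolt(&e);
        printf("%#lx\n", (unsigned long)e);
        return 0;
}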
40 | |||
41 | /****************************************************************************** | ||
42 | * * | ||
43 | * Entry code * | ||
44 | * * | ||
45 | *****************************************************************************/ | ||
46 | |||
47 | .global kvmppc_handler_trampoline_enter | ||
48 | kvmppc_handler_trampoline_enter: | ||
49 | |||
50 | /* Required state: | ||
51 | * | ||
52 | * MSR = ~IR|DR | ||
53 | * R13 = PACA | ||
54 | * R1 = host R1 | ||
55 | * R2 = host R2 | ||
56 | * R9 = guest IP | ||
57 | * R10 = guest MSR | ||
58 | * all other GPRS = free | ||
59 | * PACA[KVM_CR] = guest CR | ||
60 | * PACA[KVM_XER] = guest XER | ||
61 | */ | ||
62 | |||
63 | mtsrr0 r9 | ||
64 | mtsrr1 r10 | ||
65 | |||
66 | /* Activate guest mode, so faults get handled by KVM */ | ||
67 | li r11, KVM_GUEST_MODE_GUEST | ||
68 | stb r11, PACA_KVM_IN_GUEST(r13) | ||
69 | |||
70 | /* Remove LPAR shadow entries */ | ||
71 | |||
72 | #if SLB_NUM_BOLTED == 3 | ||
73 | |||
74 | ld r12, PACA_SLBSHADOWPTR(r13) | ||
75 | |||
76 | /* Save off the first entry so we can slbie it later */ | ||
77 | ld r10, SHADOW_SLB_ESID(0)(r12) | ||
78 | ld r11, SHADOW_SLB_VSID(0)(r12) | ||
79 | |||
80 | /* Remove bolted entries */ | ||
81 | UNBOLT_SLB_ENTRY(0) | ||
82 | UNBOLT_SLB_ENTRY(1) | ||
83 | UNBOLT_SLB_ENTRY(2) | ||
84 | |||
85 | #else | ||
86 | #error unknown number of bolted entries | ||
87 | #endif | ||
88 | |||
89 | /* Flush SLB */ | ||
90 | |||
91 | slbia | ||
92 | |||
93 | /* r10 = esid & ESID_MASK */ | ||
94 | rldicr r10, r10, 0, 35 | ||
95 | /* r10 |= CLASS_BIT(VSID), computed in r12 */ | ||
96 | rldic r12, r11, 56 - 36, 36 | ||
97 | or r10, r10, r12 | ||
98 | slbie r10 | ||
99 | |||
100 | isync | ||
101 | |||
102 | /* Fill SLB with our shadow */ | ||
103 | |||
104 | lbz r12, PACA_KVM_SLB_MAX(r13) | ||
105 | mulli r12, r12, 16 | ||
106 | addi r12, r12, PACA_KVM_SLB | ||
107 | add r12, r12, r13 | ||
108 | |||
109 | /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_max * 16; r11 += 16) */ | ||
110 | li r11, PACA_KVM_SLB | ||
111 | add r11, r11, r13 | ||
112 | |||
113 | slb_loop_enter: | ||
114 | |||
115 | ld r10, 0(r11) | ||
116 | |||
117 | rldicl. r0, r10, 37, 63 | ||
118 | beq slb_loop_enter_skip | ||
119 | |||
120 | ld r9, 8(r11) | ||
121 | slbmte r9, r10 | ||
122 | |||
123 | slb_loop_enter_skip: | ||
124 | addi r11, r11, 16 | ||
125 | cmpd cr0, r11, r12 | ||
126 | blt slb_loop_enter | ||
127 | |||
128 | slb_do_enter: | ||
129 | |||
130 | /* Enter guest */ | ||
131 | |||
132 | ld r0, (PACA_KVM_R0)(r13) | ||
133 | ld r1, (PACA_KVM_R1)(r13) | ||
134 | ld r2, (PACA_KVM_R2)(r13) | ||
135 | ld r3, (PACA_KVM_R3)(r13) | ||
136 | ld r4, (PACA_KVM_R4)(r13) | ||
137 | ld r5, (PACA_KVM_R5)(r13) | ||
138 | ld r6, (PACA_KVM_R6)(r13) | ||
139 | ld r7, (PACA_KVM_R7)(r13) | ||
140 | ld r8, (PACA_KVM_R8)(r13) | ||
141 | ld r9, (PACA_KVM_R9)(r13) | ||
142 | ld r10, (PACA_KVM_R10)(r13) | ||
143 | ld r12, (PACA_KVM_R12)(r13) | ||
144 | |||
145 | lwz r11, (PACA_KVM_CR)(r13) | ||
146 | mtcr r11 | ||
147 | |||
148 | ld r11, (PACA_KVM_XER)(r13) | ||
149 | mtxer r11 | ||
150 | |||
151 | ld r11, (PACA_KVM_R11)(r13) | ||
152 | ld r13, (PACA_KVM_R13)(r13) | ||
153 | |||
154 | RFI | ||
155 | kvmppc_handler_trampoline_enter_end: | ||
156 | |||
157 | |||
158 | |||
159 | /****************************************************************************** | ||
160 | * * | ||
161 | * Exit code * | ||
162 | * * | ||
163 | *****************************************************************************/ | ||
164 | |||
165 | .global kvmppc_handler_trampoline_exit | ||
166 | kvmppc_handler_trampoline_exit: | ||
167 | |||
168 | /* Register usage at this point: | ||
169 | * | ||
170 | * SPRG_SCRATCH0 = guest R13 | ||
171 | * R12 = exit handler id | ||
172 | * R13 = PACA | ||
173 | * PACA.KVM.SCRATCH0 = guest R12 | ||
174 | * PACA.KVM.SCRATCH1 = guest CR | ||
175 | * | ||
176 | */ | ||
177 | |||
178 | /* Save registers */ | ||
179 | |||
180 | std r0, PACA_KVM_R0(r13) | ||
181 | std r1, PACA_KVM_R1(r13) | ||
182 | std r2, PACA_KVM_R2(r13) | ||
183 | std r3, PACA_KVM_R3(r13) | ||
184 | std r4, PACA_KVM_R4(r13) | ||
185 | std r5, PACA_KVM_R5(r13) | ||
186 | std r6, PACA_KVM_R6(r13) | ||
187 | std r7, PACA_KVM_R7(r13) | ||
188 | std r8, PACA_KVM_R8(r13) | ||
189 | std r9, PACA_KVM_R9(r13) | ||
190 | std r10, PACA_KVM_R10(r13) | ||
191 | std r11, PACA_KVM_R11(r13) | ||
192 | |||
193 | /* Restore R1/R2 so we can handle faults */ | ||
194 | ld r1, PACA_KVM_HOST_R1(r13) | ||
195 | ld r2, PACA_KVM_HOST_R2(r13) | ||
196 | |||
197 | /* Save guest PC and MSR in GPRs */ | ||
198 | mfsrr0 r3 | ||
199 | mfsrr1 r4 | ||
200 | |||
201 | /* Get scratch'ed off registers */ | ||
202 | mfspr r9, SPRN_SPRG_SCRATCH0 | ||
203 | std r9, PACA_KVM_R13(r13) | ||
204 | |||
205 | ld r8, PACA_KVM_SCRATCH0(r13) | ||
206 | std r8, PACA_KVM_R12(r13) | ||
207 | |||
208 | lwz r7, PACA_KVM_SCRATCH1(r13) | ||
209 | stw r7, PACA_KVM_CR(r13) | ||
210 | |||
211 | /* Save more register state */ | ||
212 | |||
213 | mfxer r6 | ||
214 | stw r6, PACA_KVM_XER(r13) | ||
215 | |||
216 | mfdar r5 | ||
217 | mfdsisr r6 | ||
218 | |||
219 | /* | ||
220 | * In order to easily fetch the instruction that we took | ||
221 | * the #vmexit at, we exploit the fact that the virtual | ||
222 | * layout is still the same here, so we can just ld | ||
223 | * from the guest's PC address | ||
224 | */ | ||
225 | |||
226 | /* We only load the last instruction when it's safe */ | ||
227 | cmpwi r12, BOOK3S_INTERRUPT_DATA_STORAGE | ||
228 | beq ld_last_inst | ||
229 | cmpwi r12, BOOK3S_INTERRUPT_PROGRAM | ||
230 | beq ld_last_inst | ||
231 | |||
232 | b no_ld_last_inst | ||
233 | |||
234 | ld_last_inst: | ||
235 | /* Save off the guest instruction we're at */ | ||
236 | |||
237 | /* Set guest mode to 'jump over instruction' so if lwz faults | ||
238 | * we'll just continue at the next IP. */ | ||
239 | li r9, KVM_GUEST_MODE_SKIP | ||
240 | stb r9, PACA_KVM_IN_GUEST(r13) | ||
241 | |||
242 | /* 1) enable paging for data */ | ||
243 | mfmsr r9 | ||
244 | ori r11, r9, MSR_DR /* Enable paging for data */ | ||
245 | mtmsr r11 | ||
246 | /* 2) fetch the instruction */ | ||
247 | li r0, KVM_INST_FETCH_FAILED /* In case lwz faults */ | ||
248 | lwz r0, 0(r3) | ||
249 | /* 3) disable paging again */ | ||
250 | mtmsr r9 | ||
251 | |||
252 | no_ld_last_inst: | ||
253 | |||
254 | /* Unset guest mode */ | ||
255 | li r9, KVM_GUEST_MODE_NONE | ||
256 | stb r9, PACA_KVM_IN_GUEST(r13) | ||
257 | |||
258 | /* Restore bolted entries from the shadow and fix it along the way */ | ||
259 | |||
260 | /* We don't store anything in entry 0, so we don't need to take care of it */ | ||
261 | slbia | ||
262 | isync | ||
263 | |||
264 | #if SLB_NUM_BOLTED == 3 | ||
265 | |||
266 | ld r11, PACA_SLBSHADOWPTR(r13) | ||
267 | |||
268 | REBOLT_SLB_ENTRY(0) | ||
269 | REBOLT_SLB_ENTRY(1) | ||
270 | REBOLT_SLB_ENTRY(2) | ||
271 | |||
272 | #else | ||
273 | #error unknown number of bolted entries | ||
274 | #endif | ||
275 | |||
276 | slb_do_exit: | ||
277 | |||
278 | /* Register usage at this point: | ||
279 | * | ||
280 | * R0 = guest last inst | ||
281 | * R1 = host R1 | ||
282 | * R2 = host R2 | ||
283 | * R3 = guest PC | ||
284 | * R4 = guest MSR | ||
285 | * R5 = guest DAR | ||
286 | * R6 = guest DSISR | ||
287 | * R12 = exit handler id | ||
288 | * R13 = PACA | ||
289 | * PACA.KVM.* = guest * | ||
290 | * | ||
291 | */ | ||
292 | |||
293 | /* RFI into the highmem handler */ | ||
294 | mfmsr r7 | ||
295 | ori r7, r7, MSR_IR|MSR_DR|MSR_RI /* Enable paging */ | ||
296 | mtsrr1 r7 | ||
297 | ld r8, PACA_KVM_VMHANDLER(r13) /* Highmem handler address */ | ||
298 | mtsrr0 r8 | ||
299 | |||
300 | RFI | ||
301 | kvmppc_handler_trampoline_exit_end: | ||
302 | |||
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index e7bf4d029484..2a3a1953d4bd 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
22 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/kvm_host.h> | 23 | #include <linux/kvm_host.h> |
24 | #include <linux/gfp.h> | ||
24 | #include <linux/module.h> | 25 | #include <linux/module.h> |
25 | #include <linux/vmalloc.h> | 26 | #include <linux/vmalloc.h> |
26 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
@@ -69,10 +70,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) | |||
69 | 70 | ||
70 | for (i = 0; i < 32; i += 4) { | 71 | for (i = 0; i < 32; i += 4) { |
71 | printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i, | 72 | printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i, |
72 | vcpu->arch.gpr[i], | 73 | kvmppc_get_gpr(vcpu, i), |
73 | vcpu->arch.gpr[i+1], | 74 | kvmppc_get_gpr(vcpu, i+1), |
74 | vcpu->arch.gpr[i+2], | 75 | kvmppc_get_gpr(vcpu, i+2), |
75 | vcpu->arch.gpr[i+3]); | 76 | kvmppc_get_gpr(vcpu, i+3)); |
76 | } | 77 | } |
77 | } | 78 | } |
78 | 79 | ||
@@ -82,8 +83,32 @@ static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu, | |||
82 | set_bit(priority, &vcpu->arch.pending_exceptions); | 83 | set_bit(priority, &vcpu->arch.pending_exceptions); |
83 | } | 84 | } |
84 | 85 | ||
85 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu) | 86 | static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, |
87 | ulong dear_flags, ulong esr_flags) | ||
86 | { | 88 | { |
89 | vcpu->arch.queued_dear = dear_flags; | ||
90 | vcpu->arch.queued_esr = esr_flags; | ||
91 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); | ||
92 | } | ||
93 | |||
94 | static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, | ||
95 | ulong dear_flags, ulong esr_flags) | ||
96 | { | ||
97 | vcpu->arch.queued_dear = dear_flags; | ||
98 | vcpu->arch.queued_esr = esr_flags; | ||
99 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); | ||
100 | } | ||
101 | |||
102 | static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, | ||
103 | ulong esr_flags) | ||
104 | { | ||
105 | vcpu->arch.queued_esr = esr_flags; | ||
106 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); | ||
107 | } | ||
108 | |||
109 | void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags) | ||
110 | { | ||
111 | vcpu->arch.queued_esr = esr_flags; | ||
87 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); | 112 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); |
88 | } | 113 | } |
89 | 114 | ||
@@ -97,6 +122,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) | |||
97 | return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); | 122 | return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); |
98 | } | 123 | } |
99 | 124 | ||
125 | void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) | ||
126 | { | ||
127 | clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); | ||
128 | } | ||
129 | |||
100 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, | 130 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, |
101 | struct kvm_interrupt *irq) | 131 | struct kvm_interrupt *irq) |
102 | { | 132 | { |
@@ -109,14 +139,19 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
109 | { | 139 | { |
110 | int allowed = 0; | 140 | int allowed = 0; |
111 | ulong msr_mask; | 141 | ulong msr_mask; |
142 | bool update_esr = false, update_dear = false; | ||
112 | 143 | ||
113 | switch (priority) { | 144 | switch (priority) { |
114 | case BOOKE_IRQPRIO_PROGRAM: | ||
115 | case BOOKE_IRQPRIO_DTLB_MISS: | 145 | case BOOKE_IRQPRIO_DTLB_MISS: |
116 | case BOOKE_IRQPRIO_ITLB_MISS: | ||
117 | case BOOKE_IRQPRIO_SYSCALL: | ||
118 | case BOOKE_IRQPRIO_DATA_STORAGE: | 146 | case BOOKE_IRQPRIO_DATA_STORAGE: |
147 | update_dear = true; | ||
148 | /* fall through */ | ||
119 | case BOOKE_IRQPRIO_INST_STORAGE: | 149 | case BOOKE_IRQPRIO_INST_STORAGE: |
150 | case BOOKE_IRQPRIO_PROGRAM: | ||
151 | update_esr = true; | ||
152 | /* fall through */ | ||
153 | case BOOKE_IRQPRIO_ITLB_MISS: | ||
154 | case BOOKE_IRQPRIO_SYSCALL: | ||
120 | case BOOKE_IRQPRIO_FP_UNAVAIL: | 155 | case BOOKE_IRQPRIO_FP_UNAVAIL: |
121 | case BOOKE_IRQPRIO_SPE_UNAVAIL: | 156 | case BOOKE_IRQPRIO_SPE_UNAVAIL: |
122 | case BOOKE_IRQPRIO_SPE_FP_DATA: | 157 | case BOOKE_IRQPRIO_SPE_FP_DATA: |
@@ -151,6 +186,10 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
151 | vcpu->arch.srr0 = vcpu->arch.pc; | 186 | vcpu->arch.srr0 = vcpu->arch.pc; |
152 | vcpu->arch.srr1 = vcpu->arch.msr; | 187 | vcpu->arch.srr1 = vcpu->arch.msr; |
153 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | 188 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; |
189 | if (update_esr) | ||
190 | vcpu->arch.esr = vcpu->arch.queued_esr; | ||
191 | if (update_dear) | ||
192 | vcpu->arch.dear = vcpu->arch.queued_dear; | ||
154 | kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); | 193 | kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); |
155 | 194 | ||
156 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 195 | clear_bit(priority, &vcpu->arch.pending_exceptions); |
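The reordered switch leans on fallthrough: a DTLB miss or data storage fault updates both DEAR and ESR, instruction storage and program faults update only ESR, and the queued values are committed to the architected registers only at delivery time. A condensed runnable sketch of that latching (toy types and hypothetical names, not the real structures):

#include <stdio.h>

enum prio { PRIO_DTLB_MISS, PRIO_DATA_STORAGE, PRIO_INST_STORAGE,
            PRIO_PROGRAM, PRIO_SYSCALL };

struct toy_vcpu {
        unsigned long queued_dear, queued_esr;
        unsigned long dear, esr;
};

/* Latch queued fault state into DEAR/ESR only when delivering */
static void deliver(struct toy_vcpu *v, enum prio p)
{
        int update_esr = 0, update_dear = 0;

        switch (p) {
        case PRIO_DTLB_MISS:
        case PRIO_DATA_STORAGE:
                update_dear = 1;
                /* fall through */
        case PRIO_INST_STORAGE:
        case PRIO_PROGRAM:
                update_esr = 1;
                break;
        default:
                break;
        }

        if (update_esr)
                v->esr = v->queued_esr;
        if (update_dear)
                v->dear = v->queued_dear;
}

int main(void)
{
        struct toy_vcpu v = { .queued_dear = 0xdead, .queued_esr = 0x42 };

        deliver(&v, PRIO_DATA_STORAGE);
        printf("dear=%#lx esr=%#lx\n", v.dear, v.esr);
        return 0;
}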
@@ -223,8 +262,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
223 | if (vcpu->arch.msr & MSR_PR) { | 262 | if (vcpu->arch.msr & MSR_PR) { |
224 | /* Program traps generated by user-level software must be handled | 263 | /* Program traps generated by user-level software must be handled |
225 | * by the guest kernel. */ | 264 | * by the guest kernel. */ |
226 | vcpu->arch.esr = vcpu->arch.fault_esr; | 265 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); |
227 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM); | ||
228 | r = RESUME_GUEST; | 266 | r = RESUME_GUEST; |
229 | kvmppc_account_exit(vcpu, USR_PR_INST); | 267 | kvmppc_account_exit(vcpu, USR_PR_INST); |
230 | break; | 268 | break; |
@@ -280,16 +318,14 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
280 | break; | 318 | break; |
281 | 319 | ||
282 | case BOOKE_INTERRUPT_DATA_STORAGE: | 320 | case BOOKE_INTERRUPT_DATA_STORAGE: |
283 | vcpu->arch.dear = vcpu->arch.fault_dear; | 321 | kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear, |
284 | vcpu->arch.esr = vcpu->arch.fault_esr; | 322 | vcpu->arch.fault_esr); |
285 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE); | ||
286 | kvmppc_account_exit(vcpu, DSI_EXITS); | 323 | kvmppc_account_exit(vcpu, DSI_EXITS); |
287 | r = RESUME_GUEST; | 324 | r = RESUME_GUEST; |
288 | break; | 325 | break; |
289 | 326 | ||
290 | case BOOKE_INTERRUPT_INST_STORAGE: | 327 | case BOOKE_INTERRUPT_INST_STORAGE: |
291 | vcpu->arch.esr = vcpu->arch.fault_esr; | 328 | kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr); |
292 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE); | ||
293 | kvmppc_account_exit(vcpu, ISI_EXITS); | 329 | kvmppc_account_exit(vcpu, ISI_EXITS); |
294 | r = RESUME_GUEST; | 330 | r = RESUME_GUEST; |
295 | break; | 331 | break; |
@@ -310,9 +346,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
310 | gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); | 346 | gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); |
311 | if (gtlb_index < 0) { | 347 | if (gtlb_index < 0) { |
312 | /* The guest didn't have a mapping for it. */ | 348 | /* The guest didn't have a mapping for it. */ |
313 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS); | 349 | kvmppc_core_queue_dtlb_miss(vcpu, |
314 | vcpu->arch.dear = vcpu->arch.fault_dear; | 350 | vcpu->arch.fault_dear, |
315 | vcpu->arch.esr = vcpu->arch.fault_esr; | 351 | vcpu->arch.fault_esr); |
316 | kvmppc_mmu_dtlb_miss(vcpu); | 352 | kvmppc_mmu_dtlb_miss(vcpu); |
317 | kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); | 353 | kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS); |
318 | r = RESUME_GUEST; | 354 | r = RESUME_GUEST; |
@@ -426,7 +462,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
426 | { | 462 | { |
427 | vcpu->arch.pc = 0; | 463 | vcpu->arch.pc = 0; |
428 | vcpu->arch.msr = 0; | 464 | vcpu->arch.msr = 0; |
429 | vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */ | 465 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
430 | 466 | ||
431 | vcpu->arch.shadow_pid = 1; | 467 | vcpu->arch.shadow_pid = 1; |
432 | 468 | ||
@@ -444,10 +480,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
444 | int i; | 480 | int i; |
445 | 481 | ||
446 | regs->pc = vcpu->arch.pc; | 482 | regs->pc = vcpu->arch.pc; |
447 | regs->cr = vcpu->arch.cr; | 483 | regs->cr = kvmppc_get_cr(vcpu); |
448 | regs->ctr = vcpu->arch.ctr; | 484 | regs->ctr = vcpu->arch.ctr; |
449 | regs->lr = vcpu->arch.lr; | 485 | regs->lr = vcpu->arch.lr; |
450 | regs->xer = vcpu->arch.xer; | 486 | regs->xer = kvmppc_get_xer(vcpu); |
451 | regs->msr = vcpu->arch.msr; | 487 | regs->msr = vcpu->arch.msr; |
452 | regs->srr0 = vcpu->arch.srr0; | 488 | regs->srr0 = vcpu->arch.srr0; |
453 | regs->srr1 = vcpu->arch.srr1; | 489 | regs->srr1 = vcpu->arch.srr1; |
@@ -461,7 +497,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
461 | regs->sprg7 = vcpu->arch.sprg7; | 497 | regs->sprg7 = vcpu->arch.sprg7;
462 | 498 | ||
463 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 499 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
464 | regs->gpr[i] = vcpu->arch.gpr[i]; | 500 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); |
465 | 501 | ||
466 | return 0; | 502 | return 0; |
467 | } | 503 | } |
@@ -471,10 +507,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
471 | int i; | 507 | int i; |
472 | 508 | ||
473 | vcpu->arch.pc = regs->pc; | 509 | vcpu->arch.pc = regs->pc; |
474 | vcpu->arch.cr = regs->cr; | 510 | kvmppc_set_cr(vcpu, regs->cr); |
475 | vcpu->arch.ctr = regs->ctr; | 511 | vcpu->arch.ctr = regs->ctr; |
476 | vcpu->arch.lr = regs->lr; | 512 | vcpu->arch.lr = regs->lr; |
477 | vcpu->arch.xer = regs->xer; | 513 | kvmppc_set_xer(vcpu, regs->xer); |
478 | kvmppc_set_msr(vcpu, regs->msr); | 514 | kvmppc_set_msr(vcpu, regs->msr); |
479 | vcpu->arch.srr0 = regs->srr0; | 515 | vcpu->arch.srr0 = regs->srr0; |
480 | vcpu->arch.srr1 = regs->srr1; | 516 | vcpu->arch.srr1 = regs->srr1; |
@@ -486,8 +522,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
486 | vcpu->arch.sprg6 = regs->sprg6; | 522 | vcpu->arch.sprg6 = regs->sprg6;
487 | vcpu->arch.sprg7 = regs->sprg7; | 523 | vcpu->arch.sprg7 = regs->sprg7;
488 | 524 | ||
489 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++) | 525 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
490 | vcpu->arch.gpr[i] = regs->gpr[i]; | 526 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); |
491 | 527 | ||
492 | return 0; | 528 | return 0; |
493 | } | 529 | } |
@@ -520,6 +556,11 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | |||
520 | return kvmppc_core_vcpu_translate(vcpu, tr); | 556 | return kvmppc_core_vcpu_translate(vcpu, tr); |
521 | } | 557 | } |
522 | 558 | ||
559 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | ||
560 | { | ||
561 | return -ENOTSUPP; | ||
562 | } | ||
563 | |||
523 | int __init kvmppc_booke_init(void) | 564 | int __init kvmppc_booke_init(void) |
524 | { | 565 | { |
525 | unsigned long ivor[16]; | 566 | unsigned long ivor[16]; |
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index aebc65e93f4b..cbc790ee1928 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c | |||
@@ -62,20 +62,20 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
62 | 62 | ||
63 | case OP_31_XOP_MFMSR: | 63 | case OP_31_XOP_MFMSR: |
64 | rt = get_rt(inst); | 64 | rt = get_rt(inst); |
65 | vcpu->arch.gpr[rt] = vcpu->arch.msr; | 65 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr); |
66 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); | 66 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); |
67 | break; | 67 | break; |
68 | 68 | ||
69 | case OP_31_XOP_MTMSR: | 69 | case OP_31_XOP_MTMSR: |
70 | rs = get_rs(inst); | 70 | rs = get_rs(inst); |
71 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); | 71 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); |
72 | kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]); | 72 | kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); |
73 | break; | 73 | break; |
74 | 74 | ||
75 | case OP_31_XOP_WRTEE: | 75 | case OP_31_XOP_WRTEE: |
76 | rs = get_rs(inst); | 76 | rs = get_rs(inst); |
77 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | 77 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) |
78 | | (vcpu->arch.gpr[rs] & MSR_EE); | 78 | | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); |
79 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | 79 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); |
80 | break; | 80 | break; |
81 | 81 | ||
@@ -101,22 +101,23 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
101 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 101 | int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) |
102 | { | 102 | { |
103 | int emulated = EMULATE_DONE; | 103 | int emulated = EMULATE_DONE; |
104 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | ||
104 | 105 | ||
105 | switch (sprn) { | 106 | switch (sprn) { |
106 | case SPRN_DEAR: | 107 | case SPRN_DEAR: |
107 | vcpu->arch.dear = vcpu->arch.gpr[rs]; break; | 108 | vcpu->arch.dear = spr_val; break; |
108 | case SPRN_ESR: | 109 | case SPRN_ESR: |
109 | vcpu->arch.esr = vcpu->arch.gpr[rs]; break; | 110 | vcpu->arch.esr = spr_val; break; |
110 | case SPRN_DBCR0: | 111 | case SPRN_DBCR0: |
111 | vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break; | 112 | vcpu->arch.dbcr0 = spr_val; break; |
112 | case SPRN_DBCR1: | 113 | case SPRN_DBCR1: |
113 | vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break; | 114 | vcpu->arch.dbcr1 = spr_val; break; |
114 | case SPRN_DBSR: | 115 | case SPRN_DBSR: |
115 | vcpu->arch.dbsr &= ~vcpu->arch.gpr[rs]; break; | 116 | vcpu->arch.dbsr &= ~spr_val; break; |
116 | case SPRN_TSR: | 117 | case SPRN_TSR: |
117 | vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break; | 118 | vcpu->arch.tsr &= ~spr_val; break; |
118 | case SPRN_TCR: | 119 | case SPRN_TCR: |
119 | vcpu->arch.tcr = vcpu->arch.gpr[rs]; | 120 | vcpu->arch.tcr = spr_val; |
120 | kvmppc_emulate_dec(vcpu); | 121 | kvmppc_emulate_dec(vcpu); |
121 | break; | 122 | break; |
122 | 123 | ||
@@ -124,64 +125,64 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
124 | * loaded into the real SPRGs when resuming the | 125 | * loaded into the real SPRGs when resuming the |
125 | * guest. */ | 126 | * guest. */ |
126 | case SPRN_SPRG4: | 127 | case SPRN_SPRG4: |
127 | vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break; | 128 | vcpu->arch.sprg4 = spr_val; break; |
128 | case SPRN_SPRG5: | 129 | case SPRN_SPRG5: |
129 | vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break; | 130 | vcpu->arch.sprg5 = spr_val; break; |
130 | case SPRN_SPRG6: | 131 | case SPRN_SPRG6: |
131 | vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break; | 132 | vcpu->arch.sprg6 = spr_val; break; |
132 | case SPRN_SPRG7: | 133 | case SPRN_SPRG7: |
133 | vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break; | 134 | vcpu->arch.sprg7 = spr_val; break; |
134 | 135 | ||
135 | case SPRN_IVPR: | 136 | case SPRN_IVPR: |
136 | vcpu->arch.ivpr = vcpu->arch.gpr[rs]; | 137 | vcpu->arch.ivpr = spr_val; |
137 | break; | 138 | break; |
138 | case SPRN_IVOR0: | 139 | case SPRN_IVOR0: |
139 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs]; | 140 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; |
140 | break; | 141 | break; |
141 | case SPRN_IVOR1: | 142 | case SPRN_IVOR1: |
142 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs]; | 143 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val; |
143 | break; | 144 | break; |
144 | case SPRN_IVOR2: | 145 | case SPRN_IVOR2: |
145 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs]; | 146 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; |
146 | break; | 147 | break; |
147 | case SPRN_IVOR3: | 148 | case SPRN_IVOR3: |
148 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs]; | 149 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; |
149 | break; | 150 | break; |
150 | case SPRN_IVOR4: | 151 | case SPRN_IVOR4: |
151 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs]; | 152 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val; |
152 | break; | 153 | break; |
153 | case SPRN_IVOR5: | 154 | case SPRN_IVOR5: |
154 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs]; | 155 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val; |
155 | break; | 156 | break; |
156 | case SPRN_IVOR6: | 157 | case SPRN_IVOR6: |
157 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs]; | 158 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val; |
158 | break; | 159 | break; |
159 | case SPRN_IVOR7: | 160 | case SPRN_IVOR7: |
160 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs]; | 161 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val; |
161 | break; | 162 | break; |
162 | case SPRN_IVOR8: | 163 | case SPRN_IVOR8: |
163 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs]; | 164 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; |
164 | break; | 165 | break; |
165 | case SPRN_IVOR9: | 166 | case SPRN_IVOR9: |
166 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs]; | 167 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; |
167 | break; | 168 | break; |
168 | case SPRN_IVOR10: | 169 | case SPRN_IVOR10: |
169 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs]; | 170 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val; |
170 | break; | 171 | break; |
171 | case SPRN_IVOR11: | 172 | case SPRN_IVOR11: |
172 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs]; | 173 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val; |
173 | break; | 174 | break; |
174 | case SPRN_IVOR12: | 175 | case SPRN_IVOR12: |
175 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs]; | 176 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val; |
176 | break; | 177 | break; |
177 | case SPRN_IVOR13: | 178 | case SPRN_IVOR13: |
178 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs]; | 179 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val; |
179 | break; | 180 | break; |
180 | case SPRN_IVOR14: | 181 | case SPRN_IVOR14: |
181 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs]; | 182 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val; |
182 | break; | 183 | break; |
183 | case SPRN_IVOR15: | 184 | case SPRN_IVOR15: |
184 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs]; | 185 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val; |
185 | break; | 186 | break; |
186 | 187 | ||
187 | default: | 188 | default: |
@@ -197,65 +198,65 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
197 | 198 | ||
198 | switch (sprn) { | 199 | switch (sprn) { |
199 | case SPRN_IVPR: | 200 | case SPRN_IVPR: |
200 | vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break; | 201 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; |
201 | case SPRN_DEAR: | 202 | case SPRN_DEAR: |
202 | vcpu->arch.gpr[rt] = vcpu->arch.dear; break; | 203 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break; |
203 | case SPRN_ESR: | 204 | case SPRN_ESR: |
204 | vcpu->arch.gpr[rt] = vcpu->arch.esr; break; | 205 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break; |
205 | case SPRN_DBCR0: | 206 | case SPRN_DBCR0: |
206 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break; | 207 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break; |
207 | case SPRN_DBCR1: | 208 | case SPRN_DBCR1: |
208 | vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break; | 209 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break; |
209 | case SPRN_DBSR: | 210 | case SPRN_DBSR: |
210 | vcpu->arch.gpr[rt] = vcpu->arch.dbsr; break; | 211 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break; |
211 | 212 | ||
212 | case SPRN_IVOR0: | 213 | case SPRN_IVOR0: |
213 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; | 214 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]); |
214 | break; | 215 | break; |
215 | case SPRN_IVOR1: | 216 | case SPRN_IVOR1: |
216 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; | 217 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]); |
217 | break; | 218 | break; |
218 | case SPRN_IVOR2: | 219 | case SPRN_IVOR2: |
219 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; | 220 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); |
220 | break; | 221 | break; |
221 | case SPRN_IVOR3: | 222 | case SPRN_IVOR3: |
222 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; | 223 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]); |
223 | break; | 224 | break; |
224 | case SPRN_IVOR4: | 225 | case SPRN_IVOR4: |
225 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; | 226 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]); |
226 | break; | 227 | break; |
227 | case SPRN_IVOR5: | 228 | case SPRN_IVOR5: |
228 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; | 229 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]); |
229 | break; | 230 | break; |
230 | case SPRN_IVOR6: | 231 | case SPRN_IVOR6: |
231 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; | 232 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]); |
232 | break; | 233 | break; |
233 | case SPRN_IVOR7: | 234 | case SPRN_IVOR7: |
234 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; | 235 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]); |
235 | break; | 236 | break; |
236 | case SPRN_IVOR8: | 237 | case SPRN_IVOR8: |
237 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; | 238 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); |
238 | break; | 239 | break; |
239 | case SPRN_IVOR9: | 240 | case SPRN_IVOR9: |
240 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; | 241 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]); |
241 | break; | 242 | break; |
242 | case SPRN_IVOR10: | 243 | case SPRN_IVOR10: |
243 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; | 244 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]); |
244 | break; | 245 | break; |
245 | case SPRN_IVOR11: | 246 | case SPRN_IVOR11: |
246 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; | 247 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]); |
247 | break; | 248 | break; |
248 | case SPRN_IVOR12: | 249 | case SPRN_IVOR12: |
249 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; | 250 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]); |
250 | break; | 251 | break; |
251 | case SPRN_IVOR13: | 252 | case SPRN_IVOR13: |
252 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; | 253 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]); |
253 | break; | 254 | break; |
254 | case SPRN_IVOR14: | 255 | case SPRN_IVOR14: |
255 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; | 256 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]); |
256 | break; | 257 | break; |
257 | case SPRN_IVOR15: | 258 | case SPRN_IVOR15: |
258 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; | 259 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]); |
259 | break; | 260 | break; |
260 | 261 | ||
261 | default: | 262 | default: |
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index 64949eef43f1..669a5c5fc7d7 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/kvm_host.h> | 15 | #include <linux/kvm_host.h> |
16 | #include <linux/slab.h> | ||
16 | #include <linux/err.h> | 17 | #include <linux/err.h> |
17 | 18 | ||
18 | #include <asm/reg.h> | 19 | #include <asm/reg.h> |
@@ -60,6 +61,12 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | |||
60 | 61 | ||
61 | kvmppc_e500_tlb_setup(vcpu_e500); | 62 | kvmppc_e500_tlb_setup(vcpu_e500); |
62 | 63 | ||
64 | /* Registers init */ | ||
65 | vcpu->arch.pvr = mfspr(SPRN_PVR); | ||
66 | |||
67 | /* Since booke kvm only support one core, update all vcpus' PIR to 0 */ | ||
68 | vcpu->vcpu_id = 0; | ||
69 | |||
63 | return 0; | 70 | return 0; |
64 | } | 71 | } |
65 | 72 | ||
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index be95b8d8e3b7..8e3edfbc9634 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c | |||
@@ -74,54 +74,59 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
74 | { | 74 | { |
75 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 75 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
76 | int emulated = EMULATE_DONE; | 76 | int emulated = EMULATE_DONE; |
77 | ulong spr_val = kvmppc_get_gpr(vcpu, rs); | ||
77 | 78 | ||
78 | switch (sprn) { | 79 | switch (sprn) { |
79 | case SPRN_PID: | 80 | case SPRN_PID: |
80 | vcpu_e500->pid[0] = vcpu->arch.shadow_pid = | 81 | vcpu_e500->pid[0] = vcpu->arch.shadow_pid = |
81 | vcpu->arch.pid = vcpu->arch.gpr[rs]; | 82 | vcpu->arch.pid = spr_val; |
82 | break; | 83 | break; |
83 | case SPRN_PID1: | 84 | case SPRN_PID1: |
84 | vcpu_e500->pid[1] = vcpu->arch.gpr[rs]; break; | 85 | vcpu_e500->pid[1] = spr_val; break; |
85 | case SPRN_PID2: | 86 | case SPRN_PID2: |
86 | vcpu_e500->pid[2] = vcpu->arch.gpr[rs]; break; | 87 | vcpu_e500->pid[2] = spr_val; break; |
87 | case SPRN_MAS0: | 88 | case SPRN_MAS0: |
88 | vcpu_e500->mas0 = vcpu->arch.gpr[rs]; break; | 89 | vcpu_e500->mas0 = spr_val; break; |
89 | case SPRN_MAS1: | 90 | case SPRN_MAS1: |
90 | vcpu_e500->mas1 = vcpu->arch.gpr[rs]; break; | 91 | vcpu_e500->mas1 = spr_val; break; |
91 | case SPRN_MAS2: | 92 | case SPRN_MAS2: |
92 | vcpu_e500->mas2 = vcpu->arch.gpr[rs]; break; | 93 | vcpu_e500->mas2 = spr_val; break; |
93 | case SPRN_MAS3: | 94 | case SPRN_MAS3: |
94 | vcpu_e500->mas3 = vcpu->arch.gpr[rs]; break; | 95 | vcpu_e500->mas3 = spr_val; break; |
95 | case SPRN_MAS4: | 96 | case SPRN_MAS4: |
96 | vcpu_e500->mas4 = vcpu->arch.gpr[rs]; break; | 97 | vcpu_e500->mas4 = spr_val; break; |
97 | case SPRN_MAS6: | 98 | case SPRN_MAS6: |
98 | vcpu_e500->mas6 = vcpu->arch.gpr[rs]; break; | 99 | vcpu_e500->mas6 = spr_val; break; |
99 | case SPRN_MAS7: | 100 | case SPRN_MAS7: |
100 | vcpu_e500->mas7 = vcpu->arch.gpr[rs]; break; | 101 | vcpu_e500->mas7 = spr_val; break; |
102 | case SPRN_L1CSR0: | ||
103 | vcpu_e500->l1csr0 = spr_val; | ||
104 | vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); | ||
105 | break; | ||
101 | case SPRN_L1CSR1: | 106 | case SPRN_L1CSR1: |
102 | vcpu_e500->l1csr1 = vcpu->arch.gpr[rs]; break; | 107 | vcpu_e500->l1csr1 = spr_val; break; |
103 | case SPRN_HID0: | 108 | case SPRN_HID0: |
104 | vcpu_e500->hid0 = vcpu->arch.gpr[rs]; break; | 109 | vcpu_e500->hid0 = spr_val; break; |
105 | case SPRN_HID1: | 110 | case SPRN_HID1: |
106 | vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break; | 111 | vcpu_e500->hid1 = spr_val; break; |
107 | 112 | ||
108 | case SPRN_MMUCSR0: | 113 | case SPRN_MMUCSR0: |
109 | emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, | 114 | emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, |
110 | vcpu->arch.gpr[rs]); | 115 | spr_val); |
111 | break; | 116 | break; |
112 | 117 | ||
113 | /* extra exceptions */ | 118 | /* extra exceptions */ |
114 | case SPRN_IVOR32: | 119 | case SPRN_IVOR32: |
115 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = vcpu->arch.gpr[rs]; | 120 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val; |
116 | break; | 121 | break; |
117 | case SPRN_IVOR33: | 122 | case SPRN_IVOR33: |
118 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = vcpu->arch.gpr[rs]; | 123 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val; |
119 | break; | 124 | break; |
120 | case SPRN_IVOR34: | 125 | case SPRN_IVOR34: |
121 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = vcpu->arch.gpr[rs]; | 126 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val; |
122 | break; | 127 | break; |
123 | case SPRN_IVOR35: | 128 | case SPRN_IVOR35: |
124 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = vcpu->arch.gpr[rs]; | 129 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val; |
125 | break; | 130 | break; |
126 | 131 | ||
127 | default: | 132 | default: |
@@ -138,63 +143,57 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
138 | 143 | ||
139 | switch (sprn) { | 144 | switch (sprn) { |
140 | case SPRN_PID: | 145 | case SPRN_PID: |
141 | vcpu->arch.gpr[rt] = vcpu_e500->pid[0]; break; | 146 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break; |
142 | case SPRN_PID1: | 147 | case SPRN_PID1: |
143 | vcpu->arch.gpr[rt] = vcpu_e500->pid[1]; break; | 148 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break; |
144 | case SPRN_PID2: | 149 | case SPRN_PID2: |
145 | vcpu->arch.gpr[rt] = vcpu_e500->pid[2]; break; | 150 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break; |
146 | case SPRN_MAS0: | 151 | case SPRN_MAS0: |
147 | vcpu->arch.gpr[rt] = vcpu_e500->mas0; break; | 152 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break; |
148 | case SPRN_MAS1: | 153 | case SPRN_MAS1: |
149 | vcpu->arch.gpr[rt] = vcpu_e500->mas1; break; | 154 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break; |
150 | case SPRN_MAS2: | 155 | case SPRN_MAS2: |
151 | vcpu->arch.gpr[rt] = vcpu_e500->mas2; break; | 156 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break; |
152 | case SPRN_MAS3: | 157 | case SPRN_MAS3: |
153 | vcpu->arch.gpr[rt] = vcpu_e500->mas3; break; | 158 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break; |
154 | case SPRN_MAS4: | 159 | case SPRN_MAS4: |
155 | vcpu->arch.gpr[rt] = vcpu_e500->mas4; break; | 160 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break; |
156 | case SPRN_MAS6: | 161 | case SPRN_MAS6: |
157 | vcpu->arch.gpr[rt] = vcpu_e500->mas6; break; | 162 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break; |
158 | case SPRN_MAS7: | 163 | case SPRN_MAS7: |
159 | vcpu->arch.gpr[rt] = vcpu_e500->mas7; break; | 164 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break; |
160 | 165 | ||
161 | case SPRN_TLB0CFG: | 166 | case SPRN_TLB0CFG: |
162 | vcpu->arch.gpr[rt] = mfspr(SPRN_TLB0CFG); | 167 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break; |
163 | vcpu->arch.gpr[rt] &= ~0xfffUL; | ||
164 | vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[0]; | ||
165 | break; | ||
166 | |||
167 | case SPRN_TLB1CFG: | 168 | case SPRN_TLB1CFG: |
168 | vcpu->arch.gpr[rt] = mfspr(SPRN_TLB1CFG); | 169 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break; |
169 | vcpu->arch.gpr[rt] &= ~0xfffUL; | 170 | case SPRN_L1CSR0: |
170 | vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[1]; | 171 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; |
171 | break; | ||
172 | |||
173 | case SPRN_L1CSR1: | 172 | case SPRN_L1CSR1: |
174 | vcpu->arch.gpr[rt] = vcpu_e500->l1csr1; break; | 173 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break; |
175 | case SPRN_HID0: | 174 | case SPRN_HID0: |
176 | vcpu->arch.gpr[rt] = vcpu_e500->hid0; break; | 175 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break; |
177 | case SPRN_HID1: | 176 | case SPRN_HID1: |
178 | vcpu->arch.gpr[rt] = vcpu_e500->hid1; break; | 177 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break; |
179 | 178 | ||
180 | case SPRN_MMUCSR0: | 179 | case SPRN_MMUCSR0: |
181 | vcpu->arch.gpr[rt] = 0; break; | 180 | kvmppc_set_gpr(vcpu, rt, 0); break; |
182 | 181 | ||
183 | case SPRN_MMUCFG: | 182 | case SPRN_MMUCFG: |
184 | vcpu->arch.gpr[rt] = mfspr(SPRN_MMUCFG); break; | 183 | kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break; |
185 | 184 | ||
186 | /* extra exceptions */ | 185 | /* extra exceptions */ |
187 | case SPRN_IVOR32: | 186 | case SPRN_IVOR32: |
188 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; | 187 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]); |
189 | break; | 188 | break; |
190 | case SPRN_IVOR33: | 189 | case SPRN_IVOR33: |
191 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; | 190 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]); |
192 | break; | 191 | break; |
193 | case SPRN_IVOR34: | 192 | case SPRN_IVOR34: |
194 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; | 193 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]); |
195 | break; | 194 | break; |
196 | case SPRN_IVOR35: | 195 | case SPRN_IVOR35: |
197 | vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; | 196 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]); |
198 | break; | 197 | break; |
199 | default: | 198 | default: |
200 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); | 199 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); |
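A note on the pattern running through these e500 hunks: every direct read or write of vcpu->arch.gpr[] is replaced by kvmppc_get_gpr()/kvmppc_set_gpr(). The real accessors are defined elsewhere in the tree (presumably asm/kvm_ppc.h); the user-space sketch below, with an invented struct layout, shows the shape of the indirection and why it is worth the churn: the register file's backing store can later move without touching every emulation site.

/* User-space sketch of the accessor pattern; the real definitions live
 * elsewhere in the tree and this struct layout is invented. */
#include <stdio.h>

struct vcpu_arch { unsigned long gpr[32]; };
struct kvm_vcpu  { struct vcpu_arch arch; };

/* One chokepoint for all GPR traffic: the backing store can change
 * later without touching the callers. */
static inline unsigned long kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
        return vcpu->arch.gpr[num];
}

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num,
                                  unsigned long val)
{
        vcpu->arch.gpr[num] = val;
}

int main(void)
{
        struct kvm_vcpu vcpu = {{{0}}};

        kvmppc_set_gpr(&vcpu, 3, 0xdeadbeefUL);
        printf("r3 = 0x%lx\n", kvmppc_get_gpr(&vcpu, 3));
        return 0;
}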
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index fb1e1dc11ba5..21011e12caeb 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | #include <linux/slab.h> | ||
16 | #include <linux/string.h> | 17 | #include <linux/string.h> |
17 | #include <linux/kvm.h> | 18 | #include <linux/kvm.h> |
18 | #include <linux/kvm_host.h> | 19 | #include <linux/kvm_host.h> |
@@ -417,7 +418,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) | |||
417 | int esel, tlbsel; | 418 | int esel, tlbsel; |
418 | gva_t ea; | 419 | gva_t ea; |
419 | 420 | ||
420 | ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb]; | 421 | ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb); |
421 | 422 | ||
422 | ia = (ea >> 2) & 0x1; | 423 | ia = (ea >> 2) & 0x1; |
423 | 424 | ||
@@ -470,7 +471,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb) | |||
470 | struct tlbe *gtlbe = NULL; | 471 | struct tlbe *gtlbe = NULL; |
471 | gva_t ea; | 472 | gva_t ea; |
472 | 473 | ||
473 | ea = vcpu->arch.gpr[rb]; | 474 | ea = kvmppc_get_gpr(vcpu, rb); |
474 | 475 | ||
475 | for (tlbsel = 0; tlbsel < 2; tlbsel++) { | 476 | for (tlbsel = 0; tlbsel < 2; tlbsel++) { |
476 | esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); | 477 | esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); |
@@ -728,6 +729,12 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
728 | if (vcpu_e500->shadow_pages[1] == NULL) | 729 | if (vcpu_e500->shadow_pages[1] == NULL) |
729 | goto err_out_page0; | 730 | goto err_out_page0; |
730 | 731 | ||
732 | /* Init TLB configuration register */ | ||
733 | vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL; | ||
734 | vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0]; | ||
735 | vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL; | ||
736 | vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1]; | ||
737 | |||
731 | return 0; | 738 | return 0; |
732 | 739 | ||
733 | err_out_page0: | 740 | err_out_page0: |
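Beyond the accessor conversion, this file makes the TLB0CFG/TLB1CFG read emulation cheaper: instead of an mfspr() plus masking on every guest access, kvmppc_e500_tlb_init() now composes the value once. The arithmetic is simply "host configuration bits above bit 11, guest entry count below"; a stand-alone check with a made-up host register value:

/* Stand-alone check of the TLBnCFG composition now done at init time.
 * The host value here is invented. */
#include <stdio.h>

int main(void)
{
        unsigned long host_tlb1cfg = 0x40004040UL; /* pretend mfspr(SPRN_TLB1CFG) */
        unsigned long guest_tlb1_size = 16;        /* guest-visible entry count */
        unsigned long tlb1cfg;

        tlb1cfg = (host_tlb1cfg & ~0xfffUL) | guest_tlb1_size;
        printf("guest sees TLB1CFG = 0x%lx\n", tlb1cfg); /* 0x40004010 */
        return 0;
}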
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 7737146af3fb..cb72a65f4ecc 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/jiffies.h> | 20 | #include <linux/jiffies.h> |
21 | #include <linux/timer.h> | 21 | #include <linux/hrtimer.h> |
22 | #include <linux/types.h> | 22 | #include <linux/types.h> |
23 | #include <linux/string.h> | 23 | #include <linux/string.h> |
24 | #include <linux/kvm_host.h> | 24 | #include <linux/kvm_host.h> |
@@ -32,6 +32,7 @@ | |||
32 | #include "trace.h" | 32 | #include "trace.h" |
33 | 33 | ||
34 | #define OP_TRAP 3 | 34 | #define OP_TRAP 3 |
35 | #define OP_TRAP_64 2 | ||
35 | 36 | ||
36 | #define OP_31_XOP_LWZX 23 | 37 | #define OP_31_XOP_LWZX 23 |
37 | #define OP_31_XOP_LBZX 87 | 38 | #define OP_31_XOP_LBZX 87 |
@@ -64,19 +65,48 @@ | |||
64 | #define OP_STH 44 | 65 | #define OP_STH 44 |
65 | #define OP_STHU 45 | 66 | #define OP_STHU 45 |
66 | 67 | ||
68 | #ifdef CONFIG_PPC64 | ||
69 | static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu) | ||
70 | { | ||
71 | return 1; | ||
72 | } | ||
73 | #else | ||
74 | static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu) | ||
75 | { | ||
76 | return vcpu->arch.tcr & TCR_DIE; | ||
77 | } | ||
78 | #endif | ||
79 | |||
67 | void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | 80 | void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) |
68 | { | 81 | { |
69 | if (vcpu->arch.tcr & TCR_DIE) { | 82 | unsigned long dec_nsec; |
83 | |||
84 | pr_debug("mtDEC: %x\n", vcpu->arch.dec); | ||
85 | #ifdef CONFIG_PPC64 | ||
86 | /* mtdec lowers the interrupt line when positive. */ | ||
87 | kvmppc_core_dequeue_dec(vcpu); | ||
88 | |||
89 | /* POWER4+ triggers a dec interrupt if the value is < 0 */ | ||
90 | if (vcpu->arch.dec & 0x80000000) { | ||
91 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); | ||
92 | kvmppc_core_queue_dec(vcpu); | ||
93 | return; | ||
94 | } | ||
95 | #endif | ||
96 | if (kvmppc_dec_enabled(vcpu)) { | ||
70 | /* The decrementer ticks at the same rate as the timebase, so | 97 | /* The decrementer ticks at the same rate as the timebase, so |
71 | * that's how we convert the guest DEC value to the number of | 98 | * that's how we convert the guest DEC value to the number of |
72 | * host ticks. */ | 99 | * host ticks. */ |
73 | unsigned long nr_jiffies; | ||
74 | 100 | ||
75 | nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy; | 101 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); |
76 | mod_timer(&vcpu->arch.dec_timer, | 102 | dec_nsec = vcpu->arch.dec; |
77 | get_jiffies_64() + nr_jiffies); | 103 | dec_nsec *= 1000; |
104 | dec_nsec /= tb_ticks_per_usec; | ||
105 | hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), | ||
106 | HRTIMER_MODE_REL); | ||
107 | vcpu->arch.dec_jiffies = get_tb(); | ||
78 | } else { | 108 | } else { |
79 | del_timer(&vcpu->arch.dec_timer); | 109 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); |
80 | } | 110 | } |
81 | } | 111 | } |
82 | 112 | ||
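The decrementer emulation moves from a jiffies-resolution kernel timer to an hrtimer, so the guest DEC value, which counts down at the timebase frequency, is converted to nanoseconds rather than to jiffies. The new code computes dec * 1000 / tb_ticks_per_usec; a quick stand-alone check assuming a 512 MHz timebase (tb_ticks_per_usec == 512):

/* Verify the DEC-to-nanoseconds conversion with assumed host numbers. */
#include <stdio.h>

int main(void)
{
        unsigned long long tb_ticks_per_usec = 512; /* assumed host timebase */
        unsigned long long dec = 0x10000000;        /* guest-programmed DEC */
        unsigned long long dec_nsec;

        dec_nsec = dec * 1000 / tb_ticks_per_usec;
        printf("DEC 0x%llx -> %llu ns (~%llu ms)\n",
               dec, dec_nsec, dec_nsec / 1000000);
        return 0;
}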
@@ -111,10 +141,20 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
111 | /* this default type might be overwritten by subcategories */ | 141 | /* this default type might be overwritten by subcategories */ |
112 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | 142 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); |
113 | 143 | ||
144 | pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); | ||
145 | |||
146 | /* Try again next time */ | ||
147 | if (inst == KVM_INST_FETCH_FAILED) | ||
148 | return EMULATE_DONE; | ||
149 | |||
114 | switch (get_op(inst)) { | 150 | switch (get_op(inst)) { |
115 | case OP_TRAP: | 151 | case OP_TRAP: |
116 | vcpu->arch.esr |= ESR_PTR; | 152 | #ifdef CONFIG_PPC64 |
117 | kvmppc_core_queue_program(vcpu); | 153 | case OP_TRAP_64: |
154 | kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); | ||
155 | #else | ||
156 | kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR); | ||
157 | #endif | ||
118 | advance = 0; | 158 | advance = 0; |
119 | break; | 159 | break; |
120 | 160 | ||
@@ -134,14 +174,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
134 | case OP_31_XOP_STWX: | 174 | case OP_31_XOP_STWX: |
135 | rs = get_rs(inst); | 175 | rs = get_rs(inst); |
136 | emulated = kvmppc_handle_store(run, vcpu, | 176 | emulated = kvmppc_handle_store(run, vcpu, |
137 | vcpu->arch.gpr[rs], | 177 | kvmppc_get_gpr(vcpu, rs), |
138 | 4, 1); | 178 | 4, 1); |
139 | break; | 179 | break; |
140 | 180 | ||
141 | case OP_31_XOP_STBX: | 181 | case OP_31_XOP_STBX: |
142 | rs = get_rs(inst); | 182 | rs = get_rs(inst); |
143 | emulated = kvmppc_handle_store(run, vcpu, | 183 | emulated = kvmppc_handle_store(run, vcpu, |
144 | vcpu->arch.gpr[rs], | 184 | kvmppc_get_gpr(vcpu, rs), |
145 | 1, 1); | 185 | 1, 1); |
146 | break; | 186 | break; |
147 | 187 | ||
@@ -150,14 +190,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
150 | ra = get_ra(inst); | 190 | ra = get_ra(inst); |
151 | rb = get_rb(inst); | 191 | rb = get_rb(inst); |
152 | 192 | ||
153 | ea = vcpu->arch.gpr[rb]; | 193 | ea = kvmppc_get_gpr(vcpu, rb); |
154 | if (ra) | 194 | if (ra) |
155 | ea += vcpu->arch.gpr[ra]; | 195 | ea += kvmppc_get_gpr(vcpu, ra); |
156 | 196 | ||
157 | emulated = kvmppc_handle_store(run, vcpu, | 197 | emulated = kvmppc_handle_store(run, vcpu, |
158 | vcpu->arch.gpr[rs], | 198 | kvmppc_get_gpr(vcpu, rs), |
159 | 1, 1); | 199 | 1, 1); |
160 | vcpu->arch.gpr[rs] = ea; | 200 | kvmppc_set_gpr(vcpu, rs, ea); |
161 | break; | 201 | break; |
162 | 202 | ||
163 | case OP_31_XOP_LHZX: | 203 | case OP_31_XOP_LHZX: |
@@ -170,12 +210,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
170 | ra = get_ra(inst); | 210 | ra = get_ra(inst); |
171 | rb = get_rb(inst); | 211 | rb = get_rb(inst); |
172 | 212 | ||
173 | ea = vcpu->arch.gpr[rb]; | 213 | ea = kvmppc_get_gpr(vcpu, rb); |
174 | if (ra) | 214 | if (ra) |
175 | ea += vcpu->arch.gpr[ra]; | 215 | ea += kvmppc_get_gpr(vcpu, ra); |
176 | 216 | ||
177 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 217 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
178 | vcpu->arch.gpr[ra] = ea; | 218 | kvmppc_set_gpr(vcpu, ra, ea); |
179 | break; | 219 | break; |
180 | 220 | ||
181 | case OP_31_XOP_MFSPR: | 221 | case OP_31_XOP_MFSPR: |
@@ -184,38 +224,49 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
184 | 224 | ||
185 | switch (sprn) { | 225 | switch (sprn) { |
186 | case SPRN_SRR0: | 226 | case SPRN_SRR0: |
187 | vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; | 227 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break; |
188 | case SPRN_SRR1: | 228 | case SPRN_SRR1: |
189 | vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; | 229 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break; |
190 | case SPRN_PVR: | 230 | case SPRN_PVR: |
191 | vcpu->arch.gpr[rt] = mfspr(SPRN_PVR); break; | 231 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; |
192 | case SPRN_PIR: | 232 | case SPRN_PIR: |
193 | vcpu->arch.gpr[rt] = mfspr(SPRN_PIR); break; | 233 | kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; |
234 | case SPRN_MSSSR0: | ||
235 | kvmppc_set_gpr(vcpu, rt, 0); break; | ||
194 | 236 | ||
195 | /* Note: mftb and TBRL/TBWL are user-accessible, so | 237 | /* Note: mftb and TBRL/TBWL are user-accessible, so |
196 | * the guest can always access the real TB anyways. | 238 | * the guest can always access the real TB anyways. |
197 | * In fact, we probably will never see these traps. */ | 239 | * In fact, we probably will never see these traps. */ |
198 | case SPRN_TBWL: | 240 | case SPRN_TBWL: |
199 | vcpu->arch.gpr[rt] = mftbl(); break; | 241 | kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; |
200 | case SPRN_TBWU: | 242 | case SPRN_TBWU: |
201 | vcpu->arch.gpr[rt] = mftbu(); break; | 243 | kvmppc_set_gpr(vcpu, rt, get_tb()); break; |
202 | 244 | ||
203 | case SPRN_SPRG0: | 245 | case SPRN_SPRG0: |
204 | vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break; | 246 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break; |
205 | case SPRN_SPRG1: | 247 | case SPRN_SPRG1: |
206 | vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break; | 248 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break; |
207 | case SPRN_SPRG2: | 249 | case SPRN_SPRG2: |
208 | vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break; | 250 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break; |
209 | case SPRN_SPRG3: | 251 | case SPRN_SPRG3: |
210 | vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break; | 252 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break; |
211 | /* Note: SPRG4-7 are user-readable, so we don't get | 253 | /* Note: SPRG4-7 are user-readable, so we don't get |
212 | * a trap. */ | 254 | * a trap. */ |
213 | 255 | ||
256 | case SPRN_DEC: | ||
257 | { | ||
258 | u64 jd = get_tb() - vcpu->arch.dec_jiffies; | ||
259 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd); | ||
260 | pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", | ||
261 | vcpu->arch.dec, jd, | ||
262 | kvmppc_get_gpr(vcpu, rt)); | ||
263 | break; | ||
264 | } | ||
214 | default: | 265 | default: |
215 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); | 266 | emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); |
216 | if (emulated == EMULATE_FAIL) { | 267 | if (emulated == EMULATE_FAIL) { |
217 | printk("mfspr: unknown spr %x\n", sprn); | 268 | printk("mfspr: unknown spr %x\n", sprn); |
218 | vcpu->arch.gpr[rt] = 0; | 269 | kvmppc_set_gpr(vcpu, rt, 0); |
219 | } | 270 | } |
220 | break; | 271 | break; |
221 | } | 272 | } |
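The new SPRN_DEC read path works backwards from the write: dec_jiffies (which, despite the name, now stores a get_tb() snapshot, as the mtDEC hunk above shows) marks when the guest programmed DEC, and mfspr(DEC) returns the programmed value minus the timebase ticks elapsed since. An illustration with invented numbers:

/* Reconstruct the mfDEC arithmetic from the diff; values are made up. */
#include <stdio.h>

int main(void)
{
        unsigned int dec_at_write = 0x00100000;    /* value the guest wrote */
        unsigned long long tb_at_write = 1000000;  /* get_tb() at mtspr time */
        unsigned long long tb_now = 1000400;       /* get_tb() at mfspr time */
        unsigned int dec_now;

        dec_now = dec_at_write - (unsigned int)(tb_now - tb_at_write);
        printf("mfDEC -> 0x%x\n", dec_now);        /* 0x000ffe70 */
        return 0;
}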
@@ -227,7 +278,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
227 | rb = get_rb(inst); | 278 | rb = get_rb(inst); |
228 | 279 | ||
229 | emulated = kvmppc_handle_store(run, vcpu, | 280 | emulated = kvmppc_handle_store(run, vcpu, |
230 | vcpu->arch.gpr[rs], | 281 | kvmppc_get_gpr(vcpu, rs), |
231 | 2, 1); | 282 | 2, 1); |
232 | break; | 283 | break; |
233 | 284 | ||
@@ -236,14 +287,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
236 | ra = get_ra(inst); | 287 | ra = get_ra(inst); |
237 | rb = get_rb(inst); | 288 | rb = get_rb(inst); |
238 | 289 | ||
239 | ea = vcpu->arch.gpr[rb]; | 290 | ea = kvmppc_get_gpr(vcpu, rb); |
240 | if (ra) | 291 | if (ra) |
241 | ea += vcpu->arch.gpr[ra]; | 292 | ea += kvmppc_get_gpr(vcpu, ra); |
242 | 293 | ||
243 | emulated = kvmppc_handle_store(run, vcpu, | 294 | emulated = kvmppc_handle_store(run, vcpu, |
244 | vcpu->arch.gpr[rs], | 295 | kvmppc_get_gpr(vcpu, rs), |
245 | 2, 1); | 296 | 2, 1); |
246 | vcpu->arch.gpr[ra] = ea; | 297 | kvmppc_set_gpr(vcpu, ra, ea); |
247 | break; | 298 | break; |
248 | 299 | ||
249 | case OP_31_XOP_MTSPR: | 300 | case OP_31_XOP_MTSPR: |
@@ -251,28 +302,30 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
251 | rs = get_rs(inst); | 302 | rs = get_rs(inst); |
252 | switch (sprn) { | 303 | switch (sprn) { |
253 | case SPRN_SRR0: | 304 | case SPRN_SRR0: |
254 | vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; | 305 | vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break; |
255 | case SPRN_SRR1: | 306 | case SPRN_SRR1: |
256 | vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; | 307 | vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break; |
257 | 308 | ||
258 | /* XXX We need to context-switch the timebase for | 309 | /* XXX We need to context-switch the timebase for |
259 | * watchdog and FIT. */ | 310 | * watchdog and FIT. */ |
260 | case SPRN_TBWL: break; | 311 | case SPRN_TBWL: break; |
261 | case SPRN_TBWU: break; | 312 | case SPRN_TBWU: break; |
262 | 313 | ||
314 | case SPRN_MSSSR0: break; | ||
315 | |||
263 | case SPRN_DEC: | 316 | case SPRN_DEC: |
264 | vcpu->arch.dec = vcpu->arch.gpr[rs]; | 317 | vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); |
265 | kvmppc_emulate_dec(vcpu); | 318 | kvmppc_emulate_dec(vcpu); |
266 | break; | 319 | break; |
267 | 320 | ||
268 | case SPRN_SPRG0: | 321 | case SPRN_SPRG0: |
269 | vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; | 322 | vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break; |
270 | case SPRN_SPRG1: | 323 | case SPRN_SPRG1: |
271 | vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break; | 324 | vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break; |
272 | case SPRN_SPRG2: | 325 | case SPRN_SPRG2: |
273 | vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break; | 326 | vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break; |
274 | case SPRN_SPRG3: | 327 | case SPRN_SPRG3: |
275 | vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; | 328 | vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break; |
276 | 329 | ||
277 | default: | 330 | default: |
278 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); | 331 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); |
@@ -304,7 +357,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
304 | rb = get_rb(inst); | 357 | rb = get_rb(inst); |
305 | 358 | ||
306 | emulated = kvmppc_handle_store(run, vcpu, | 359 | emulated = kvmppc_handle_store(run, vcpu, |
307 | vcpu->arch.gpr[rs], | 360 | kvmppc_get_gpr(vcpu, rs), |
308 | 4, 0); | 361 | 4, 0); |
309 | break; | 362 | break; |
310 | 363 | ||
@@ -319,7 +372,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
319 | rb = get_rb(inst); | 372 | rb = get_rb(inst); |
320 | 373 | ||
321 | emulated = kvmppc_handle_store(run, vcpu, | 374 | emulated = kvmppc_handle_store(run, vcpu, |
322 | vcpu->arch.gpr[rs], | 375 | kvmppc_get_gpr(vcpu, rs), |
323 | 2, 0); | 376 | 2, 0); |
324 | break; | 377 | break; |
325 | 378 | ||
@@ -338,7 +391,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
338 | ra = get_ra(inst); | 391 | ra = get_ra(inst); |
339 | rt = get_rt(inst); | 392 | rt = get_rt(inst); |
340 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); | 393 | emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); |
341 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 394 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
342 | break; | 395 | break; |
343 | 396 | ||
344 | case OP_LBZ: | 397 | case OP_LBZ: |
@@ -350,35 +403,39 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
350 | ra = get_ra(inst); | 403 | ra = get_ra(inst); |
351 | rt = get_rt(inst); | 404 | rt = get_rt(inst); |
352 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); | 405 | emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); |
353 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 406 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
354 | break; | 407 | break; |
355 | 408 | ||
356 | case OP_STW: | 409 | case OP_STW: |
357 | rs = get_rs(inst); | 410 | rs = get_rs(inst); |
358 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 411 | emulated = kvmppc_handle_store(run, vcpu, |
412 | kvmppc_get_gpr(vcpu, rs), | ||
359 | 4, 1); | 413 | 4, 1); |
360 | break; | 414 | break; |
361 | 415 | ||
362 | case OP_STWU: | 416 | case OP_STWU: |
363 | ra = get_ra(inst); | 417 | ra = get_ra(inst); |
364 | rs = get_rs(inst); | 418 | rs = get_rs(inst); |
365 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 419 | emulated = kvmppc_handle_store(run, vcpu, |
420 | kvmppc_get_gpr(vcpu, rs), | ||
366 | 4, 1); | 421 | 4, 1); |
367 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 422 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
368 | break; | 423 | break; |
369 | 424 | ||
370 | case OP_STB: | 425 | case OP_STB: |
371 | rs = get_rs(inst); | 426 | rs = get_rs(inst); |
372 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 427 | emulated = kvmppc_handle_store(run, vcpu, |
428 | kvmppc_get_gpr(vcpu, rs), | ||
373 | 1, 1); | 429 | 1, 1); |
374 | break; | 430 | break; |
375 | 431 | ||
376 | case OP_STBU: | 432 | case OP_STBU: |
377 | ra = get_ra(inst); | 433 | ra = get_ra(inst); |
378 | rs = get_rs(inst); | 434 | rs = get_rs(inst); |
379 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 435 | emulated = kvmppc_handle_store(run, vcpu, |
436 | kvmppc_get_gpr(vcpu, rs), | ||
380 | 1, 1); | 437 | 1, 1); |
381 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 438 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
382 | break; | 439 | break; |
383 | 440 | ||
384 | case OP_LHZ: | 441 | case OP_LHZ: |
@@ -390,21 +447,23 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
390 | ra = get_ra(inst); | 447 | ra = get_ra(inst); |
391 | rt = get_rt(inst); | 448 | rt = get_rt(inst); |
392 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); | 449 | emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); |
393 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 450 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
394 | break; | 451 | break; |
395 | 452 | ||
396 | case OP_STH: | 453 | case OP_STH: |
397 | rs = get_rs(inst); | 454 | rs = get_rs(inst); |
398 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 455 | emulated = kvmppc_handle_store(run, vcpu, |
456 | kvmppc_get_gpr(vcpu, rs), | ||
399 | 2, 1); | 457 | 2, 1); |
400 | break; | 458 | break; |
401 | 459 | ||
402 | case OP_STHU: | 460 | case OP_STHU: |
403 | ra = get_ra(inst); | 461 | ra = get_ra(inst); |
404 | rs = get_rs(inst); | 462 | rs = get_rs(inst); |
405 | emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], | 463 | emulated = kvmppc_handle_store(run, vcpu, |
464 | kvmppc_get_gpr(vcpu, rs), | ||
406 | 2, 1); | 465 | 2, 1); |
407 | vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; | 466 | kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); |
408 | break; | 467 | break; |
409 | 468 | ||
410 | default: | 469 | default: |
@@ -417,6 +476,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
417 | advance = 0; | 476 | advance = 0; |
418 | printk(KERN_ERR "Couldn't emulate instruction 0x%08x " | 477 | printk(KERN_ERR "Couldn't emulate instruction 0x%08x " |
419 | "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); | 478 | "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); |
479 | kvmppc_core_queue_program(vcpu, 0); | ||
420 | } | 480 | } |
421 | } | 481 | } |
422 | 482 | ||
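Two behavioural changes in emulate.c are easy to miss among the accessor churn: an instruction fetch that returned KVM_INST_FETCH_FAILED now bails out early with EMULATE_DONE so the fault is simply retried on the next exit, and a genuinely unknown opcode now queues a program interrupt into the guest instead of only logging. A compressed sketch of the resulting control flow; the sentinel value is an assumption and the opcode number is taken from the diff:

/* Sketch only: KVM_INST_FETCH_FAILED's value is assumed here. */
#include <stdio.h>

#define KVM_INST_FETCH_FAILED 0xffffffffU  /* value assumed */
#define OP_TRAP 3

enum { EMULATE_DONE, EMULATE_FAIL };

static int emulate(unsigned int inst)
{
        if (inst == KVM_INST_FETCH_FAILED)
                return EMULATE_DONE;       /* retry on the next exit */

        switch (inst >> 26) {              /* primary opcode field */
        case OP_TRAP:
                /* queue a program interrupt flagged as a trap */
                return EMULATE_DONE;
        default:
                /* unknown: reflect a program check into the guest,
                 * as kvmppc_core_queue_program(vcpu, 0) does above */
                return EMULATE_FAIL;
        }
}

int main(void)
{
        printf("fetch-failed -> %d, trap -> %d\n",
               emulate(KVM_INST_FETCH_FAILED), emulate(3u << 26));
        return 0;
}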
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 2a4551f78f60..297fcd2ff7d0 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -23,7 +23,9 @@ | |||
23 | #include <linux/kvm_host.h> | 23 | #include <linux/kvm_host.h> |
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/vmalloc.h> | 25 | #include <linux/vmalloc.h> |
26 | #include <linux/hrtimer.h> | ||
26 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
28 | #include <linux/slab.h> | ||
27 | #include <asm/cputable.h> | 29 | #include <asm/cputable.h> |
28 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
29 | #include <asm/kvm_ppc.h> | 31 | #include <asm/kvm_ppc.h> |
@@ -78,8 +80,9 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
78 | return r; | 80 | return r; |
79 | } | 81 | } |
80 | 82 | ||
81 | void kvm_arch_hardware_enable(void *garbage) | 83 | int kvm_arch_hardware_enable(void *garbage) |
82 | { | 84 | { |
85 | return 0; | ||
83 | } | 86 | } |
84 | 87 | ||
85 | void kvm_arch_hardware_disable(void *garbage) | 88 | void kvm_arch_hardware_disable(void *garbage) |
@@ -135,6 +138,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
135 | { | 138 | { |
136 | kvmppc_free_vcpus(kvm); | 139 | kvmppc_free_vcpus(kvm); |
137 | kvm_free_physmem(kvm); | 140 | kvm_free_physmem(kvm); |
141 | cleanup_srcu_struct(&kvm->srcu); | ||
138 | kfree(kvm); | 142 | kfree(kvm); |
139 | } | 143 | } |
140 | 144 | ||
@@ -143,6 +147,9 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
143 | int r; | 147 | int r; |
144 | 148 | ||
145 | switch (ext) { | 149 | switch (ext) { |
150 | case KVM_CAP_PPC_SEGSTATE: | ||
151 | r = 1; | ||
152 | break; | ||
146 | case KVM_CAP_COALESCED_MMIO: | 153 | case KVM_CAP_COALESCED_MMIO: |
147 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 154 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
148 | break; | 155 | break; |
@@ -160,14 +167,24 @@ long kvm_arch_dev_ioctl(struct file *filp, | |||
160 | return -EINVAL; | 167 | return -EINVAL; |
161 | } | 168 | } |
162 | 169 | ||
163 | int kvm_arch_set_memory_region(struct kvm *kvm, | 170 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
164 | struct kvm_userspace_memory_region *mem, | 171 | struct kvm_memory_slot *memslot, |
165 | struct kvm_memory_slot old, | 172 | struct kvm_memory_slot old, |
166 | int user_alloc) | 173 | struct kvm_userspace_memory_region *mem, |
174 | int user_alloc) | ||
167 | { | 175 | { |
168 | return 0; | 176 | return 0; |
169 | } | 177 | } |
170 | 178 | ||
179 | void kvm_arch_commit_memory_region(struct kvm *kvm, | ||
180 | struct kvm_userspace_memory_region *mem, | ||
181 | struct kvm_memory_slot old, | ||
182 | int user_alloc) | ||
183 | { | ||
184 | return; | ||
185 | } | ||
186 | |||
187 | |||
171 | void kvm_arch_flush_shadow(struct kvm *kvm) | 188 | void kvm_arch_flush_shadow(struct kvm *kvm) |
172 | { | 189 | { |
173 | } | 190 | } |
@@ -208,10 +225,25 @@ static void kvmppc_decrementer_func(unsigned long data) | |||
208 | } | 225 | } |
209 | } | 226 | } |
210 | 227 | ||
228 | /* | ||
229 | * low level hrtimer wake routine. Because this runs in hardirq context | ||
230 | * we schedule a tasklet to do the real work. | ||
231 | */ | ||
232 | enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer) | ||
233 | { | ||
234 | struct kvm_vcpu *vcpu; | ||
235 | |||
236 | vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer); | ||
237 | tasklet_schedule(&vcpu->arch.tasklet); | ||
238 | |||
239 | return HRTIMER_NORESTART; | ||
240 | } | ||
241 | |||
211 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | 242 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) |
212 | { | 243 | { |
213 | setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func, | 244 | hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); |
214 | (unsigned long)vcpu); | 245 | tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu); |
246 | vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; | ||
215 | 247 | ||
216 | return 0; | 248 | return 0; |
217 | } | 249 | } |
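The comment added above the wakeup routine states the key constraint: hrtimer callbacks run in hardirq context, so the handler only schedules a tasklet, and the decrementer work proper runs later in softirq context. A minimal kernel-style sketch of that split, using invented names; this is shape, not a buildable unit outside a kernel tree:

/* Hedged sketch of the hardirq/softirq deferral pattern (invented names). */
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

struct demo_ctx {
        struct hrtimer timer;
        struct tasklet_struct tasklet;
};

/* hardirq context: do nothing heavy, just kick the tasklet */
static enum hrtimer_restart demo_wakeup(struct hrtimer *timer)
{
        struct demo_ctx *ctx = container_of(timer, struct demo_ctx, timer);

        tasklet_schedule(&ctx->tasklet);
        return HRTIMER_NORESTART;
}

/* softirq context: the safe place for the real work */
static void demo_work(unsigned long data)
{
        struct demo_ctx *ctx = (struct demo_ctx *)data;
        /* ... deliver the interrupt, rearm, etc. ... */
        (void)ctx;
}

static void demo_init(struct demo_ctx *ctx)
{
        hrtimer_init(&ctx->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        ctx->timer.function = demo_wakeup;
        tasklet_init(&ctx->tasklet, demo_work, (unsigned long)ctx);
}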
@@ -240,34 +272,35 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | |||
240 | static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, | 272 | static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, |
241 | struct kvm_run *run) | 273 | struct kvm_run *run) |
242 | { | 274 | { |
243 | ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; | 275 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data); |
244 | *gpr = run->dcr.data; | ||
245 | } | 276 | } |
246 | 277 | ||
247 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, | 278 | static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, |
248 | struct kvm_run *run) | 279 | struct kvm_run *run) |
249 | { | 280 | { |
250 | ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr]; | 281 | ulong gpr; |
251 | 282 | ||
252 | if (run->mmio.len > sizeof(*gpr)) { | 283 | if (run->mmio.len > sizeof(gpr)) { |
253 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); | 284 | printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); |
254 | return; | 285 | return; |
255 | } | 286 | } |
256 | 287 | ||
257 | if (vcpu->arch.mmio_is_bigendian) { | 288 | if (vcpu->arch.mmio_is_bigendian) { |
258 | switch (run->mmio.len) { | 289 | switch (run->mmio.len) { |
259 | case 4: *gpr = *(u32 *)run->mmio.data; break; | 290 | case 4: gpr = *(u32 *)run->mmio.data; break; |
260 | case 2: *gpr = *(u16 *)run->mmio.data; break; | 291 | case 2: gpr = *(u16 *)run->mmio.data; break; |
261 | case 1: *gpr = *(u8 *)run->mmio.data; break; | 292 | case 1: gpr = *(u8 *)run->mmio.data; break; |
262 | } | 293 | } |
263 | } else { | 294 | } else { |
264 | /* Convert BE data from userland back to LE. */ | 295 | /* Convert BE data from userland back to LE. */ |
265 | switch (run->mmio.len) { | 296 | switch (run->mmio.len) { |
266 | case 4: *gpr = ld_le32((u32 *)run->mmio.data); break; | 297 | case 4: gpr = ld_le32((u32 *)run->mmio.data); break; |
267 | case 2: *gpr = ld_le16((u16 *)run->mmio.data); break; | 298 | case 2: gpr = ld_le16((u16 *)run->mmio.data); break; |
268 | case 1: *gpr = *(u8 *)run->mmio.data; break; | 299 | case 1: gpr = *(u8 *)run->mmio.data; break; |
269 | } | 300 | } |
270 | } | 301 | } |
302 | |||
303 | kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); | ||
271 | } | 304 | } |
272 | 305 | ||
273 | int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | 306 | int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, |
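The MMIO completion rework above is also endian-aware: the bytes in run->mmio.data stay exactly as the device produced them, so a little-endian access needs an explicit little-endian load before the result lands in the GPR, which is what the ld_le32()/ld_le16() calls do. A portable user-space analogue:

/* Portable analogue of the LE fixup; behaves like ld_le32() regardless
 * of host endianness. */
#include <stdio.h>
#include <stdint.h>

static uint32_t load_le32(const void *p)
{
        const uint8_t *b = p;

        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
        uint8_t mmio_data[4] = { 0x78, 0x56, 0x34, 0x12 }; /* LE 0x12345678 */

        printf("gpr = 0x%08x\n", load_le32(mmio_data));
        return 0;
}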
@@ -409,11 +442,6 @@ out: | |||
409 | return r; | 442 | return r; |
410 | } | 443 | } |
411 | 444 | ||
412 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | ||
413 | { | ||
414 | return -ENOTSUPP; | ||
415 | } | ||
416 | |||
417 | long kvm_arch_vm_ioctl(struct file *filp, | 445 | long kvm_arch_vm_ioctl(struct file *filp, |
418 | unsigned int ioctl, unsigned long arg) | 446 | unsigned int ioctl, unsigned long arg) |
419 | { | 447 | { |
@@ -421,7 +449,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
421 | 449 | ||
422 | switch (ioctl) { | 450 | switch (ioctl) { |
423 | default: | 451 | default: |
424 | r = -EINVAL; | 452 | r = -ENOTTY; |
425 | } | 453 | } |
426 | 454 | ||
427 | return r; | 455 | return r; |
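The errno change at the bottom of kvm_arch_vm_ioctl() follows the usual ioctl convention: an unrecognized command yields -ENOTTY ("inappropriate ioctl for device"), leaving -EINVAL to mean a recognized command with bad arguments. In miniature:

/* Miniature of the -ENOTTY convention for unknown ioctl commands. */
#include <errno.h>
#include <stdio.h>

static long vm_ioctl(unsigned int cmd, unsigned long arg)
{
        (void)arg;

        switch (cmd) {
        /* no VM-level ioctls handled at this layer yet */
        default:
                return -ENOTTY;
        }
}

int main(void)
{
        printf("unknown ioctl -> %ld\n", vm_ioctl(0x1234, 0));
        return 0;
}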
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c index 2aa371e30079..70378551c0cc 100644 --- a/arch/powerpc/kvm/timing.c +++ b/arch/powerpc/kvm/timing.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/seq_file.h> | 23 | #include <linux/seq_file.h> |
24 | #include <linux/debugfs.h> | 24 | #include <linux/debugfs.h> |
25 | #include <linux/uaccess.h> | 25 | #include <linux/uaccess.h> |
26 | #include <linux/module.h> | ||
26 | 27 | ||
27 | #include <asm/time.h> | 28 | #include <asm/time.h> |
28 | #include <asm-generic/div64.h> | 29 | #include <asm-generic/div64.h> |
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h index 806ef67868bd..8167d42a776f 100644 --- a/arch/powerpc/kvm/timing.h +++ b/arch/powerpc/kvm/timing.h | |||
@@ -51,7 +51,7 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type) | |||
51 | 51 | ||
52 | /* The BUILD_BUG_ON below breaks in funny ways, commented out | 52 | /* The BUILD_BUG_ON below breaks in funny ways, commented out |
53 | * for now ... -BenH | 53 | * for now ... -BenH |
54 | BUILD_BUG_ON(__builtin_constant_p(type)); | 54 | BUILD_BUG_ON(!__builtin_constant_p(type)); |
55 | */ | 55 | */ |
56 | switch (type) { | 56 | switch (type) { |
57 | case EXT_INTR_EXITS: | 57 | case EXT_INTR_EXITS: |
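The timing.h one-liner flips the polarity of the commented-out assertion: BUILD_BUG_ON(cond) is meant to break the build when cond is true, so demanding a compile-time-constant 'type' requires asserting !__builtin_constant_p(type), not the positive form. A user-space rendition of the idiom; the in-function variant still "breaks in funny ways" depending on inlining and optimization, which is presumably why it stays commented out:

/* GCC-specific, like the kernel original. */
#include <stdio.h>

#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

int main(int argc, char **argv)
{
        int not_constant = argc;

        BUILD_BUG_ON(!__builtin_constant_p(42)); /* fine: 42 is constant */
        /* BUILD_BUG_ON(!__builtin_constant_p(not_constant));
         *   ...would fail to compile, as intended. */

        printf("%d %s\n", not_constant, argv[0]);
        return 0;
}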
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h index 67f219de0455..a8e840018052 100644 --- a/arch/powerpc/kvm/trace.h +++ b/arch/powerpc/kvm/trace.h | |||
@@ -12,8 +12,8 @@ | |||
12 | * Tracepoint for guest mode entry. | 12 | * Tracepoint for guest mode entry. |
13 | */ | 13 | */ |
14 | TRACE_EVENT(kvm_ppc_instr, | 14 | TRACE_EVENT(kvm_ppc_instr, |
15 | TP_PROTO(unsigned int inst, unsigned long pc, unsigned int emulate), | 15 | TP_PROTO(unsigned int inst, unsigned long _pc, unsigned int emulate), |
16 | TP_ARGS(inst, pc, emulate), | 16 | TP_ARGS(inst, _pc, emulate), |
17 | 17 | ||
18 | TP_STRUCT__entry( | 18 | TP_STRUCT__entry( |
19 | __field( unsigned int, inst ) | 19 | __field( unsigned int, inst ) |
@@ -23,7 +23,7 @@ TRACE_EVENT(kvm_ppc_instr, | |||
23 | 23 | ||
24 | TP_fast_assign( | 24 | TP_fast_assign( |
25 | __entry->inst = inst; | 25 | __entry->inst = inst; |
26 | __entry->pc = pc; | 26 | __entry->pc = _pc; |
27 | __entry->emulate = emulate; | 27 | __entry->emulate = emulate; |
28 | ), | 28 | ), |
29 | 29 | ||