author	Alexander Graf <agraf@suse.de>	2010-07-29 08:48:04 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:50:56 -0400
commit	819a63dc792b0888edd3eda306a9e1e049dcbb1c (patch)
tree	70599dcd83eb9892785cd9e7ca5d92534bedcdd7 /arch/powerpc/kernel/kvm_emul.S
parent	92234722ed631f472f1c4d79d35d8e5cf6910002 (diff)
KVM: PPC: PV mtmsrd L=1
The PowerPC ISA has a special instruction for mtmsr that only changes the EE
and RI bits, namely the L=1 form.
Since that form occurs reasonably often and is simple to implement, let's
go with it first. Writing EE=0 is always just a store. Doing EE=1 also
requires us to check for pending interrupts and, if necessary, exit back
to the hypervisor.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
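
For orientation, the logic the assembly template below implements can be
sketched in C. This is a minimal sketch: the struct layout and the
trap_to_hypervisor() helper are illustrative assumptions, not kernel API;
the real guest code accesses the shared page through the KVM_MAGIC_PAGE
offsets used in kvm_emul.S.

#include <stdint.h>

#define MSR_EE 0x8000UL    /* external interrupt enable */
#define MSR_RI 0x0002UL    /* recoverable interrupt */

/* Hypothetical C-level mirror of the shared magic page. */
struct kvm_magic_page {
	uint64_t msr;          /* guest's cached MSR */
	uint32_t int_pending;  /* nonzero if an interrupt is queued */
};

/* Hypothetical stand-in for the "nag" trap (tlbsync in the asm). */
static void trap_to_hypervisor(void) { }

/* mtmsrd rS, 1 only transfers EE and RI from the source register. */
static void emulate_mtmsrd_l1(struct kvm_magic_page *magic, uint64_t rs)
{
	uint64_t msr = magic->msr & ~(MSR_EE | MSR_RI);

	msr |= rs & (MSR_EE | MSR_RI);
	magic->msr = msr;  /* writing EE=0 is always just this store */

	/* Re-enabling EE while an interrupt is pending must go to the
	 * host so the interrupt can actually be delivered. */
	if (magic->int_pending && (rs & MSR_EE))
		trap_to_hypervisor();
}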
Diffstat (limited to 'arch/powerpc/kernel/kvm_emul.S')
-rw-r--r--	arch/powerpc/kernel/kvm_emul.S	56
1 file changed, 56 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
index 1dac72dd6f6e..10dc4a6632fd 100644
--- a/arch/powerpc/kernel/kvm_emul.S
+++ b/arch/powerpc/kernel/kvm_emul.S
@@ -64,3 +64,59 @@ kvm_hypercall_start:
 	/* Disable critical section. We are critical if		\
 	   shared->critical == r1 and r2 is always != r1 */	\
 	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
+
+.global kvm_emulate_mtmsrd
+kvm_emulate_mtmsrd:
+
+	SCRATCH_SAVE
+
+	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
+	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+	lis	r30, (~(MSR_EE | MSR_RI))@h
+	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
+	and	r31, r31, r30
+
+	/* OR the register's (MSR_EE|MSR_RI) on MSR */
+kvm_emulate_mtmsrd_reg:
+	andi.	r30, r0, (MSR_EE|MSR_RI)
+	or	r31, r31, r30
+
+	/* Put MSR back into magic page */
+	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
+
+	/* Check if we have to fetch an interrupt */
+	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
+	cmpwi	r31, 0
+	beq+	no_check
+
+	/* Check if we may trigger an interrupt */
+	andi.	r30, r30, MSR_EE
+	beq	no_check
+
+	SCRATCH_RESTORE
+
+	/* Nag hypervisor */
+	tlbsync
+
+	b	kvm_emulate_mtmsrd_branch
+
+no_check:
+
+	SCRATCH_RESTORE
+
+	/* Go back to caller */
+kvm_emulate_mtmsrd_branch:
+	b	.
+kvm_emulate_mtmsrd_end:
+
+.global kvm_emulate_mtmsrd_branch_offs
+kvm_emulate_mtmsrd_branch_offs:
+	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4
+
+.global kvm_emulate_mtmsrd_reg_offs
+kvm_emulate_mtmsrd_reg_offs:
+	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4
+
+.global kvm_emulate_mtmsrd_len
+kvm_emulate_mtmsrd_len:
+	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
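
The three exported words at the end let the companion patching code (in
arch/powerpc/kernel/kvm.c, outside this diffstat) copy the template and fix
up the register operand and the return branch at runtime. A rough sketch of
such a consumer follows; kvm_alloc() and patch_branch() are assumed helper
names for illustration, not real kernel API, and a real implementation
would also flush the icache over the patched range.

#include <stdint.h>
#include <string.h>

typedef uint32_t u32;

/* Symbols exported by kvm_emul.S above. */
extern u32 kvm_emulate_mtmsrd[];
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_len;

/* Assumed helpers: allocate guest-local code memory and write a
 * relative branch instruction. Names are illustrative only. */
extern u32 *kvm_alloc(int len);
extern void patch_branch(u32 *where, u32 *target);

/* inst points at the original mtmsrd; rs_field is the rS register
 * field already shifted into its instruction-encoding position. */
static void patch_mtmsrd(u32 *inst, u32 rs_field)
{
	u32 *p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Copy the template, then fix it up in place. */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);

	/* Make the trailing "b ." return to the instruction after the
	 * original mtmsrd, and splice the register into the andi.
	 * (the template uses r0, whose field encodes as zero). */
	patch_branch(&p[kvm_emulate_mtmsrd_branch_offs], inst + 1);
	p[kvm_emulate_mtmsrd_reg_offs] |= rs_field;

	/* Finally, redirect the original mtmsrd to the template. */
	patch_branch(inst, p);
}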