about | summary | refs | log | tree | commit | diff | stats
path: root/arch/powerpc/kernel/kvm_emul.S
diff options
context:
space:
mode:
author: Alexander Graf <agraf@suse.de> 2010-08-05 05:26:04 -0400
committer: Avi Kivity <avi@redhat.com> 2010-10-24 04:52:13 -0400
commit 512ba59ed9c580b5e5575beda0041bb19a641127 (patch)
tree a262fbb9260d4af7e39f184ef2b90a8c8e02e8c2 /arch/powerpc/kernel/kvm_emul.S
parent cbe487fac7fc016dbabbcbe83f11384e1803a56d (diff)
KVM: PPC: Make PV mtmsr work with r30 and r31
So far we've been restricting ourselves to r0-r29 as registers that an mtmsr instruction could use. This was bad, as there are some code paths in Linux actually using r30. So let's instead handle all registers gracefully and get rid of that stupid limitation. Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch/powerpc/kernel/kvm_emul.S')
-rw-r--r-- arch/powerpc/kernel/kvm_emul.S | 17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
index a6e97e7a55e0..65305325250b 100644
--- a/arch/powerpc/kernel/kvm_emul.S
+++ b/arch/powerpc/kernel/kvm_emul.S
@@ -135,7 +135,8 @@ kvm_emulate_mtmsr:
135 135
136 /* Find the changed bits between old and new MSR */ 136 /* Find the changed bits between old and new MSR */
137kvm_emulate_mtmsr_reg1: 137kvm_emulate_mtmsr_reg1:
138 xor r31, r0, r31 138 ori r30, r0, 0
139 xor r31, r30, r31
139 140
140 /* Check if we need to really do mtmsr */ 141 /* Check if we need to really do mtmsr */
141 LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS) 142 LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
@@ -156,14 +157,17 @@ kvm_emulate_mtmsr_orig_ins:
156 157
157maybe_stay_in_guest: 158maybe_stay_in_guest:
158 159
160 /* Get the target register in r30 */
161kvm_emulate_mtmsr_reg2:
162 ori r30, r0, 0
163
159 /* Check if we have to fetch an interrupt */ 164 /* Check if we have to fetch an interrupt */
160 lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0) 165 lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
161 cmpwi r31, 0 166 cmpwi r31, 0
162 beq+ no_mtmsr 167 beq+ no_mtmsr
163 168
164 /* Check if we may trigger an interrupt */ 169 /* Check if we may trigger an interrupt */
165kvm_emulate_mtmsr_reg2: 170 andi. r31, r30, MSR_EE
166 andi. r31, r0, MSR_EE
167 beq no_mtmsr 171 beq no_mtmsr
168 172
169 b do_mtmsr 173 b do_mtmsr
@@ -171,8 +175,7 @@ kvm_emulate_mtmsr_reg2:
171no_mtmsr: 175no_mtmsr:
172 176
173 /* Put MSR into magic page because we don't call mtmsr */ 177 /* Put MSR into magic page because we don't call mtmsr */
174kvm_emulate_mtmsr_reg3: 178 STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
175 STL64(r0, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
176 179
177 SCRATCH_RESTORE 180 SCRATCH_RESTORE
178 181
@@ -193,10 +196,6 @@ kvm_emulate_mtmsr_reg1_offs:
193kvm_emulate_mtmsr_reg2_offs: 196kvm_emulate_mtmsr_reg2_offs:
194 .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4 197 .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4
195 198
196.global kvm_emulate_mtmsr_reg3_offs
197kvm_emulate_mtmsr_reg3_offs:
198 .long (kvm_emulate_mtmsr_reg3 - kvm_emulate_mtmsr) / 4
199
200.global kvm_emulate_mtmsr_orig_ins_offs 199.global kvm_emulate_mtmsr_orig_ins_offs
201kvm_emulate_mtmsr_orig_ins_offs: 200kvm_emulate_mtmsr_orig_ins_offs:
202 .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4 201 .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4