diff options
author | Alexander Graf <agraf@suse.de> | 2010-07-29 08:48:04 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2010-10-24 04:50:56 -0400 |
commit | 819a63dc792b0888edd3eda306a9e1e049dcbb1c (patch) | |
tree | 70599dcd83eb9892785cd9e7ca5d92534bedcdd7 /arch/powerpc | |
parent | 92234722ed631f472f1c4d79d35d8e5cf6910002 (diff) |
KVM: PPC: PV mtmsrd L=1
The PowerPC ISA has a special instruction for mtmsr that only changes the EE
and RI bits, namely the L=1 form.
Since that form occurs reasonably often and is simple to implement, let's
go with this first. Writing EE=0 is always just a store. Doing EE=1 also
requires us to check for pending interrupts and if necessary exit back to the
hypervisor.
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- | arch/powerpc/kernel/kvm.c | 45 | ||||
-rw-r--r-- | arch/powerpc/kernel/kvm_emul.S | 56 |
2 files changed, 101 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 239a70d750a2..717ab0dded25 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c | |||
@@ -63,6 +63,7 @@ | |||
63 | #define KVM_INST_MTSPR_DSISR 0x7c1203a6 | 63 | #define KVM_INST_MTSPR_DSISR 0x7c1203a6 |
64 | 64 | ||
65 | #define KVM_INST_TLBSYNC 0x7c00046c | 65 | #define KVM_INST_TLBSYNC 0x7c00046c |
66 | #define KVM_INST_MTMSRD_L1 0x7c010164 | ||
66 | 67 | ||
67 | static bool kvm_patching_worked = true; | 68 | static bool kvm_patching_worked = true; |
68 | static char kvm_tmp[1024 * 1024]; | 69 | static char kvm_tmp[1024 * 1024]; |
@@ -138,6 +139,43 @@ static u32 *kvm_alloc(int len) | |||
138 | return p; | 139 | return p; |
139 | } | 140 | } |
140 | 141 | ||
142 | extern u32 kvm_emulate_mtmsrd_branch_offs; | ||
143 | extern u32 kvm_emulate_mtmsrd_reg_offs; | ||
144 | extern u32 kvm_emulate_mtmsrd_len; | ||
145 | extern u32 kvm_emulate_mtmsrd[]; | ||
146 | |||
147 | static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt) | ||
148 | { | ||
149 | u32 *p; | ||
150 | int distance_start; | ||
151 | int distance_end; | ||
152 | ulong next_inst; | ||
153 | |||
154 | p = kvm_alloc(kvm_emulate_mtmsrd_len * 4); | ||
155 | if (!p) | ||
156 | return; | ||
157 | |||
158 | /* Find out where we are and put everything there */ | ||
159 | distance_start = (ulong)p - (ulong)inst; | ||
160 | next_inst = ((ulong)inst + 4); | ||
161 | distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs]; | ||
162 | |||
163 | /* Make sure we only write valid b instructions */ | ||
164 | if (distance_start > KVM_INST_B_MAX) { | ||
165 | kvm_patching_worked = false; | ||
166 | return; | ||
167 | } | ||
168 | |||
169 | /* Modify the chunk to fit the invocation */ | ||
170 | memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4); | ||
171 | p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK; | ||
172 | p[kvm_emulate_mtmsrd_reg_offs] |= rt; | ||
173 | flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4); | ||
174 | |||
175 | /* Patch the invocation */ | ||
176 | kvm_patch_ins_b(inst, distance_start); | ||
177 | } | ||
178 | |||
141 | static void kvm_map_magic_page(void *data) | 179 | static void kvm_map_magic_page(void *data) |
142 | { | 180 | { |
143 | kvm_hypercall2(KVM_HC_PPC_MAP_MAGIC_PAGE, | 181 | kvm_hypercall2(KVM_HC_PPC_MAP_MAGIC_PAGE, |
@@ -211,6 +249,13 @@ static void kvm_check_ins(u32 *inst) | |||
211 | case KVM_INST_TLBSYNC: | 249 | case KVM_INST_TLBSYNC: |
212 | kvm_patch_ins_nop(inst); | 250 | kvm_patch_ins_nop(inst); |
213 | break; | 251 | break; |
252 | |||
253 | /* Rewrites */ | ||
254 | case KVM_INST_MTMSRD_L1: | ||
255 | /* We use r30 and r31 during the hook */ | ||
256 | if (get_rt(inst_rt) < 30) | ||
257 | kvm_patch_ins_mtmsrd(inst, inst_rt); | ||
258 | break; | ||
214 | } | 259 | } |
215 | 260 | ||
216 | switch (_inst) { | 261 | switch (_inst) { |
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S index 1dac72dd6f6e..10dc4a6632fd 100644 --- a/arch/powerpc/kernel/kvm_emul.S +++ b/arch/powerpc/kernel/kvm_emul.S | |||
@@ -64,3 +64,59 @@ kvm_hypercall_start: | |||
64 | /* Disable critical section. We are critical if \ | 64 | /* Disable critical section. We are critical if \ |
65 | shared->critical == r1 and r2 is always != r1 */ \ | 65 | shared->critical == r1 and r2 is always != r1 */ \ |
66 | STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); | 66 | STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); |
67 | |||
68 | .global kvm_emulate_mtmsrd | ||
69 | kvm_emulate_mtmsrd: | ||
70 | |||
71 | SCRATCH_SAVE | ||
72 | |||
73 | /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */ | ||
74 | LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
75 | lis r30, (~(MSR_EE | MSR_RI))@h | ||
76 | ori r30, r30, (~(MSR_EE | MSR_RI))@l | ||
77 | and r31, r31, r30 | ||
78 | |||
79 | /* OR the register's (MSR_EE|MSR_RI) on MSR */ | ||
80 | kvm_emulate_mtmsrd_reg: | ||
81 | andi. r30, r0, (MSR_EE|MSR_RI) | ||
82 | or r31, r31, r30 | ||
83 | |||
84 | /* Put MSR back into magic page */ | ||
85 | STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
86 | |||
87 | /* Check if we have to fetch an interrupt */ | ||
88 | lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0) | ||
89 | cmpwi r31, 0 | ||
90 | beq+ no_check | ||
91 | |||
92 | /* Check if we may trigger an interrupt */ | ||
93 | andi. r30, r30, MSR_EE | ||
94 | beq no_check | ||
95 | |||
96 | SCRATCH_RESTORE | ||
97 | |||
98 | /* Nag hypervisor */ | ||
99 | tlbsync | ||
100 | |||
101 | b kvm_emulate_mtmsrd_branch | ||
102 | |||
103 | no_check: | ||
104 | |||
105 | SCRATCH_RESTORE | ||
106 | |||
107 | /* Go back to caller */ | ||
108 | kvm_emulate_mtmsrd_branch: | ||
109 | b . | ||
110 | kvm_emulate_mtmsrd_end: | ||
111 | |||
112 | .global kvm_emulate_mtmsrd_branch_offs | ||
113 | kvm_emulate_mtmsrd_branch_offs: | ||
114 | .long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4 | ||
115 | |||
116 | .global kvm_emulate_mtmsrd_reg_offs | ||
117 | kvm_emulate_mtmsrd_reg_offs: | ||
118 | .long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4 | ||
119 | |||
120 | .global kvm_emulate_mtmsrd_len | ||
121 | kvm_emulate_mtmsrd_len: | ||
122 | .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4 | ||