aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSheng Yang <sheng@linux.intel.com>2009-12-18 03:48:44 -0500
committerMarcelo Tosatti <mtosatti@redhat.com>2010-03-01 10:35:40 -0500
commit2bf78fa7b9b0d2917fd6587eadb3c0f6bbaf1718 (patch)
tree614464f8cc2e6e8bd8610e00601caec35e0c252d
parent8a7e3f01e692cd202fb7c042cf2be9ff8c599a1e (diff)
KVM: Extended shared_msr_global to per CPU
shared_msr_global saved the host values of the relevant MSRs, but it had an assumption that all the MSRs it tracked shared the same value across different CPUs. That is not true for some MSRs, e.g. MSR_TSC_AUX. Extend it to be per CPU, to provide support for MSR_TSC_AUX and other similar MSRs. Note that shared_msr_global still has one assumption: it can only deal with MSRs whose host values won't change after the KVM module is loaded. Signed-off-by: Sheng Yang <sheng@linux.intel.com> Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--arch/x86/kvm/x86.c55
1 file changed, 33 insertions, 22 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 84dd33e717fd..4e7bbc49b7e4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -93,16 +93,16 @@ module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
93 93
94struct kvm_shared_msrs_global { 94struct kvm_shared_msrs_global {
95 int nr; 95 int nr;
96 struct kvm_shared_msr { 96 u32 msrs[KVM_NR_SHARED_MSRS];
97 u32 msr;
98 u64 value;
99 } msrs[KVM_NR_SHARED_MSRS];
100}; 97};
101 98
102struct kvm_shared_msrs { 99struct kvm_shared_msrs {
103 struct user_return_notifier urn; 100 struct user_return_notifier urn;
104 bool registered; 101 bool registered;
105 u64 current_value[KVM_NR_SHARED_MSRS]; 102 struct kvm_shared_msr_values {
103 u64 host;
104 u64 curr;
105 } values[KVM_NR_SHARED_MSRS];
106}; 106};
107 107
108static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; 108static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
@@ -147,53 +147,64 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
147static void kvm_on_user_return(struct user_return_notifier *urn) 147static void kvm_on_user_return(struct user_return_notifier *urn)
148{ 148{
149 unsigned slot; 149 unsigned slot;
150 struct kvm_shared_msr *global;
151 struct kvm_shared_msrs *locals 150 struct kvm_shared_msrs *locals
152 = container_of(urn, struct kvm_shared_msrs, urn); 151 = container_of(urn, struct kvm_shared_msrs, urn);
152 struct kvm_shared_msr_values *values;
153 153
154 for (slot = 0; slot < shared_msrs_global.nr; ++slot) { 154 for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
155 global = &shared_msrs_global.msrs[slot]; 155 values = &locals->values[slot];
156 if (global->value != locals->current_value[slot]) { 156 if (values->host != values->curr) {
157 wrmsrl(global->msr, global->value); 157 wrmsrl(shared_msrs_global.msrs[slot], values->host);
158 locals->current_value[slot] = global->value; 158 values->curr = values->host;
159 } 159 }
160 } 160 }
161 locals->registered = false; 161 locals->registered = false;
162 user_return_notifier_unregister(urn); 162 user_return_notifier_unregister(urn);
163} 163}
164 164
165void kvm_define_shared_msr(unsigned slot, u32 msr) 165static void shared_msr_update(unsigned slot, u32 msr)
166{ 166{
167 int cpu; 167 struct kvm_shared_msrs *smsr;
168 u64 value; 168 u64 value;
169 169
170 smsr = &__get_cpu_var(shared_msrs);
171 /* only read, and nobody should modify it at this time,
172 * so don't need lock */
173 if (slot >= shared_msrs_global.nr) {
174 printk(KERN_ERR "kvm: invalid MSR slot!");
175 return;
176 }
177 rdmsrl_safe(msr, &value);
178 smsr->values[slot].host = value;
179 smsr->values[slot].curr = value;
180}
181
182void kvm_define_shared_msr(unsigned slot, u32 msr)
183{
170 if (slot >= shared_msrs_global.nr) 184 if (slot >= shared_msrs_global.nr)
171 shared_msrs_global.nr = slot + 1; 185 shared_msrs_global.nr = slot + 1;
172 shared_msrs_global.msrs[slot].msr = msr; 186 shared_msrs_global.msrs[slot] = msr;
173 rdmsrl_safe(msr, &value); 187 /* we need ensured the shared_msr_global have been updated */
174 shared_msrs_global.msrs[slot].value = value; 188 smp_wmb();
175 for_each_online_cpu(cpu)
176 per_cpu(shared_msrs, cpu).current_value[slot] = value;
177} 189}
178EXPORT_SYMBOL_GPL(kvm_define_shared_msr); 190EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
179 191
180static void kvm_shared_msr_cpu_online(void) 192static void kvm_shared_msr_cpu_online(void)
181{ 193{
182 unsigned i; 194 unsigned i;
183 struct kvm_shared_msrs *locals = &__get_cpu_var(shared_msrs);
184 195
185 for (i = 0; i < shared_msrs_global.nr; ++i) 196 for (i = 0; i < shared_msrs_global.nr; ++i)
186 locals->current_value[i] = shared_msrs_global.msrs[i].value; 197 shared_msr_update(i, shared_msrs_global.msrs[i]);
187} 198}
188 199
189void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) 200void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
190{ 201{
191 struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); 202 struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
192 203
193 if (((value ^ smsr->current_value[slot]) & mask) == 0) 204 if (((value ^ smsr->values[slot].curr) & mask) == 0)
194 return; 205 return;
195 smsr->current_value[slot] = value; 206 smsr->values[slot].curr = value;
196 wrmsrl(shared_msrs_global.msrs[slot].msr, value); 207 wrmsrl(shared_msrs_global.msrs[slot], value);
197 if (!smsr->registered) { 208 if (!smsr->registered) {
198 smsr->urn.on_user_return = kvm_on_user_return; 209 smsr->urn.on_user_return = kvm_on_user_return;
199 user_return_notifier_register(&smsr->urn); 210 user_return_notifier_register(&smsr->urn);