about summary refs log tree commit diff stats
path: root/arch/x86/kvm/x86.c
diff options
context:
space:
mode:
authorMarcelo Tosatti <mtosatti@redhat.com>2013-03-04 18:10:32 -0500
committerMarcelo Tosatti <mtosatti@redhat.com>2013-03-04 18:10:32 -0500
commitee2c25efdd46d7ed5605d6fe877bdf4b47a4ab2e (patch)
tree35890281e93e667a8e262d76ef250025eb30a8c1 /arch/x86/kvm/x86.c
parent3ab66e8a455a4877889c65a848f2fb32be502f2c (diff)
parent6dbe51c251a327e012439c4772097a13df43c5b8 (diff)
Merge branch 'master' into queue
* master: (15791 commits)
  Linux 3.9-rc1
  btrfs/raid56: Add missing #include <linux/vmalloc.h>
  fix compat_sys_rt_sigprocmask()
  SUNRPC: One line comment fix
  ext4: enable quotas before orphan cleanup
  ext4: don't allow quota mount options when quota feature enabled
  ext4: fix a warning from sparse check for ext4_dir_llseek
  ext4: convert number of blocks to clusters properly
  ext4: fix possible memory leak in ext4_remount()
  jbd2: fix ERR_PTR dereference in jbd2__journal_start
  metag: Provide dma_get_sgtable()
  metag: prom.h: remove declaration of metag_dt_memblock_reserve()
  metag: copy devicetree to non-init memory
  metag: cleanup metag_ksyms.c includes
  metag: move mm/init.c exports out of metag_ksyms.c
  metag: move usercopy.c exports out of metag_ksyms.c
  metag: move setup.c exports out of metag_ksyms.c
  metag: move kick.c exports out of metag_ksyms.c
  metag: move traps.c exports out of metag_ksyms.c
  metag: move irq enable out of irqflags.h on SMP
  ...

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

Conflicts:
	arch/x86/kernel/kvmclock.c
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--arch/x86/kvm/x86.c40
1 files changed, 27 insertions, 13 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d0cf7371a558..811c5c9c8880 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -120,7 +120,7 @@ struct kvm_shared_msrs {
120}; 120};
121 121
122static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; 122static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
123static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs); 123static struct kvm_shared_msrs __percpu *shared_msrs;
124 124
125struct kvm_stats_debugfs_item debugfs_entries[] = { 125struct kvm_stats_debugfs_item debugfs_entries[] = {
126 { "pf_fixed", VCPU_STAT(pf_fixed) }, 126 { "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -191,10 +191,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
191 191
192static void shared_msr_update(unsigned slot, u32 msr) 192static void shared_msr_update(unsigned slot, u32 msr)
193{ 193{
194 struct kvm_shared_msrs *smsr;
195 u64 value; 194 u64 value;
195 unsigned int cpu = smp_processor_id();
196 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
196 197
197 smsr = &__get_cpu_var(shared_msrs);
198 /* only read, and nobody should modify it at this time, 198 /* only read, and nobody should modify it at this time,
199 * so don't need lock */ 199 * so don't need lock */
200 if (slot >= shared_msrs_global.nr) { 200 if (slot >= shared_msrs_global.nr) {
@@ -226,7 +226,8 @@ static void kvm_shared_msr_cpu_online(void)
226 226
227void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) 227void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
228{ 228{
229 struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); 229 unsigned int cpu = smp_processor_id();
230 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
230 231
231 if (((value ^ smsr->values[slot].curr) & mask) == 0) 232 if (((value ^ smsr->values[slot].curr) & mask) == 0)
232 return; 233 return;
@@ -242,7 +243,8 @@ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
242 243
243static void drop_user_return_notifiers(void *ignore) 244static void drop_user_return_notifiers(void *ignore)
244{ 245{
245 struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs); 246 unsigned int cpu = smp_processor_id();
247 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
246 248
247 if (smsr->registered) 249 if (smsr->registered)
248 kvm_on_user_return(&smsr->urn); 250 kvm_on_user_return(&smsr->urn);
@@ -1877,6 +1879,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1877 u64 data = msr_info->data; 1879 u64 data = msr_info->data;
1878 1880
1879 switch (msr) { 1881 switch (msr) {
1882 case MSR_AMD64_NB_CFG:
1883 case MSR_IA32_UCODE_REV:
1884 case MSR_IA32_UCODE_WRITE:
1885 case MSR_VM_HSAVE_PA:
1886 case MSR_AMD64_PATCH_LOADER:
1887 case MSR_AMD64_BU_CFG2:
1888 break;
1889
1880 case MSR_EFER: 1890 case MSR_EFER:
1881 return set_efer(vcpu, data); 1891 return set_efer(vcpu, data);
1882 case MSR_K7_HWCR: 1892 case MSR_K7_HWCR:
@@ -1896,8 +1906,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1896 return 1; 1906 return 1;
1897 } 1907 }
1898 break; 1908 break;
1899 case MSR_AMD64_NB_CFG:
1900 break;
1901 case MSR_IA32_DEBUGCTLMSR: 1909 case MSR_IA32_DEBUGCTLMSR:
1902 if (!data) { 1910 if (!data) {
1903 /* We support the non-activated case already */ 1911 /* We support the non-activated case already */
@@ -1910,11 +1918,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1910 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", 1918 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1911 __func__, data); 1919 __func__, data);
1912 break; 1920 break;
1913 case MSR_IA32_UCODE_REV:
1914 case MSR_IA32_UCODE_WRITE:
1915 case MSR_VM_HSAVE_PA:
1916 case MSR_AMD64_PATCH_LOADER:
1917 break;
1918 case 0x200 ... 0x2ff: 1921 case 0x200 ... 0x2ff:
1919 return set_msr_mtrr(vcpu, msr, data); 1922 return set_msr_mtrr(vcpu, msr, data);
1920 case MSR_IA32_APICBASE: 1923 case MSR_IA32_APICBASE:
@@ -2249,6 +2252,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2249 case MSR_K8_INT_PENDING_MSG: 2252 case MSR_K8_INT_PENDING_MSG:
2250 case MSR_AMD64_NB_CFG: 2253 case MSR_AMD64_NB_CFG:
2251 case MSR_FAM10H_MMIO_CONF_BASE: 2254 case MSR_FAM10H_MMIO_CONF_BASE:
2255 case MSR_AMD64_BU_CFG2:
2252 data = 0; 2256 data = 0;
2253 break; 2257 break;
2254 case MSR_P6_PERFCTR0: 2258 case MSR_P6_PERFCTR0:
@@ -5269,9 +5273,16 @@ int kvm_arch_init(void *opaque)
5269 goto out; 5273 goto out;
5270 } 5274 }
5271 5275
5276 r = -ENOMEM;
5277 shared_msrs = alloc_percpu(struct kvm_shared_msrs);
5278 if (!shared_msrs) {
5279 printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
5280 goto out;
5281 }
5282
5272 r = kvm_mmu_module_init(); 5283 r = kvm_mmu_module_init();
5273 if (r) 5284 if (r)
5274 goto out; 5285 goto out_free_percpu;
5275 5286
5276 kvm_set_mmio_spte_mask(); 5287 kvm_set_mmio_spte_mask();
5277 kvm_init_msr_list(); 5288 kvm_init_msr_list();
@@ -5294,6 +5305,8 @@ int kvm_arch_init(void *opaque)
5294 5305
5295 return 0; 5306 return 0;
5296 5307
5308out_free_percpu:
5309 free_percpu(shared_msrs);
5297out: 5310out:
5298 return r; 5311 return r;
5299} 5312}
@@ -5311,6 +5324,7 @@ void kvm_arch_exit(void)
5311#endif 5324#endif
5312 kvm_x86_ops = NULL; 5325 kvm_x86_ops = NULL;
5313 kvm_mmu_module_exit(); 5326 kvm_mmu_module_exit();
5327 free_percpu(shared_msrs);
5314} 5328}
5315 5329
5316int kvm_emulate_halt(struct kvm_vcpu *vcpu) 5330int kvm_emulate_halt(struct kvm_vcpu *vcpu)