author:    Avi Kivity <avi@qumranet.com>  2007-07-11 11:17:21 -0400
committer: Avi Kivity <avi@qumranet.com>  2007-10-13 04:18:20 -0400
commit:    15ad71460d75fd7ca41bb248a2310f3f39b302ba
tree:      1ea549e5c5629561c121a54def146fb6b706c2d4
parent:    519ef35341b4f360f072ea74e398b70a5a2fc270
KVM: Use the scheduler preemption notifiers to make kvm preemptible
Current kvm disables preemption while the new virtualization registers are
in use.  This of course is not very good for latency sensitive workloads
(one use of virtualization is to offload user interface and other latency
insensitive stuff to a container, so that it is easier to analyze the
remaining workload).  This patch re-enables preemption for kvm; preemption
is now only disabled when switching the registers in and out, and during
the switch to guest mode and back.

Contains fixes from Shaohua Li <shaohua.li@intel.com>.

Signed-off-by: Avi Kivity <avi@qumranet.com>
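For readers who have not met the preempt notifier API this patch builds on,
it boils down to one ops struct and three calls. The sketch below shows the
general pattern; my_ctx, my_sched_in, my_sched_out and my_ctx_attach are
hypothetical names used only for illustration, while struct preempt_ops,
preempt_notifier_init(), preempt_notifier_register() and
preempt_notifier_unregister() are the real interface from <linux/preempt.h>.

/*
 * Minimal sketch of the preempt notifier pattern (hypothetical names;
 * only struct preempt_ops and the preempt_notifier_*() calls are real).
 */
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/sched.h>

struct my_ctx {
        struct preempt_notifier notifier;
        /* per-task hardware state would live here */
};

/* runs when the registered task is scheduled back in on 'cpu' */
static void my_sched_in(struct preempt_notifier *pn, int cpu)
{
        struct my_ctx *ctx = container_of(pn, struct my_ctx, notifier);

        pr_debug("reload state for %p on cpu %d\n", ctx, cpu);
}

/* runs when the registered task is about to be preempted by 'next' */
static void my_sched_out(struct preempt_notifier *pn,
                         struct task_struct *next)
{
        struct my_ctx *ctx = container_of(pn, struct my_ctx, notifier);

        pr_debug("save state for %p before %s runs\n", ctx, next->comm);
}

static struct preempt_ops my_preempt_ops = {
        .sched_in  = my_sched_in,
        .sched_out = my_sched_out,
};

/* arm the notifier for the current task (mirrors vcpu_load() below) */
static void my_ctx_attach(struct my_ctx *ctx)
{
        preempt_notifier_init(&ctx->notifier, &my_preempt_ops);
        preempt_disable();
        preempt_notifier_register(&ctx->notifier);
        preempt_enable();
}

The patch below is exactly this shape: struct kvm_vcpu embeds a
preempt_notifier, vcpu_load()/vcpu_put() register and unregister it for the
current task, and kvm_sched_in()/kvm_sched_out() forward to the arch
vcpu_load/vcpu_put hooks, so guest register state is saved and restored only
across real context switches instead of holding preemption off for the whole
run.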
Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r--  drivers/kvm/kvm_main.c | 43
1 files changed, 37 insertions(+), 6 deletions(-)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 20947462f401..6035e6d35417 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -54,6 +54,8 @@ static cpumask_t cpus_hardware_enabled;
 
 struct kvm_arch_ops *kvm_arch_ops;
 
+static __read_mostly struct preempt_ops kvm_preempt_ops;
+
 #define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
 
 static struct kvm_stats_debugfs_item {
@@ -239,13 +241,21 @@ EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
  */
 static void vcpu_load(struct kvm_vcpu *vcpu)
 {
+        int cpu;
+
         mutex_lock(&vcpu->mutex);
-        kvm_arch_ops->vcpu_load(vcpu);
+        cpu = get_cpu();
+        preempt_notifier_register(&vcpu->preempt_notifier);
+        kvm_arch_ops->vcpu_load(vcpu, cpu);
+        put_cpu();
 }
 
 static void vcpu_put(struct kvm_vcpu *vcpu)
 {
+        preempt_disable();
         kvm_arch_ops->vcpu_put(vcpu);
+        preempt_notifier_unregister(&vcpu->preempt_notifier);
+        preempt_enable();
         mutex_unlock(&vcpu->mutex);
 }
 
@@ -1672,9 +1682,7 @@ void kvm_resched(struct kvm_vcpu *vcpu)
 {
         if (!need_resched())
                 return;
-        vcpu_put(vcpu);
         cond_resched();
-        vcpu_load(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_resched);
 
@@ -1722,11 +1730,9 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
         unsigned bytes;
         int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;
 
-        kvm_arch_ops->vcpu_put(vcpu);
         q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
                  PAGE_KERNEL);
         if (!q) {
-                kvm_arch_ops->vcpu_load(vcpu);
                 free_pio_guest_pages(vcpu);
                 return -ENOMEM;
         }
@@ -1738,7 +1744,6 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
         memcpy(p, q, bytes);
         q -= vcpu->pio.guest_page_offset;
         vunmap(q);
-        kvm_arch_ops->vcpu_load(vcpu);
         free_pio_guest_pages(vcpu);
         return 0;
 }
@@ -2413,6 +2418,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
         if (IS_ERR(vcpu))
                 return PTR_ERR(vcpu);
 
+        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
+
         vcpu_load(vcpu);
         r = kvm_mmu_setup(vcpu);
         vcpu_put(vcpu);
@@ -3145,6 +3152,27 @@ static struct sys_device kvm_sysdev = {
 
 hpa_t bad_page_address;
 
+static inline
+struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
+{
+        return container_of(pn, struct kvm_vcpu, preempt_notifier);
+}
+
+static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
+{
+        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
+
+        kvm_arch_ops->vcpu_load(vcpu, cpu);
+}
+
+static void kvm_sched_out(struct preempt_notifier *pn,
+                          struct task_struct *next)
+{
+        struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
+
+        kvm_arch_ops->vcpu_put(vcpu);
+}
+
 int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
 {
         int r;
@@ -3191,6 +3219,9 @@ int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module)
                 goto out_free;
         }
 
+        kvm_preempt_ops.sched_in = kvm_sched_in;
+        kvm_preempt_ops.sched_out = kvm_sched_out;
+
         return r;
 
 out_free: