diff options
author | Gleb Natapov <gleb@redhat.com> | 2010-10-14 05:22:54 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2011-01-12 04:23:19 -0500 |
commit | 6c047cd982f944fa63b2d96de2a06463d113f9fa (patch) | |
tree | 33ea24341320d9adda2d2d3c0314a7244bac7862 /arch/x86/kernel/kvm.c | |
parent | 7c90705bf2a373aa238661bdb6446f27299ef489 (diff) |
KVM paravirt: Handle async PF in non-preemptible context
If an async page fault is received by the idle task, or when preempt_count is
non-zero, the guest cannot reschedule, so do sti; hlt and wait for the page to
be ready. The vcpu can still process interrupts while it waits for the page
to be ready.
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kernel/kvm.c')
-rw-r--r-- | arch/x86/kernel/kvm.c | 40 |
1 file changed, 34 insertions, 6 deletions
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index d5640634fef6..47ea93e6b0d8 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <asm/cpu.h> | 37 | #include <asm/cpu.h> |
38 | #include <asm/traps.h> | 38 | #include <asm/traps.h> |
39 | #include <asm/desc.h> | 39 | #include <asm/desc.h> |
40 | #include <asm/tlbflush.h> | ||
40 | 41 | ||
41 | #define MMU_QUEUE_SIZE 1024 | 42 | #define MMU_QUEUE_SIZE 1024 |
42 | 43 | ||
@@ -78,6 +79,8 @@ struct kvm_task_sleep_node { | |||
78 | wait_queue_head_t wq; | 79 | wait_queue_head_t wq; |
79 | u32 token; | 80 | u32 token; |
80 | int cpu; | 81 | int cpu; |
82 | bool halted; | ||
83 | struct mm_struct *mm; | ||
81 | }; | 84 | }; |
82 | 85 | ||
83 | static struct kvm_task_sleep_head { | 86 | static struct kvm_task_sleep_head { |
@@ -106,6 +109,11 @@ void kvm_async_pf_task_wait(u32 token) | |||
106 | struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; | 109 | struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; |
107 | struct kvm_task_sleep_node n, *e; | 110 | struct kvm_task_sleep_node n, *e; |
108 | DEFINE_WAIT(wait); | 111 | DEFINE_WAIT(wait); |
112 | int cpu, idle; | ||
113 | |||
114 | cpu = get_cpu(); | ||
115 | idle = idle_cpu(cpu); | ||
116 | put_cpu(); | ||
109 | 117 | ||
110 | spin_lock(&b->lock); | 118 | spin_lock(&b->lock); |
111 | e = _find_apf_task(b, token); | 119 | e = _find_apf_task(b, token); |
@@ -119,19 +127,33 @@ void kvm_async_pf_task_wait(u32 token) | |||
119 | 127 | ||
120 | n.token = token; | 128 | n.token = token; |
121 | n.cpu = smp_processor_id(); | 129 | n.cpu = smp_processor_id(); |
130 | n.mm = current->active_mm; | ||
131 | n.halted = idle || preempt_count() > 1; | ||
132 | atomic_inc(&n.mm->mm_count); | ||
122 | init_waitqueue_head(&n.wq); | 133 | init_waitqueue_head(&n.wq); |
123 | hlist_add_head(&n.link, &b->list); | 134 | hlist_add_head(&n.link, &b->list); |
124 | spin_unlock(&b->lock); | 135 | spin_unlock(&b->lock); |
125 | 136 | ||
126 | for (;;) { | 137 | for (;;) { |
127 | prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE); | 138 | if (!n.halted) |
139 | prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE); | ||
128 | if (hlist_unhashed(&n.link)) | 140 | if (hlist_unhashed(&n.link)) |
129 | break; | 141 | break; |
130 | local_irq_enable(); | 142 | |
131 | schedule(); | 143 | if (!n.halted) { |
132 | local_irq_disable(); | 144 | local_irq_enable(); |
145 | schedule(); | ||
146 | local_irq_disable(); | ||
147 | } else { | ||
148 | /* | ||
149 | * We cannot reschedule. So halt. | ||
150 | */ | ||
151 | native_safe_halt(); | ||
152 | local_irq_disable(); | ||
153 | } | ||
133 | } | 154 | } |
134 | finish_wait(&n.wq, &wait); | 155 | if (!n.halted) |
156 | finish_wait(&n.wq, &wait); | ||
135 | 157 | ||
136 | return; | 158 | return; |
137 | } | 159 | } |
@@ -140,7 +162,12 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait); | |||
140 | static void apf_task_wake_one(struct kvm_task_sleep_node *n) | 162 | static void apf_task_wake_one(struct kvm_task_sleep_node *n) |
141 | { | 163 | { |
142 | hlist_del_init(&n->link); | 164 | hlist_del_init(&n->link); |
143 | if (waitqueue_active(&n->wq)) | 165 | if (!n->mm) |
166 | return; | ||
167 | mmdrop(n->mm); | ||
168 | if (n->halted) | ||
169 | smp_send_reschedule(n->cpu); | ||
170 | else if (waitqueue_active(&n->wq)) | ||
144 | wake_up(&n->wq); | 171 | wake_up(&n->wq); |
145 | } | 172 | } |
146 | 173 | ||
@@ -193,6 +220,7 @@ again: | |||
193 | } | 220 | } |
194 | n->token = token; | 221 | n->token = token; |
195 | n->cpu = smp_processor_id(); | 222 | n->cpu = smp_processor_id(); |
223 | n->mm = NULL; | ||
196 | init_waitqueue_head(&n->wq); | 224 | init_waitqueue_head(&n->wq); |
197 | hlist_add_head(&n->link, &b->list); | 225 | hlist_add_head(&n->link, &b->list); |
198 | } else | 226 | } else |