author		Avi Kivity <avi@qumranet.com>	2007-09-10 04:28:17 -0400
committer	Avi Kivity <avi@qumranet.com>	2007-10-13 04:18:27 -0400
commit		2e3e5882dca3ab409aa8c9c96f47610b576719f8 (patch)
tree		53184e9026b67710bd74615b818c53a6b62c3ab4 /drivers/kvm/mmu.c
parent		cbdd1bea2a2dce4c0b45c5f0122c150d9f07f0bc (diff)
KVM: MMU: Don't do GFP_NOWAIT allocations
Before preempt notifiers, KVM needed to allocate memory with GFP_NOWAIT so
as not to have to enable preemption and take a heavyweight exit. On OOM, we'd
fall back to a GFP_KERNEL allocation.

With preempt notifiers, we can do a GFP_KERNEL allocation directly, and perform
the heavyweight exit only if the kernel decides to put us to sleep.
Signed-off-by: Avi Kivity <avi@qumranet.com>
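
To make the two strategies concrete, here is a minimal, hypothetical sketch (not the actual KVM helpers; the function names are invented for illustration) contrasting the old try-GFP_NOWAIT-then-fall-back pattern with the direct GFP_KERNEL allocation the commit switches to:

/*
 * Illustrative sketch only, assuming made-up helper names.
 * Contrast the old fallback pattern with a direct GFP_KERNEL allocation.
 */
#include <linux/gfp.h>
#include <linux/slab.h>

/* Old approach: a non-sleeping attempt first, GFP_KERNEL only as a fallback. */
static void *alloc_with_fallback(size_t size)
{
	void *obj = kmalloc(size, GFP_NOWAIT);		/* fast path: must not sleep */

	if (!obj)
		obj = kmalloc(size, GFP_KERNEL);	/* slow path: may sleep */
	return obj;
}

/* New approach: with preempt notifiers handling an involuntary sleep,
 * a plain GFP_KERNEL allocation is safe from the start. */
static void *alloc_direct(size_t size)
{
	return kmalloc(size, GFP_KERNEL);
}

In the actual diff below the same simplification shows up as dropping the gfp_flags parameter from the cache top-up helpers and hard-coding GFP_KERNEL, which lets the GFP_NOWAIT retry wrapper be removed entirely.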
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--	drivers/kvm/mmu.c	34
1 file changed, 10 insertions(+), 24 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 7b42c88b0b57..6d84d30f5ed0 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -202,15 +202,14 @@ static void set_shadow_pte(u64 *sptep, u64 spte)
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-				  struct kmem_cache *base_cache, int min,
-				  gfp_t gfp_flags)
+				  struct kmem_cache *base_cache, int min)
 {
 	void *obj;
 
 	if (cache->nobjs >= min)
 		return 0;
 	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-		obj = kmem_cache_zalloc(base_cache, gfp_flags);
+		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
 		if (!obj)
 			return -ENOMEM;
 		cache->objects[cache->nobjs++] = obj;
@@ -225,14 +224,14 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
 }
 
 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
-				       int min, gfp_t gfp_flags)
+				       int min)
 {
 	struct page *page;
 
 	if (cache->nobjs >= min)
 		return 0;
 	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-		page = alloc_page(gfp_flags);
+		page = alloc_page(GFP_KERNEL);
 		if (!page)
 			return -ENOMEM;
 		set_page_private(page, 0);
@@ -247,41 +246,28 @@ static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
 	free_page((unsigned long)mc->objects[--mc->nobjs]);
 }
 
-static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 {
 	int r;
 
+	kvm_mmu_free_some_pages(vcpu);
 	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
-				   pte_chain_cache, 4, gfp_flags);
+				   pte_chain_cache, 4);
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
-				   rmap_desc_cache, 1, gfp_flags);
+				   rmap_desc_cache, 1);
 	if (r)
 		goto out;
-	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4, gfp_flags);
+	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4);
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
-				    mmu_page_header_cache, 4, gfp_flags);
+				    mmu_page_header_cache, 4);
 out:
 	return r;
 }
 
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
-{
-	int r;
-
-	r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
-	kvm_mmu_free_some_pages(vcpu);
-	if (r < 0) {
-		mutex_unlock(&vcpu->kvm->lock);
-		r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
-		mutex_lock(&vcpu->kvm->lock);
-	}
-	return r;
-}
-
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
 	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);