diff options
author | Avi Kivity <avi@qumranet.com> | 2007-07-20 01:18:27 -0400 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2007-07-20 13:23:59 -0400 |
commit | c1158e63dfeb3928e94c768f0a403b3e0e799f70 (patch) | |
tree | 03a30831c27255d82d479b0242017fb2e9c342a5 /drivers/kvm | |
parent | 35f3f28613bc7263949db23a4c7078e425810c8c (diff) |
KVM: MMU: Fix oopses with SLUB
The kvm mmu uses page->private on shadow page tables; so does slub, and
an oops results. Fix by allocating regular pages for shadows instead of
using slub.
Tested-by: S.Çağlar Onur <caglar@pardus.org.tr>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm')
-rw-r--r-- | drivers/kvm/mmu.c | 39 |
1 file changed, 26 insertions, 13 deletions
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c index 48d28f1ff4a1..d99d2fe53dca 100644 --- a/drivers/kvm/mmu.c +++ b/drivers/kvm/mmu.c | |||
@@ -154,7 +154,6 @@ struct kvm_rmap_desc { | |||
154 | 154 | ||
155 | static struct kmem_cache *pte_chain_cache; | 155 | static struct kmem_cache *pte_chain_cache; |
156 | static struct kmem_cache *rmap_desc_cache; | 156 | static struct kmem_cache *rmap_desc_cache; |
157 | static struct kmem_cache *mmu_page_cache; | ||
158 | static struct kmem_cache *mmu_page_header_cache; | 157 | static struct kmem_cache *mmu_page_header_cache; |
159 | 158 | ||
160 | static int is_write_protection(struct kvm_vcpu *vcpu) | 159 | static int is_write_protection(struct kvm_vcpu *vcpu) |
@@ -225,6 +224,29 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) | |||
225 | kfree(mc->objects[--mc->nobjs]); | 224 | kfree(mc->objects[--mc->nobjs]); |
226 | } | 225 | } |
227 | 226 | ||
227 | static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, | ||
228 | int min, gfp_t gfp_flags) | ||
229 | { | ||
230 | struct page *page; | ||
231 | |||
232 | if (cache->nobjs >= min) | ||
233 | return 0; | ||
234 | while (cache->nobjs < ARRAY_SIZE(cache->objects)) { | ||
235 | page = alloc_page(gfp_flags); | ||
236 | if (!page) | ||
237 | return -ENOMEM; | ||
238 | set_page_private(page, 0); | ||
239 | cache->objects[cache->nobjs++] = page_address(page); | ||
240 | } | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc) | ||
245 | { | ||
246 | while (mc->nobjs) | ||
247 | __free_page(mc->objects[--mc->nobjs]); | ||
248 | } | ||
249 | |||
228 | static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags) | 250 | static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags) |
229 | { | 251 | { |
230 | int r; | 252 | int r; |
@@ -237,8 +259,7 @@ static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags) | |||
237 | rmap_desc_cache, 1, gfp_flags); | 259 | rmap_desc_cache, 1, gfp_flags); |
238 | if (r) | 260 | if (r) |
239 | goto out; | 261 | goto out; |
240 | r = mmu_topup_memory_cache(&vcpu->mmu_page_cache, | 262 | r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4, gfp_flags); |
241 | mmu_page_cache, 4, gfp_flags); | ||
242 | if (r) | 263 | if (r) |
243 | goto out; | 264 | goto out; |
244 | r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache, | 265 | r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache, |
@@ -266,7 +287,7 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu) | |||
266 | { | 287 | { |
267 | mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache); | 288 | mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache); |
268 | mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache); | 289 | mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache); |
269 | mmu_free_memory_cache(&vcpu->mmu_page_cache); | 290 | mmu_free_memory_cache_page(&vcpu->mmu_page_cache); |
270 | mmu_free_memory_cache(&vcpu->mmu_page_header_cache); | 291 | mmu_free_memory_cache(&vcpu->mmu_page_header_cache); |
271 | } | 292 | } |
272 | 293 | ||
@@ -458,7 +479,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, | |||
458 | { | 479 | { |
459 | ASSERT(is_empty_shadow_page(page_head->spt)); | 480 | ASSERT(is_empty_shadow_page(page_head->spt)); |
460 | list_del(&page_head->link); | 481 | list_del(&page_head->link); |
461 | kfree(page_head->spt); | 482 | __free_page(virt_to_page(page_head->spt)); |
462 | kfree(page_head); | 483 | kfree(page_head); |
463 | ++kvm->n_free_mmu_pages; | 484 | ++kvm->n_free_mmu_pages; |
464 | } | 485 | } |
@@ -1301,8 +1322,6 @@ void kvm_mmu_module_exit(void) | |||
1301 | kmem_cache_destroy(pte_chain_cache); | 1322 | kmem_cache_destroy(pte_chain_cache); |
1302 | if (rmap_desc_cache) | 1323 | if (rmap_desc_cache) |
1303 | kmem_cache_destroy(rmap_desc_cache); | 1324 | kmem_cache_destroy(rmap_desc_cache); |
1304 | if (mmu_page_cache) | ||
1305 | kmem_cache_destroy(mmu_page_cache); | ||
1306 | if (mmu_page_header_cache) | 1325 | if (mmu_page_header_cache) |
1307 | kmem_cache_destroy(mmu_page_header_cache); | 1326 | kmem_cache_destroy(mmu_page_header_cache); |
1308 | } | 1327 | } |
@@ -1320,12 +1339,6 @@ int kvm_mmu_module_init(void) | |||
1320 | if (!rmap_desc_cache) | 1339 | if (!rmap_desc_cache) |
1321 | goto nomem; | 1340 | goto nomem; |
1322 | 1341 | ||
1323 | mmu_page_cache = kmem_cache_create("kvm_mmu_page", | ||
1324 | PAGE_SIZE, | ||
1325 | PAGE_SIZE, 0, NULL); | ||
1326 | if (!mmu_page_cache) | ||
1327 | goto nomem; | ||
1328 | |||
1329 | mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", | 1342 | mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header", |
1330 | sizeof(struct kvm_mmu_page), | 1343 | sizeof(struct kvm_mmu_page), |
1331 | 0, 0, NULL); | 1344 | 0, 0, NULL); |