author		Markus Rechberger <markus.rechberger@amd.com>	2007-02-19 07:37:47 -0500
committer	Avi Kivity <avi@qumranet.com>	2007-03-04 04:12:39 -0500
commit		5972e9535e94bf875eb8eab8a667ba04c7583874 (patch)
tree		bdce4e46ab7277c8811cef4b9464646eff3fed3e /drivers
parent		9d8f549dc69b1fc65d0b03916c02f12ca49b3ea0 (diff)
KVM: Use page_private()/set_page_private() apis
Besides using an established API, this allows using kvm in older kernels.

Signed-off-by: Markus Rechberger <markus.rechberger@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
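For reference, the accessors this patch adopts are thin macros over the same field. A paraphrased sketch, assuming the include/linux/mm.h definitions of kernels in this era (not quoted from this tree):

/* Paraphrased accessors, assuming the mm.h definitions of this era:
 * both are trivial wrappers around page->private, so the conversion
 * below is mechanical and does not change behaviour. */
#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))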
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/kvm/kvm.h	2
-rw-r--r--	drivers/kvm/kvm_main.c	2
-rw-r--r--	drivers/kvm/mmu.c	36
3 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 04574a9d4430..9a49b2ed2a1e 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -523,7 +523,7 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 {
 	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
 
-	return (struct kvm_mmu_page *)page->private;
+	return (struct kvm_mmu_page *)page_private(page);
 }
 
 static inline u16 read_fs(void)
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 13a99cac3679..122c05f283e1 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -670,7 +670,7 @@ raced:
 						     | __GFP_ZERO);
 			if (!new.phys_mem[i])
 				goto out_free;
-			new.phys_mem[i]->private = 0;
+			set_page_private(new.phys_mem[i],0);
 		}
 	}
 
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index be793770f31b..a1a93368f314 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -298,18 +298,18 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
 	if (!is_rmap_pte(*spte))
 		return;
 	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-	if (!page->private) {
+	if (!page_private(page)) {
 		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
-		page->private = (unsigned long)spte;
-	} else if (!(page->private & 1)) {
+		set_page_private(page,(unsigned long)spte);
+	} else if (!(page_private(page) & 1)) {
 		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
 		desc = mmu_alloc_rmap_desc(vcpu);
-		desc->shadow_ptes[0] = (u64 *)page->private;
+		desc->shadow_ptes[0] = (u64 *)page_private(page);
 		desc->shadow_ptes[1] = spte;
-		page->private = (unsigned long)desc | 1;
+		set_page_private(page,(unsigned long)desc | 1);
 	} else {
 		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
-		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+		desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
 		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
 			desc = desc->more;
 		if (desc->shadow_ptes[RMAP_EXT-1]) {
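The rmap hunks above and below all manipulate one tagged word per page. A minimal sketch of that encoding, using hypothetical helper names that do not appear in the patch:

/* Sketch of the rmap encoding (helpers are hypothetical, not from
 * this patch).  The page-private word holds either a single u64 *spte
 * (bit 0 clear) or a pointer to a struct kvm_rmap_desc tagged with
 * bit 0 set; sptes are at least 8-byte aligned, so bit 0 is free to
 * serve as the tag. */
static inline int rmap_is_desc_list(struct page *page)
{
	return page_private(page) & 1;
}

static inline u64 *rmap_single_spte(struct page *page)
{
	return (u64 *)page_private(page);	/* valid when bit 0 is clear */
}

static inline struct kvm_rmap_desc *rmap_desc_list(struct page *page)
{
	return (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
}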
@@ -337,12 +337,12 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
 	if (j != 0)
 		return;
 	if (!prev_desc && !desc->more)
-		page->private = (unsigned long)desc->shadow_ptes[0];
+		set_page_private(page,(unsigned long)desc->shadow_ptes[0]);
 	else
 		if (prev_desc)
 			prev_desc->more = desc->more;
 		else
-			page->private = (unsigned long)desc->more | 1;
+			set_page_private(page,(unsigned long)desc->more | 1);
 	mmu_free_rmap_desc(vcpu, desc);
 }
 
@@ -356,20 +356,20 @@ static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
 	if (!is_rmap_pte(*spte))
 		return;
 	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-	if (!page->private) {
+	if (!page_private(page)) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
 		BUG();
-	} else if (!(page->private & 1)) {
+	} else if (!(page_private(page) & 1)) {
 		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
-		if ((u64 *)page->private != spte) {
+		if ((u64 *)page_private(page) != spte) {
 			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
 			       spte, *spte);
 			BUG();
 		}
-		page->private = 0;
+		set_page_private(page,0);
 	} else {
 		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
-		desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+		desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
 		prev_desc = NULL;
 		while (desc) {
 			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
@@ -398,11 +398,11 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 	BUG_ON(!slot);
 	page = gfn_to_page(slot, gfn);
 
-	while (page->private) {
-		if (!(page->private & 1))
-			spte = (u64 *)page->private;
+	while (page_private(page)) {
+		if (!(page_private(page) & 1))
+			spte = (u64 *)page_private(page);
 		else {
-			desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+			desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
 			spte = desc->shadow_ptes[0];
 		}
 		BUG_ON(!spte);
@@ -1218,7 +1218,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
 		INIT_LIST_HEAD(&page_header->link);
 		if ((page = alloc_page(GFP_KERNEL)) == NULL)
 			goto error_1;
-		page->private = (unsigned long)page_header;
+		set_page_private(page, (unsigned long)page_header);
 		page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
 		memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
 		list_add(&page_header->link, &vcpu->free_pages);
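Taken together, the kvm.h hunk and this alloc_mmu_pages() hunk form a store/load pair. A hypothetical sketch of that round trip (the example_ function is illustrative only, not part of the patch):

/* Illustrative only: alloc_mmu_pages() stores the shadow-page header
 * with set_page_private(), and page_header() (first hunk) recovers
 * it later from the shadow page's host physical address. */
static struct kvm_mmu_page *example_round_trip(struct kvm_mmu_page *header)
{
	struct page *page = alloc_page(GFP_KERNEL);
	hpa_t hpa;

	if (!page)
		return NULL;
	set_page_private(page, (unsigned long)header);
	hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
	return page_header(hpa);	/* yields the same header pointer */
}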