author     Avi Kivity <avi@qumranet.com>  2007-11-21 08:28:32 -0500
committer  Avi Kivity <avi@qumranet.com>  2008-01-30 10:53:12 -0500
commit     4db3531487bdde4027b701da7c1b8d094ae2cc91
tree       b4901a0290549bdf6c43ec87ca6e006936333b2f  /drivers/kvm
parent     1d28f5f4a4984be4cd7200ed512c94517c13e392
KVM: MMU: Rename variables of type 'struct kvm_mmu_page *'
These are traditionally named 'page', but even more traditionally, that name
is reserved for variables that point to a 'struct page'. Rename them to 'sp'
(for "shadow page").
Signed-off-by: Avi Kivity <avi@qumranet.com>
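As a quick illustration of the convention the rename enforces (a hypothetical sketch with stand-in types, not code from this patch): "page" stays reserved for pointers to struct page, while a pointer to struct kvm_mmu_page is spelled "sp", so both can sit in one scope without ambiguity; rmap_remove() in the diff below ends up holding exactly such a pair (sp and release_page).

/* Sketch only; the struct bodies here are stand-ins, not the kernel's definitions. */
struct page { int frame; };                   /* generic page-frame descriptor */
struct kvm_mmu_page { unsigned long *spt; };  /* shadow-page header */

static void naming_convention(struct kvm_mmu_page *sp, struct page *page)
{
	/* "sp" is the shadow-page header, "page" the real page frame. */
	(void)sp;
	(void)page;
}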
Diffstat (limited to 'drivers/kvm')
-rw-r--r--  drivers/kvm/mmu.c | 300
1 file changed, 146 insertions(+), 154 deletions(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 9b75b102b8d1..86896da3e837 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -367,7 +367,7 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
  */
 static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct kvm_rmap_desc *desc;
 	unsigned long *rmapp;
 	int i;
@@ -375,8 +375,8 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 	if (!is_rmap_pte(*spte))
 		return;
 	gfn = unalias_gfn(vcpu->kvm, gfn);
-	page = page_header(__pa(spte));
-	page->gfns[spte - page->spt] = gfn;
+	sp = page_header(__pa(spte));
+	sp->gfns[spte - sp->spt] = gfn;
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
 	if (!*rmapp) {
 		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
@@ -429,20 +429,20 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 {
 	struct kvm_rmap_desc *desc;
 	struct kvm_rmap_desc *prev_desc;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct page *release_page;
 	unsigned long *rmapp;
 	int i;
 
 	if (!is_rmap_pte(*spte))
 		return;
-	page = page_header(__pa(spte));
+	sp = page_header(__pa(spte));
 	release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
 	if (is_writeble_pte(*spte))
 		kvm_release_page_dirty(release_page);
 	else
 		kvm_release_page_clean(release_page);
-	rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
+	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
 	if (!*rmapp) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
 		BUG();
@@ -537,14 +537,13 @@ static int is_empty_shadow_page(u64 *spt)
 }
 #endif
 
-static void kvm_mmu_free_page(struct kvm *kvm,
-			      struct kvm_mmu_page *page_head)
+static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
-	ASSERT(is_empty_shadow_page(page_head->spt));
-	list_del(&page_head->link);
-	__free_page(virt_to_page(page_head->spt));
-	__free_page(virt_to_page(page_head->gfns));
-	kfree(page_head);
+	ASSERT(is_empty_shadow_page(sp->spt));
+	list_del(&sp->link);
+	__free_page(virt_to_page(sp->spt));
+	__free_page(virt_to_page(sp->gfns));
+	kfree(sp);
 	++kvm->n_free_mmu_pages;
 }
 
@@ -556,27 +555,26 @@ static unsigned kvm_page_table_hashfn(gfn_t gfn)
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 					       u64 *parent_pte)
 {
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 
 	if (!vcpu->kvm->n_free_mmu_pages)
 		return NULL;
 
-	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
-				      sizeof *page);
-	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
-	page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
-	set_page_private(virt_to_page(page->spt), (unsigned long)page);
-	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
-	ASSERT(is_empty_shadow_page(page->spt));
-	page->slot_bitmap = 0;
-	page->multimapped = 0;
-	page->parent_pte = parent_pte;
+	sp = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache, sizeof *sp);
+	sp->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+	sp->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+	list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
+	ASSERT(is_empty_shadow_page(sp->spt));
+	sp->slot_bitmap = 0;
+	sp->multimapped = 0;
+	sp->parent_pte = parent_pte;
 	--vcpu->kvm->n_free_mmu_pages;
-	return page;
+	return sp;
 }
 
 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
-				    struct kvm_mmu_page *page, u64 *parent_pte)
+				    struct kvm_mmu_page *sp, u64 *parent_pte)
 {
 	struct kvm_pte_chain *pte_chain;
 	struct hlist_node *node;
@@ -584,20 +582,20 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
 
 	if (!parent_pte)
 		return;
-	if (!page->multimapped) {
-		u64 *old = page->parent_pte;
+	if (!sp->multimapped) {
+		u64 *old = sp->parent_pte;
 
 		if (!old) {
-			page->parent_pte = parent_pte;
+			sp->parent_pte = parent_pte;
 			return;
 		}
-		page->multimapped = 1;
+		sp->multimapped = 1;
 		pte_chain = mmu_alloc_pte_chain(vcpu);
-		INIT_HLIST_HEAD(&page->parent_ptes);
-		hlist_add_head(&pte_chain->link, &page->parent_ptes);
+		INIT_HLIST_HEAD(&sp->parent_ptes);
+		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
 		pte_chain->parent_ptes[0] = old;
 	}
-	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
+	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
 		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
 			continue;
 		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
@@ -608,23 +606,23 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
 	}
 	pte_chain = mmu_alloc_pte_chain(vcpu);
 	BUG_ON(!pte_chain);
-	hlist_add_head(&pte_chain->link, &page->parent_ptes);
+	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
 	pte_chain->parent_ptes[0] = parent_pte;
 }
 
-static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
+static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
 				       u64 *parent_pte)
 {
 	struct kvm_pte_chain *pte_chain;
 	struct hlist_node *node;
 	int i;
 
-	if (!page->multimapped) {
-		BUG_ON(page->parent_pte != parent_pte);
-		page->parent_pte = NULL;
+	if (!sp->multimapped) {
+		BUG_ON(sp->parent_pte != parent_pte);
+		sp->parent_pte = NULL;
 		return;
 	}
-	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
+	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
 		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
 			if (!pte_chain->parent_ptes[i])
 				break;
@@ -640,9 +638,9 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
 			if (i == 0) {
 				hlist_del(&pte_chain->link);
 				mmu_free_pte_chain(pte_chain);
-				if (hlist_empty(&page->parent_ptes)) {
-					page->multimapped = 0;
-					page->parent_pte = NULL;
+				if (hlist_empty(&sp->parent_ptes)) {
+					sp->multimapped = 0;
+					sp->parent_pte = NULL;
 				}
 			}
 			return;
@@ -650,22 +648,21 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
 	BUG();
 }
 
-static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
-						gfn_t gfn)
+static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 {
 	unsigned index;
 	struct hlist_head *bucket;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
 
 	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &kvm->mmu_page_hash[index];
-	hlist_for_each_entry(page, node, bucket, hash_link)
-		if (page->gfn == gfn && !page->role.metaphysical) {
+	hlist_for_each_entry(sp, node, bucket, hash_link)
+		if (sp->gfn == gfn && !sp->role.metaphysical) {
 			pgprintk("%s: found role %x\n",
-				 __FUNCTION__, page->role.word);
-			return page;
+				 __FUNCTION__, sp->role.word);
+			return sp;
 		}
 	return NULL;
 }
@@ -682,7 +679,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	unsigned index;
 	unsigned quadrant;
 	struct hlist_head *bucket;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
 
 	role.word = 0;
@@ -699,35 +696,35 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		 gfn, role.word);
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &vcpu->kvm->mmu_page_hash[index];
-	hlist_for_each_entry(page, node, bucket, hash_link)
-		if (page->gfn == gfn && page->role.word == role.word) {
-			mmu_page_add_parent_pte(vcpu, page, parent_pte);
+	hlist_for_each_entry(sp, node, bucket, hash_link)
+		if (sp->gfn == gfn && sp->role.word == role.word) {
+			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
 			pgprintk("%s: found\n", __FUNCTION__);
-			return page;
+			return sp;
 		}
-	page = kvm_mmu_alloc_page(vcpu, parent_pte);
-	if (!page)
-		return page;
+	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
+	if (!sp)
+		return sp;
 	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
-	page->gfn = gfn;
-	page->role = role;
-	hlist_add_head(&page->hash_link, bucket);
-	vcpu->mmu.prefetch_page(vcpu, page);
+	sp->gfn = gfn;
+	sp->role = role;
+	hlist_add_head(&sp->hash_link, bucket);
+	vcpu->mmu.prefetch_page(vcpu, sp);
 	if (!metaphysical)
 		rmap_write_protect(vcpu->kvm, gfn);
-	return page;
+	return sp;
 }
 
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
-					 struct kvm_mmu_page *page)
+					 struct kvm_mmu_page *sp)
 {
 	unsigned i;
 	u64 *pt;
 	u64 ent;
 
-	pt = page->spt;
+	pt = sp->spt;
 
-	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
+	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
 			if (is_shadow_present_pte(pt[i]))
 				rmap_remove(kvm, &pt[i]);
@@ -749,10 +746,9 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
 	kvm_flush_remote_tlbs(kvm);
 }
 
-static void kvm_mmu_put_page(struct kvm_mmu_page *page,
-			     u64 *parent_pte)
+static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
 {
-	mmu_page_remove_parent_pte(page, parent_pte);
+	mmu_page_remove_parent_pte(sp, parent_pte);
 }
 
 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
@@ -764,32 +760,31 @@ static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
 			kvm->vcpus[i]->last_pte_updated = NULL;
 }
 
-static void kvm_mmu_zap_page(struct kvm *kvm,
-			     struct kvm_mmu_page *page)
+static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	u64 *parent_pte;
 
 	++kvm->stat.mmu_shadow_zapped;
-	while (page->multimapped || page->parent_pte) {
-		if (!page->multimapped)
-			parent_pte = page->parent_pte;
+	while (sp->multimapped || sp->parent_pte) {
+		if (!sp->multimapped)
+			parent_pte = sp->parent_pte;
 		else {
 			struct kvm_pte_chain *chain;
 
-			chain = container_of(page->parent_ptes.first,
+			chain = container_of(sp->parent_ptes.first,
 					     struct kvm_pte_chain, link);
 			parent_pte = chain->parent_ptes[0];
 		}
 		BUG_ON(!parent_pte);
-		kvm_mmu_put_page(page, parent_pte);
+		kvm_mmu_put_page(sp, parent_pte);
 		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
 	}
-	kvm_mmu_page_unlink_children(kvm, page);
-	if (!page->root_count) {
-		hlist_del(&page->hash_link);
-		kvm_mmu_free_page(kvm, page);
+	kvm_mmu_page_unlink_children(kvm, sp);
+	if (!sp->root_count) {
+		hlist_del(&sp->hash_link);
+		kvm_mmu_free_page(kvm, sp);
 	} else
-		list_move(&page->link, &kvm->active_mmu_pages);
+		list_move(&sp->link, &kvm->active_mmu_pages);
 	kvm_mmu_reset_last_pte_updated(kvm);
 }
 
@@ -831,7 +826,7 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	unsigned index;
 	struct hlist_head *bucket;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *n;
 	int r;
 
@@ -839,11 +834,11 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	r = 0;
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &kvm->mmu_page_hash[index];
-	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
-		if (page->gfn == gfn && !page->role.metaphysical) {
+	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
+		if (sp->gfn == gfn && !sp->role.metaphysical) {
 			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
-				 page->role.word);
-			kvm_mmu_zap_page(kvm, page);
+				 sp->role.word);
+			kvm_mmu_zap_page(kvm, sp);
 			r = 1;
 		}
 	return r;
@@ -851,21 +846,20 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 
 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 {
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 
-	while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
-		pgprintk("%s: zap %lx %x\n",
-			 __FUNCTION__, gfn, page->role.word);
-		kvm_mmu_zap_page(kvm, page);
+	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
+		pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
+		kvm_mmu_zap_page(kvm, sp);
 	}
 }
 
 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 {
 	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
-	struct kvm_mmu_page *page_head = page_header(__pa(pte));
+	struct kvm_mmu_page *sp = page_header(__pa(pte));
 
-	__set_bit(slot, &page_head->slot_bitmap);
+	__set_bit(slot, &sp->slot_bitmap);
 }
 
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
@@ -951,7 +945,7 @@ static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
 static void mmu_free_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 
 	if (!VALID_PAGE(vcpu->mmu.root_hpa))
 		return;
@@ -959,8 +953,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->mmu.root_hpa;
 
-		page = page_header(root);
-		--page->root_count;
+		sp = page_header(root);
+		--sp->root_count;
 		vcpu->mmu.root_hpa = INVALID_PAGE;
 		return;
 	}
@@ -970,8 +964,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 
 		if (root) {
 			root &= PT64_BASE_ADDR_MASK;
-			page = page_header(root);
-			--page->root_count;
+			sp = page_header(root);
+			--sp->root_count;
 		}
 		vcpu->mmu.pae_root[i] = INVALID_PAGE;
 	}
@@ -982,7 +976,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
 	gfn_t root_gfn;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 
 	root_gfn = vcpu->cr3 >> PAGE_SHIFT;
 
@@ -991,10 +985,10 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 		hpa_t root = vcpu->mmu.root_hpa;
 
 		ASSERT(!VALID_PAGE(root));
-		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
+		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
 					PT64_ROOT_LEVEL, 0, 0, NULL);
-		root = __pa(page->spt);
-		++page->root_count;
+		root = __pa(sp->spt);
+		++sp->root_count;
 		vcpu->mmu.root_hpa = root;
 		return;
 	}
@@ -1011,11 +1005,11 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
 		} else if (vcpu->mmu.root_level == 0)
 			root_gfn = 0;
-		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
+		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 					PT32_ROOT_LEVEL, !is_paging(vcpu),
 					0, NULL);
-		root = __pa(page->spt);
-		++page->root_count;
+		root = __pa(sp->spt);
+		++sp->root_count;
 		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
 	}
 	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
@@ -1196,7 +1190,7 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
 }
 
 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu_page *page,
+				  struct kvm_mmu_page *sp,
 				  u64 *spte)
 {
 	u64 pte;
@@ -1204,7 +1198,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 
 	pte = *spte;
 	if (is_shadow_present_pte(pte)) {
-		if (page->role.level == PT_PAGE_TABLE_LEVEL)
+		if (sp->role.level == PT_PAGE_TABLE_LEVEL)
 			rmap_remove(vcpu->kvm, spte);
 		else {
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
@@ -1215,23 +1209,21 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
-				  struct kvm_mmu_page *page,
+				  struct kvm_mmu_page *sp,
 				  u64 *spte,
 				  const void *new, int bytes,
 				  int offset_in_pte)
 {
-	if (page->role.level != PT_PAGE_TABLE_LEVEL) {
+	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
 		++vcpu->kvm->stat.mmu_pde_zapped;
 		return;
 	}
 
 	++vcpu->kvm->stat.mmu_pte_updated;
-	if (page->role.glevels == PT32_ROOT_LEVEL)
-		paging32_update_pte(vcpu, page, spte, new, bytes,
-				    offset_in_pte);
+	if (sp->role.glevels == PT32_ROOT_LEVEL)
+		paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
 	else
-		paging64_update_pte(vcpu, page, spte, new, bytes,
-				    offset_in_pte);
+		paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
 }
 
 static bool need_remote_flush(u64 old, u64 new)
@@ -1266,7 +1258,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes)
 {
 	gfn_t gfn = gpa >> PAGE_SHIFT;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct hlist_node *node, *n;
 	struct hlist_head *bucket;
 	unsigned index;
@@ -1296,10 +1288,10 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	}
 	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
 	bucket = &vcpu->kvm->mmu_page_hash[index];
-	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
-		if (page->gfn != gfn || page->role.metaphysical)
+	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
+		if (sp->gfn != gfn || sp->role.metaphysical)
 			continue;
-		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
+		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
 		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
 		misaligned |= bytes < 4;
 		if (misaligned || flooded) {
@@ -1314,15 +1306,15 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			 * page.
 			 */
 			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
-				 gpa, bytes, page->role.word);
-			kvm_mmu_zap_page(vcpu->kvm, page);
+				 gpa, bytes, sp->role.word);
+			kvm_mmu_zap_page(vcpu->kvm, sp);
 			++vcpu->kvm->stat.mmu_flooded;
 			continue;
 		}
 		page_offset = offset;
-		level = page->role.level;
+		level = sp->role.level;
 		npte = 1;
-		if (page->role.glevels == PT32_ROOT_LEVEL) {
+		if (sp->role.glevels == PT32_ROOT_LEVEL) {
 			page_offset <<= 1; /* 32->64 */
 			/*
 			 * A 32-bit pde maps 4MB while the shadow pdes map
@@ -1336,14 +1328,14 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			}
 			quadrant = page_offset >> PAGE_SHIFT;
 			page_offset &= ~PAGE_MASK;
-			if (quadrant != page->role.quadrant)
+			if (quadrant != sp->role.quadrant)
 				continue;
 		}
-		spte = &page->spt[page_offset / sizeof(*spte)];
+		spte = &sp->spt[page_offset / sizeof(*spte)];
 		while (npte--) {
 			entry = *spte;
-			mmu_pte_write_zap_pte(vcpu, page, spte);
-			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
+			mmu_pte_write_zap_pte(vcpu, sp, spte);
+			mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
 					      page_offset & (pte_size - 1));
 			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
 			++spte;
@@ -1362,11 +1354,11 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
 	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
-		struct kvm_mmu_page *page;
+		struct kvm_mmu_page *sp;
 
-		page = container_of(vcpu->kvm->active_mmu_pages.prev,
+		sp = container_of(vcpu->kvm->active_mmu_pages.prev,
 				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu->kvm, page);
+		kvm_mmu_zap_page(vcpu->kvm, sp);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
 }
@@ -1413,12 +1405,12 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 
 	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
-		page = container_of(vcpu->kvm->active_mmu_pages.next,
+		sp = container_of(vcpu->kvm->active_mmu_pages.next,
 				    struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu->kvm, page);
+		kvm_mmu_zap_page(vcpu->kvm, sp);
 	}
 	free_page((unsigned long)vcpu->mmu.pae_root);
 }
@@ -1480,16 +1472,16 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 
-	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
+	list_for_each_entry(sp, &kvm->active_mmu_pages, link) {
 		int i;
 		u64 *pt;
 
-		if (!test_bit(slot, &page->slot_bitmap))
+		if (!test_bit(slot, &sp->slot_bitmap))
 			continue;
 
-		pt = page->spt;
+		pt = sp->spt;
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
 			if (pt[i] & PT_WRITABLE_MASK)
@@ -1499,10 +1491,10 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 
 void kvm_mmu_zap_all(struct kvm *kvm)
 {
-	struct kvm_mmu_page *page, *node;
+	struct kvm_mmu_page *sp, *node;
 
-	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
-		kvm_mmu_zap_page(kvm, page);
+	list_for_each_entry_safe(sp, node, &kvm->active_mmu_pages, link)
+		kvm_mmu_zap_page(kvm, sp);
 
 	kvm_flush_remote_tlbs(kvm);
 }
@@ -1668,13 +1660,13 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
 static int count_writable_mappings(struct kvm_vcpu *vcpu)
 {
 	int nmaps = 0;
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	int i;
 
-	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
-		u64 *pt = page->spt;
+	list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
+		u64 *pt = sp->spt;
 
-		if (page->role.level != PT_PAGE_TABLE_LEVEL)
+		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
 			continue;
 
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
@@ -1702,23 +1694,23 @@ static void audit_rmap(struct kvm_vcpu *vcpu)
 
 static void audit_write_protection(struct kvm_vcpu *vcpu)
 {
-	struct kvm_mmu_page *page;
+	struct kvm_mmu_page *sp;
 	struct kvm_memory_slot *slot;
 	unsigned long *rmapp;
 	gfn_t gfn;
 
-	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
-		if (page->role.metaphysical)
+	list_for_each_entry(sp, &vcpu->kvm->active_mmu_pages, link) {
+		if (sp->role.metaphysical)
 			continue;
 
-		slot = gfn_to_memslot(vcpu->kvm, page->gfn);
-		gfn = unalias_gfn(vcpu->kvm, page->gfn);
+		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
+		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
 		rmapp = &slot->rmap[gfn - slot->base_gfn];
 		if (*rmapp)
 			printk(KERN_ERR "%s: (%s) shadow page has writable"
 			       " mappings: gfn %lx role %x\n",
-			       __FUNCTION__, audit_msg, page->gfn,
-			       page->role.word);
+			       __FUNCTION__, audit_msg, sp->gfn,
+			       sp->role.word);
 	}
 }
 