author      Changbin Du <changbin.du@intel.com>      2018-01-30 06:19:49 -0500
committer   Zhenyu Wang <zhenyuw@linux.intel.com>    2018-03-06 00:19:18 -0500
commit      44b467338094d86586d3ec351d8594a6cef0842a (patch)
tree        9f1bdc0ee3d4791c2caf3798adcdc1521c0f02a7
parent      72f03d7ea16794c3ac4b7ae945510cf0015d3c3c (diff)
drm/i915/gvt: Rework shadow page management code
This is another big one: the GVT shadow page management code is heavily
refined.
The new code uses only struct intel_vgpu_ppgtt_spt to represent a vGPU
shadow page table, with or without an associated guest page. A pure shadow
page (no guest page associated) is used to shadow a split 2M huge GTT
page; in that case, spt.guest_page.gfn should be zero. The reworked
structure is sketched just below.
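
As a reference, here is a minimal sketch of the reworked structure,
assembled from the gtt.h hunk later in this patch (field names and layout
are taken verbatim from the diff; the referenced types come from the
existing gvt headers):

  struct intel_vgpu_ppgtt_spt {
          atomic_t refcount;
          struct intel_vgpu *vgpu;
          struct hlist_node node;         /* keyed by shadow_page.mfn */

          struct {
                  intel_gvt_gtt_type_t type;
                  void *vaddr;            /* page_address() of the shadow page */
                  struct page *page;
                  unsigned long mfn;      /* dma addr >> I915_GTT_PAGE_SHIFT */
          } shadow_page;

          struct {
                  intel_gvt_gtt_type_t type;
                  unsigned long gfn;      /* zero for a pure shadow page */
                  unsigned long write_cnt;
                  struct intel_vgpu_page_track track;
                  struct intel_vgpu_oos_page *oos_page;
          } guest_page;

          DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
          struct list_head post_shadow_list;
  };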
To search for an existing shadow page table, there are two new interfaces
(a condensed usage sketch follows this list):
- intel_vgpu_find_spt_by_gfn(): find an spt by guest gfn. It must not
  be a pure spt.
- intel_vgpu_find_spt_by_mfn(): find the spt whose shadow page mfn appears
  in a shadowed PTE.
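
For illustration, here is a minimal sketch of how the gfn lookup is used
when shadowing a guest entry, condensed from
ppgtt_populate_shadow_page_by_guest_entry() in the gtt.c hunk below (the
wrapper name here is hypothetical, error paths and tracing are trimmed,
and every helper called is one that appears in this patch):

  /* Hypothetical condensation of ppgtt_populate_shadow_page_by_guest_entry(). */
  static struct intel_vgpu_ppgtt_spt *shadow_pt_for_guest_entry(
                  struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
  {
          struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
          struct intel_vgpu_ppgtt_spt *spt;
          int ret;

          /* Reuse the shadow page table if this gfn is already tracked. */
          spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
          if (spt) {
                  ppgtt_get_shadow_page(spt);
                  return spt;
          }

          /* Otherwise allocate an spt, write-protect the guest page and
           * populate the shadow entries from the guest page table.
           */
          spt = ppgtt_alloc_shadow_page(vgpu, get_next_pt_type(we->type),
                                        ops->get_pfn(we));
          if (IS_ERR(spt))
                  return spt;

          ret = intel_gvt_hypervisor_enable_page_track(vgpu,
                          &spt->guest_page.track);
          if (!ret)
                  ret = ppgtt_populate_shadow_page(spt);

          return ret ? ERR_PTR(ret) : spt;
  }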
The oos_page management remains as it was.
v2: Split some changes into small standalone patches.
Signed-off-by: Changbin Du <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
-rw-r--r--   drivers/gpu/drm/i915/gvt/gtt.c   | 470
-rw-r--r--   drivers/gpu/drm/i915/gvt/gtt.h   |  51
-rw-r--r--   drivers/gpu/drm/i915/gvt/trace.h |   2
3 files changed, 203 insertions(+), 320 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 7b4a345a0d52..2189c45d44fc 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -593,11 +593,11 @@ static inline int ppgtt_spt_set_entry(
593 | 593 | ||
594 | #define ppgtt_get_guest_entry(spt, e, index) \ | 594 | #define ppgtt_get_guest_entry(spt, e, index) \ |
595 | ppgtt_spt_get_entry(spt, NULL, \ | 595 | ppgtt_spt_get_entry(spt, NULL, \ |
596 | spt->guest_page_type, e, index, true) | 596 | spt->guest_page.type, e, index, true) |
597 | 597 | ||
598 | #define ppgtt_set_guest_entry(spt, e, index) \ | 598 | #define ppgtt_set_guest_entry(spt, e, index) \ |
599 | ppgtt_spt_set_entry(spt, NULL, \ | 599 | ppgtt_spt_set_entry(spt, NULL, \ |
600 | spt->guest_page_type, e, index, true) | 600 | spt->guest_page.type, e, index, true) |
601 | 601 | ||
602 | #define ppgtt_get_shadow_entry(spt, e, index) \ | 602 | #define ppgtt_get_shadow_entry(spt, e, index) \ |
603 | ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \ | 603 | ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \ |
@@ -607,52 +607,29 @@ static inline int ppgtt_spt_set_entry(
607 | ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \ | 607 | ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \ |
608 | spt->shadow_page.type, e, index, false) | 608 | spt->shadow_page.type, e, index, false) |
609 | 609 | ||
610 | /** | 610 | #define page_track_to_ppgtt_spt(ptr) \ |
611 | * intel_vgpu_init_page_track - init a page track data structure | 611 | container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page.track) |
612 | * @vgpu: a vGPU | 612 | |
613 | * @t: a page track data structure | 613 | static void *alloc_spt(gfp_t gfp_mask) |
614 | * @gfn: guest memory page frame number | ||
615 | * @handler: the function will be called when target guest memory page has | ||
616 | * been modified. | ||
617 | * | ||
618 | * This function is called when a user wants to prepare a page track data | ||
619 | * structure to track a guest memory page. | ||
620 | * | ||
621 | * Returns: | ||
622 | * Zero on success, negative error code if failed. | ||
623 | */ | ||
624 | int intel_vgpu_init_page_track(struct intel_vgpu *vgpu, | ||
625 | struct intel_vgpu_page_track *t, | ||
626 | unsigned long gfn, | ||
627 | int (*handler)(void *, u64, void *, int), | ||
628 | void *data) | ||
629 | { | 614 | { |
630 | INIT_HLIST_NODE(&t->node); | 615 | struct intel_vgpu_ppgtt_spt *spt; |
631 | 616 | ||
632 | t->tracked = false; | 617 | spt = kzalloc(sizeof(*spt), gfp_mask); |
633 | t->gfn = gfn; | 618 | if (!spt) |
634 | t->handler = handler; | 619 | return NULL; |
635 | t->data = data; | ||
636 | 620 | ||
637 | hash_add(vgpu->gtt.tracked_guest_page_hash_table, &t->node, t->gfn); | 621 | spt->shadow_page.page = alloc_page(gfp_mask); |
638 | return 0; | 622 | if (!spt->shadow_page.page) { |
623 | kfree(spt); | ||
624 | return NULL; | ||
625 | } | ||
626 | return spt; | ||
639 | } | 627 | } |
640 | 628 | ||
641 | /** | 629 | static void free_spt(struct intel_vgpu_ppgtt_spt *spt) |
642 | * intel_vgpu_clean_page_track - release a page track data structure | ||
643 | * @vgpu: a vGPU | ||
644 | * @t: a page track data structure | ||
645 | * | ||
646 | * This function is called before a user frees a page track data structure. | ||
647 | */ | ||
648 | void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu, | ||
649 | struct intel_vgpu_page_track *t) | ||
650 | { | 630 | { |
651 | if (!hlist_unhashed(&t->node)) | 631 | __free_page(spt->shadow_page.page); |
652 | hash_del(&t->node); | 632 | kfree(spt); |
653 | |||
654 | if (t->tracked) | ||
655 | intel_gvt_hypervisor_disable_page_track(vgpu, t); | ||
656 | } | 633 | } |
657 | 634 | ||
658 | /** | 635 | /** |
@@ -679,139 +656,53 @@ struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
679 | return NULL; | 656 | return NULL; |
680 | } | 657 | } |
681 | 658 | ||
682 | static int init_guest_page(struct intel_vgpu *vgpu, | ||
683 | struct intel_vgpu_guest_page *p, | ||
684 | unsigned long gfn, | ||
685 | int (*handler)(void *, u64, void *, int), | ||
686 | void *data) | ||
687 | { | ||
688 | p->oos_page = NULL; | ||
689 | p->write_cnt = 0; | ||
690 | |||
691 | return intel_vgpu_init_page_track(vgpu, &p->track, gfn, handler, data); | ||
692 | } | ||
693 | |||
694 | static int detach_oos_page(struct intel_vgpu *vgpu, | 659 | static int detach_oos_page(struct intel_vgpu *vgpu, |
695 | struct intel_vgpu_oos_page *oos_page); | 660 | struct intel_vgpu_oos_page *oos_page); |
696 | 661 | ||
697 | static void clean_guest_page(struct intel_vgpu *vgpu, | 662 | static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt) |
698 | struct intel_vgpu_guest_page *p) | ||
699 | { | ||
700 | if (p->oos_page) | ||
701 | detach_oos_page(vgpu, p->oos_page); | ||
702 | |||
703 | intel_vgpu_clean_page_track(vgpu, &p->track); | ||
704 | } | ||
705 | |||
706 | static inline int init_shadow_page(struct intel_vgpu *vgpu, | ||
707 | struct intel_vgpu_shadow_page *p, int type, bool hash) | ||
708 | { | ||
709 | struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
710 | dma_addr_t daddr; | ||
711 | |||
712 | daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); | ||
713 | if (dma_mapping_error(kdev, daddr)) { | ||
714 | gvt_vgpu_err("fail to map dma addr\n"); | ||
715 | return -EINVAL; | ||
716 | } | ||
717 | |||
718 | p->vaddr = page_address(p->page); | ||
719 | p->type = type; | ||
720 | |||
721 | INIT_HLIST_NODE(&p->node); | ||
722 | |||
723 | p->mfn = daddr >> I915_GTT_PAGE_SHIFT; | ||
724 | if (hash) | ||
725 | hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn); | ||
726 | return 0; | ||
727 | } | ||
728 | |||
729 | static inline void clean_shadow_page(struct intel_vgpu *vgpu, | ||
730 | struct intel_vgpu_shadow_page *p) | ||
731 | { | ||
732 | struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
733 | |||
734 | dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096, | ||
735 | PCI_DMA_BIDIRECTIONAL); | ||
736 | |||
737 | if (!hlist_unhashed(&p->node)) | ||
738 | hash_del(&p->node); | ||
739 | } | ||
740 | |||
741 | static inline struct intel_vgpu_shadow_page *find_shadow_page( | ||
742 | struct intel_vgpu *vgpu, unsigned long mfn) | ||
743 | { | 663 | { |
744 | struct intel_vgpu_shadow_page *p; | 664 | struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev; |
745 | |||
746 | hash_for_each_possible(vgpu->gtt.shadow_page_hash_table, | ||
747 | p, node, mfn) { | ||
748 | if (p->mfn == mfn) | ||
749 | return p; | ||
750 | } | ||
751 | return NULL; | ||
752 | } | ||
753 | 665 | ||
754 | #define page_track_to_guest_page(ptr) \ | 666 | trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type); |
755 | container_of(ptr, struct intel_vgpu_guest_page, track) | ||
756 | 667 | ||
757 | #define guest_page_to_ppgtt_spt(ptr) \ | 668 | dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096, |
758 | container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page) | 669 | PCI_DMA_BIDIRECTIONAL); |
670 | if (!hlist_unhashed(&spt->node)) | ||
671 | hash_del(&spt->node); | ||
759 | 672 | ||
760 | #define shadow_page_to_ppgtt_spt(ptr) \ | 673 | if (spt->guest_page.oos_page) |
761 | container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page) | 674 | detach_oos_page(spt->vgpu, spt->guest_page.oos_page); |
762 | 675 | ||
763 | static void *alloc_spt(gfp_t gfp_mask) | 676 | if (!hlist_unhashed(&spt->guest_page.track.node)) |
764 | { | 677 | hash_del(&spt->guest_page.track.node); |
765 | struct intel_vgpu_ppgtt_spt *spt; | ||
766 | 678 | ||
767 | spt = kzalloc(sizeof(*spt), gfp_mask); | 679 | if (spt->guest_page.track.tracked) |
768 | if (!spt) | 680 | intel_gvt_hypervisor_disable_page_track(spt->vgpu, |
769 | return NULL; | 681 | &spt->guest_page.track); |
770 | 682 | ||
771 | spt->shadow_page.page = alloc_page(gfp_mask); | ||
772 | if (!spt->shadow_page.page) { | ||
773 | kfree(spt); | ||
774 | return NULL; | ||
775 | } | ||
776 | return spt; | ||
777 | } | ||
778 | |||
779 | static void free_spt(struct intel_vgpu_ppgtt_spt *spt) | ||
780 | { | ||
781 | __free_page(spt->shadow_page.page); | ||
782 | kfree(spt); | ||
783 | } | ||
784 | |||
785 | static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt) | ||
786 | { | ||
787 | trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type); | ||
788 | |||
789 | clean_shadow_page(spt->vgpu, &spt->shadow_page); | ||
790 | clean_guest_page(spt->vgpu, &spt->guest_page); | ||
791 | list_del_init(&spt->post_shadow_list); | 683 | list_del_init(&spt->post_shadow_list); |
792 | |||
793 | free_spt(spt); | 684 | free_spt(spt); |
794 | } | 685 | } |
795 | 686 | ||
796 | static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu) | 687 | static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu) |
797 | { | 688 | { |
798 | struct hlist_node *n; | 689 | struct hlist_node *n; |
799 | struct intel_vgpu_shadow_page *sp; | 690 | struct intel_vgpu_ppgtt_spt *spt; |
800 | int i; | 691 | int i; |
801 | 692 | ||
802 | hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node) | 693 | hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, spt, node) |
803 | ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp)); | 694 | ppgtt_free_shadow_page(spt); |
804 | } | 695 | } |
805 | 696 | ||
806 | static int ppgtt_handle_guest_write_page_table_bytes( | 697 | static int ppgtt_handle_guest_write_page_table_bytes( |
807 | struct intel_vgpu_guest_page *gpt, | 698 | struct intel_vgpu_ppgtt_spt *spt, |
808 | u64 pa, void *p_data, int bytes); | 699 | u64 pa, void *p_data, int bytes); |
809 | 700 | ||
810 | static int ppgtt_write_protection_handler(void *data, u64 pa, | 701 | static int ppgtt_write_protection_handler(void *data, u64 pa, |
811 | void *p_data, int bytes) | 702 | void *p_data, int bytes) |
812 | { | 703 | { |
813 | struct intel_vgpu_page_track *t = data; | 704 | struct intel_vgpu_page_track *t = data; |
814 | struct intel_vgpu_guest_page *p = page_track_to_guest_page(t); | 705 | struct intel_vgpu_ppgtt_spt *spt = page_track_to_ppgtt_spt(t); |
815 | int ret; | 706 | int ret; |
816 | 707 | ||
817 | if (bytes != 4 && bytes != 8) | 708 | if (bytes != 4 && bytes != 8) |
@@ -820,20 +711,47 @@ static int ppgtt_write_protection_handler(void *data, u64 pa,
820 | if (!t->tracked) | 711 | if (!t->tracked) |
821 | return -EINVAL; | 712 | return -EINVAL; |
822 | 713 | ||
823 | ret = ppgtt_handle_guest_write_page_table_bytes(p, | 714 | ret = ppgtt_handle_guest_write_page_table_bytes(spt, |
824 | pa, p_data, bytes); | 715 | pa, p_data, bytes); |
825 | if (ret) | 716 | if (ret) |
826 | return ret; | 717 | return ret; |
827 | return ret; | 718 | return ret; |
828 | } | 719 | } |
829 | 720 | ||
721 | /* Find a spt by guest gfn. */ | ||
722 | static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn( | ||
723 | struct intel_vgpu *vgpu, unsigned long gfn) | ||
724 | { | ||
725 | struct intel_vgpu_page_track *track; | ||
726 | |||
727 | track = intel_vgpu_find_tracked_page(vgpu, gfn); | ||
728 | if (track) | ||
729 | return page_track_to_ppgtt_spt(track); | ||
730 | |||
731 | return NULL; | ||
732 | } | ||
733 | |||
734 | /* Find the spt by shadow page mfn. */ | ||
735 | static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn( | ||
736 | struct intel_vgpu *vgpu, unsigned long mfn) | ||
737 | { | ||
738 | struct intel_vgpu_ppgtt_spt *spt; | ||
739 | |||
740 | hash_for_each_possible(vgpu->gtt.shadow_page_hash_table, spt, node, mfn) { | ||
741 | if (spt->shadow_page.mfn == mfn) | ||
742 | return spt; | ||
743 | } | ||
744 | return NULL; | ||
745 | } | ||
746 | |||
830 | static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); | 747 | static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); |
831 | 748 | ||
832 | static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page( | 749 | static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page( |
833 | struct intel_vgpu *vgpu, int type, unsigned long gfn) | 750 | struct intel_vgpu *vgpu, int type, unsigned long gfn) |
834 | { | 751 | { |
752 | struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; | ||
835 | struct intel_vgpu_ppgtt_spt *spt = NULL; | 753 | struct intel_vgpu_ppgtt_spt *spt = NULL; |
836 | int ret; | 754 | dma_addr_t daddr; |
837 | 755 | ||
838 | retry: | 756 | retry: |
839 | spt = alloc_spt(GFP_KERNEL | __GFP_ZERO); | 757 | spt = alloc_spt(GFP_KERNEL | __GFP_ZERO); |
@@ -846,44 +764,39 @@ retry:
846 | } | 764 | } |
847 | 765 | ||
848 | spt->vgpu = vgpu; | 766 | spt->vgpu = vgpu; |
849 | spt->guest_page_type = type; | ||
850 | atomic_set(&spt->refcount, 1); | 767 | atomic_set(&spt->refcount, 1); |
851 | INIT_LIST_HEAD(&spt->post_shadow_list); | 768 | INIT_LIST_HEAD(&spt->post_shadow_list); |
852 | 769 | ||
853 | /* | 770 | /* |
854 | * TODO: guest page type may be different with shadow page type, | 771 | * Init shadow_page. |
855 | * when we support PSE page in future. | ||
856 | */ | 772 | */ |
857 | ret = init_shadow_page(vgpu, &spt->shadow_page, type, true); | 773 | spt->shadow_page.type = type; |
858 | if (ret) { | 774 | daddr = dma_map_page(kdev, spt->shadow_page.page, |
859 | gvt_vgpu_err("fail to initialize shadow page for spt\n"); | 775 | 0, 4096, PCI_DMA_BIDIRECTIONAL); |
860 | goto err; | 776 | if (dma_mapping_error(kdev, daddr)) { |
861 | } | 777 | gvt_vgpu_err("fail to map dma addr\n"); |
862 | 778 | free_spt(spt); | |
863 | ret = init_guest_page(vgpu, &spt->guest_page, | 779 | return ERR_PTR(-EINVAL); |
864 | gfn, ppgtt_write_protection_handler, NULL); | ||
865 | if (ret) { | ||
866 | gvt_vgpu_err("fail to initialize guest page for spt\n"); | ||
867 | goto err; | ||
868 | } | 780 | } |
781 | spt->shadow_page.vaddr = page_address(spt->shadow_page.page); | ||
782 | spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT; | ||
869 | 783 | ||
870 | trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); | 784 | /* |
871 | return spt; | 785 | * Init guest_page. |
872 | err: | 786 | */ |
873 | ppgtt_free_shadow_page(spt); | 787 | spt->guest_page.type = type; |
874 | return ERR_PTR(ret); | 788 | spt->guest_page.gfn = gfn; |
875 | } | ||
876 | 789 | ||
877 | static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page( | 790 | spt->guest_page.track.gfn = gfn; |
878 | struct intel_vgpu *vgpu, unsigned long mfn) | 791 | spt->guest_page.track.handler = ppgtt_write_protection_handler; |
879 | { | 792 | hash_add(vgpu->gtt.tracked_guest_page_hash_table, |
880 | struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn); | 793 | &spt->guest_page.track.node, gfn); |
881 | 794 | ||
882 | if (p) | 795 | INIT_HLIST_NODE(&spt->node); |
883 | return shadow_page_to_ppgtt_spt(p); | 796 | hash_add(vgpu->gtt.shadow_page_hash_table, &spt->node, spt->shadow_page.mfn); |
884 | 797 | ||
885 | gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn); | 798 | trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); |
886 | return NULL; | 799 | return spt; |
887 | } | 800 | } |
888 | 801 | ||
889 | #define pt_entry_size_shift(spt) \ | 802 | #define pt_entry_size_shift(spt) \ |
@@ -929,7 +842,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
929 | vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) | 842 | vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) |
930 | return 0; | 843 | return 0; |
931 | } | 844 | } |
932 | s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); | 845 | s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); |
933 | if (!s) { | 846 | if (!s) { |
934 | gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", | 847 | gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", |
935 | ops->get_pfn(e)); | 848 | ops->get_pfn(e)); |
@@ -947,7 +860,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
947 | int v = atomic_read(&spt->refcount); | 860 | int v = atomic_read(&spt->refcount); |
948 | 861 | ||
949 | trace_spt_change(spt->vgpu->id, "die", spt, | 862 | trace_spt_change(spt->vgpu->id, "die", spt, |
950 | spt->guest_page.track.gfn, spt->shadow_page.type); | 863 | spt->guest_page.gfn, spt->shadow_page.type); |
951 | 864 | ||
952 | trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); | 865 | trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); |
953 | 866 | ||
@@ -981,7 +894,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
981 | } | 894 | } |
982 | release: | 895 | release: |
983 | trace_spt_change(spt->vgpu->id, "release", spt, | 896 | trace_spt_change(spt->vgpu->id, "release", spt, |
984 | spt->guest_page.track.gfn, spt->shadow_page.type); | 897 | spt->guest_page.gfn, spt->shadow_page.type); |
985 | ppgtt_free_shadow_page(spt); | 898 | ppgtt_free_shadow_page(spt); |
986 | return 0; | 899 | return 0; |
987 | fail: | 900 | fail: |
@@ -996,43 +909,38 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
996 | struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we) | 909 | struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we) |
997 | { | 910 | { |
998 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; | 911 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; |
999 | struct intel_vgpu_ppgtt_spt *s = NULL; | 912 | struct intel_vgpu_ppgtt_spt *spt = NULL; |
1000 | struct intel_vgpu_guest_page *g; | ||
1001 | struct intel_vgpu_page_track *t; | ||
1002 | int ret; | 913 | int ret; |
1003 | 914 | ||
1004 | GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type))); | 915 | GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type))); |
1005 | 916 | ||
1006 | t = intel_vgpu_find_tracked_page(vgpu, ops->get_pfn(we)); | 917 | spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we)); |
1007 | if (t) { | 918 | if (spt) |
1008 | g = page_track_to_guest_page(t); | 919 | ppgtt_get_shadow_page(spt); |
1009 | s = guest_page_to_ppgtt_spt(g); | 920 | else { |
1010 | ppgtt_get_shadow_page(s); | ||
1011 | } else { | ||
1012 | int type = get_next_pt_type(we->type); | 921 | int type = get_next_pt_type(we->type); |
1013 | 922 | ||
1014 | s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we)); | 923 | spt = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we)); |
1015 | if (IS_ERR(s)) { | 924 | if (IS_ERR(spt)) { |
1016 | ret = PTR_ERR(s); | 925 | ret = PTR_ERR(spt); |
1017 | goto fail; | 926 | goto fail; |
1018 | } | 927 | } |
1019 | 928 | ||
1020 | ret = intel_gvt_hypervisor_enable_page_track(vgpu, | 929 | ret = intel_gvt_hypervisor_enable_page_track(vgpu, &spt->guest_page.track); |
1021 | &s->guest_page.track); | ||
1022 | if (ret) | 930 | if (ret) |
1023 | goto fail; | 931 | goto fail; |
1024 | 932 | ||
1025 | ret = ppgtt_populate_shadow_page(s); | 933 | ret = ppgtt_populate_shadow_page(spt); |
1026 | if (ret) | 934 | if (ret) |
1027 | goto fail; | 935 | goto fail; |
1028 | 936 | ||
1029 | trace_spt_change(vgpu->id, "new", s, s->guest_page.track.gfn, | 937 | trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn, |
1030 | s->shadow_page.type); | 938 | spt->shadow_page.type); |
1031 | } | 939 | } |
1032 | return s; | 940 | return spt; |
1033 | fail: | 941 | fail: |
1034 | gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", | 942 | gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", |
1035 | s, we->val64, we->type); | 943 | spt, we->val64, we->type); |
1036 | return ERR_PTR(ret); | 944 | return ERR_PTR(ret); |
1037 | } | 945 | } |
1038 | 946 | ||
@@ -1097,8 +1005,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
1097 | 1005 | ||
1098 | for_each_present_guest_entry(spt, &ge, i) { | 1006 | for_each_present_guest_entry(spt, &ge, i) { |
1099 | if (gtt_type_is_pt(get_next_pt_type(ge.type))) { | 1007 | if (gtt_type_is_pt(get_next_pt_type(ge.type))) { |
1100 | s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, | 1008 | s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); |
1101 | &ge); | ||
1102 | if (IS_ERR(s)) { | 1009 | if (IS_ERR(s)) { |
1103 | ret = PTR_ERR(s); | 1010 | ret = PTR_ERR(s); |
1104 | goto fail; | 1011 | goto fail; |
@@ -1126,17 +1033,15 @@ fail:
1126 | return ret; | 1033 | return ret; |
1127 | } | 1034 | } |
1128 | 1035 | ||
1129 | static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt, | 1036 | static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt, |
1130 | struct intel_gvt_gtt_entry *se, unsigned long index) | 1037 | struct intel_gvt_gtt_entry *se, unsigned long index) |
1131 | { | 1038 | { |
1132 | struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); | ||
1133 | struct intel_vgpu_shadow_page *sp = &spt->shadow_page; | ||
1134 | struct intel_vgpu *vgpu = spt->vgpu; | 1039 | struct intel_vgpu *vgpu = spt->vgpu; |
1135 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; | 1040 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; |
1136 | int ret; | 1041 | int ret; |
1137 | 1042 | ||
1138 | trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64, | 1043 | trace_spt_guest_change(spt->vgpu->id, "remove", spt, |
1139 | index); | 1044 | spt->shadow_page.type, se->val64, index); |
1140 | 1045 | ||
1141 | gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n", | 1046 | gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n", |
1142 | se->type, index, se->val64); | 1047 | se->type, index, se->val64); |
@@ -1144,12 +1049,13 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
1144 | if (!ops->test_present(se)) | 1049 | if (!ops->test_present(se)) |
1145 | return 0; | 1050 | return 0; |
1146 | 1051 | ||
1147 | if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn) | 1052 | if (ops->get_pfn(se) == |
1053 | vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn) | ||
1148 | return 0; | 1054 | return 0; |
1149 | 1055 | ||
1150 | if (gtt_type_is_pt(get_next_pt_type(se->type))) { | 1056 | if (gtt_type_is_pt(get_next_pt_type(se->type))) { |
1151 | struct intel_vgpu_ppgtt_spt *s = | 1057 | struct intel_vgpu_ppgtt_spt *s = |
1152 | ppgtt_find_shadow_page(vgpu, ops->get_pfn(se)); | 1058 | intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se)); |
1153 | if (!s) { | 1059 | if (!s) { |
1154 | gvt_vgpu_err("fail to find guest page\n"); | 1060 | gvt_vgpu_err("fail to find guest page\n"); |
1155 | ret = -ENXIO; | 1061 | ret = -ENXIO; |
@@ -1166,18 +1072,16 @@ fail:
1166 | return ret; | 1072 | return ret; |
1167 | } | 1073 | } |
1168 | 1074 | ||
1169 | static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt, | 1075 | static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt, |
1170 | struct intel_gvt_gtt_entry *we, unsigned long index) | 1076 | struct intel_gvt_gtt_entry *we, unsigned long index) |
1171 | { | 1077 | { |
1172 | struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); | ||
1173 | struct intel_vgpu_shadow_page *sp = &spt->shadow_page; | ||
1174 | struct intel_vgpu *vgpu = spt->vgpu; | 1078 | struct intel_vgpu *vgpu = spt->vgpu; |
1175 | struct intel_gvt_gtt_entry m; | 1079 | struct intel_gvt_gtt_entry m; |
1176 | struct intel_vgpu_ppgtt_spt *s; | 1080 | struct intel_vgpu_ppgtt_spt *s; |
1177 | int ret; | 1081 | int ret; |
1178 | 1082 | ||
1179 | trace_gpt_change(spt->vgpu->id, "add", spt, sp->type, | 1083 | trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type, |
1180 | we->val64, index); | 1084 | we->val64, index); |
1181 | 1085 | ||
1182 | gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n", | 1086 | gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n", |
1183 | we->type, index, we->val64); | 1087 | we->type, index, we->val64); |
@@ -1209,30 +1113,29 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
1209 | const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; | 1113 | const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; |
1210 | struct intel_gvt *gvt = vgpu->gvt; | 1114 | struct intel_gvt *gvt = vgpu->gvt; |
1211 | struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; | 1115 | struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; |
1212 | struct intel_vgpu_ppgtt_spt *spt = | 1116 | struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; |
1213 | guest_page_to_ppgtt_spt(oos_page->guest_page); | ||
1214 | struct intel_gvt_gtt_entry old, new; | 1117 | struct intel_gvt_gtt_entry old, new; |
1215 | int index; | 1118 | int index; |
1216 | int ret; | 1119 | int ret; |
1217 | 1120 | ||
1218 | trace_oos_change(vgpu->id, "sync", oos_page->id, | 1121 | trace_oos_change(vgpu->id, "sync", oos_page->id, |
1219 | oos_page->guest_page, spt->guest_page_type); | 1122 | spt, spt->guest_page.type); |
1220 | 1123 | ||
1221 | old.type = new.type = get_entry_type(spt->guest_page_type); | 1124 | old.type = new.type = get_entry_type(spt->guest_page.type); |
1222 | old.val64 = new.val64 = 0; | 1125 | old.val64 = new.val64 = 0; |
1223 | 1126 | ||
1224 | for (index = 0; index < (I915_GTT_PAGE_SIZE >> | 1127 | for (index = 0; index < (I915_GTT_PAGE_SIZE >> |
1225 | info->gtt_entry_size_shift); index++) { | 1128 | info->gtt_entry_size_shift); index++) { |
1226 | ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu); | 1129 | ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu); |
1227 | ops->get_entry(NULL, &new, index, true, | 1130 | ops->get_entry(NULL, &new, index, true, |
1228 | oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu); | 1131 | spt->guest_page.gfn << PAGE_SHIFT, vgpu); |
1229 | 1132 | ||
1230 | if (old.val64 == new.val64 | 1133 | if (old.val64 == new.val64 |
1231 | && !test_and_clear_bit(index, spt->post_shadow_bitmap)) | 1134 | && !test_and_clear_bit(index, spt->post_shadow_bitmap)) |
1232 | continue; | 1135 | continue; |
1233 | 1136 | ||
1234 | trace_oos_sync(vgpu->id, oos_page->id, | 1137 | trace_oos_sync(vgpu->id, oos_page->id, |
1235 | oos_page->guest_page, spt->guest_page_type, | 1138 | spt, spt->guest_page.type, |
1236 | new.val64, index); | 1139 | new.val64, index); |
1237 | 1140 | ||
1238 | ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new); | 1141 | ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new); |
@@ -1242,7 +1145,7 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
1242 | ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu); | 1145 | ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu); |
1243 | } | 1146 | } |
1244 | 1147 | ||
1245 | oos_page->guest_page->write_cnt = 0; | 1148 | spt->guest_page.write_cnt = 0; |
1246 | list_del_init(&spt->post_shadow_list); | 1149 | list_del_init(&spt->post_shadow_list); |
1247 | return 0; | 1150 | return 0; |
1248 | } | 1151 | } |
@@ -1251,15 +1154,14 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
1251 | struct intel_vgpu_oos_page *oos_page) | 1154 | struct intel_vgpu_oos_page *oos_page) |
1252 | { | 1155 | { |
1253 | struct intel_gvt *gvt = vgpu->gvt; | 1156 | struct intel_gvt *gvt = vgpu->gvt; |
1254 | struct intel_vgpu_ppgtt_spt *spt = | 1157 | struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; |
1255 | guest_page_to_ppgtt_spt(oos_page->guest_page); | ||
1256 | 1158 | ||
1257 | trace_oos_change(vgpu->id, "detach", oos_page->id, | 1159 | trace_oos_change(vgpu->id, "detach", oos_page->id, |
1258 | oos_page->guest_page, spt->guest_page_type); | 1160 | spt, spt->guest_page.type); |
1259 | 1161 | ||
1260 | oos_page->guest_page->write_cnt = 0; | 1162 | spt->guest_page.write_cnt = 0; |
1261 | oos_page->guest_page->oos_page = NULL; | 1163 | spt->guest_page.oos_page = NULL; |
1262 | oos_page->guest_page = NULL; | 1164 | oos_page->spt = NULL; |
1263 | 1165 | ||
1264 | list_del_init(&oos_page->vm_list); | 1166 | list_del_init(&oos_page->vm_list); |
1265 | list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head); | 1167 | list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head); |
@@ -1267,51 +1169,49 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
1267 | return 0; | 1169 | return 0; |
1268 | } | 1170 | } |
1269 | 1171 | ||
1270 | static int attach_oos_page(struct intel_vgpu *vgpu, | 1172 | static int attach_oos_page(struct intel_vgpu_oos_page *oos_page, |
1271 | struct intel_vgpu_oos_page *oos_page, | 1173 | struct intel_vgpu_ppgtt_spt *spt) |
1272 | struct intel_vgpu_guest_page *gpt) | ||
1273 | { | 1174 | { |
1274 | struct intel_gvt *gvt = vgpu->gvt; | 1175 | struct intel_gvt *gvt = spt->vgpu->gvt; |
1275 | int ret; | 1176 | int ret; |
1276 | 1177 | ||
1277 | ret = intel_gvt_hypervisor_read_gpa(vgpu, | 1178 | ret = intel_gvt_hypervisor_read_gpa(spt->vgpu, |
1278 | gpt->track.gfn << I915_GTT_PAGE_SHIFT, | 1179 | spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, |
1279 | oos_page->mem, I915_GTT_PAGE_SIZE); | 1180 | oos_page->mem, I915_GTT_PAGE_SIZE); |
1280 | if (ret) | 1181 | if (ret) |
1281 | return ret; | 1182 | return ret; |
1282 | 1183 | ||
1283 | oos_page->guest_page = gpt; | 1184 | oos_page->spt = spt; |
1284 | gpt->oos_page = oos_page; | 1185 | spt->guest_page.oos_page = oos_page; |
1285 | 1186 | ||
1286 | list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head); | 1187 | list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head); |
1287 | 1188 | ||
1288 | trace_oos_change(vgpu->id, "attach", gpt->oos_page->id, | 1189 | trace_oos_change(spt->vgpu->id, "attach", oos_page->id, |
1289 | gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type); | 1190 | spt, spt->guest_page.type); |
1290 | return 0; | 1191 | return 0; |
1291 | } | 1192 | } |
1292 | 1193 | ||
1293 | static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu, | 1194 | static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt) |
1294 | struct intel_vgpu_guest_page *gpt) | ||
1295 | { | 1195 | { |
1196 | struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; | ||
1296 | int ret; | 1197 | int ret; |
1297 | 1198 | ||
1298 | ret = intel_gvt_hypervisor_enable_page_track(vgpu, &gpt->track); | 1199 | ret = intel_gvt_hypervisor_enable_page_track(spt->vgpu, &spt->guest_page.track); |
1299 | if (ret) | 1200 | if (ret) |
1300 | return ret; | 1201 | return ret; |
1301 | 1202 | ||
1302 | trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id, | 1203 | trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id, |
1303 | gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type); | 1204 | spt, spt->guest_page.type); |
1304 | 1205 | ||
1305 | list_del_init(&gpt->oos_page->vm_list); | 1206 | list_del_init(&oos_page->vm_list); |
1306 | return sync_oos_page(vgpu, gpt->oos_page); | 1207 | return sync_oos_page(spt->vgpu, oos_page); |
1307 | } | 1208 | } |
1308 | 1209 | ||
1309 | static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu, | 1210 | static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt) |
1310 | struct intel_vgpu_guest_page *gpt) | ||
1311 | { | 1211 | { |
1312 | struct intel_gvt *gvt = vgpu->gvt; | 1212 | struct intel_gvt *gvt = spt->vgpu->gvt; |
1313 | struct intel_gvt_gtt *gtt = &gvt->gtt; | 1213 | struct intel_gvt_gtt *gtt = &gvt->gtt; |
1314 | struct intel_vgpu_oos_page *oos_page = gpt->oos_page; | 1214 | struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; |
1315 | int ret; | 1215 | int ret; |
1316 | 1216 | ||
1317 | WARN(oos_page, "shadow PPGTT page has already has a oos page\n"); | 1217 | WARN(oos_page, "shadow PPGTT page has already has a oos page\n"); |
@@ -1319,31 +1219,30 @@ static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
1319 | if (list_empty(>t->oos_page_free_list_head)) { | 1219 | if (list_empty(>t->oos_page_free_list_head)) { |
1320 | oos_page = container_of(gtt->oos_page_use_list_head.next, | 1220 | oos_page = container_of(gtt->oos_page_use_list_head.next, |
1321 | struct intel_vgpu_oos_page, list); | 1221 | struct intel_vgpu_oos_page, list); |
1322 | ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page); | 1222 | ret = ppgtt_set_guest_page_sync(oos_page->spt); |
1323 | if (ret) | 1223 | if (ret) |
1324 | return ret; | 1224 | return ret; |
1325 | ret = detach_oos_page(vgpu, oos_page); | 1225 | ret = detach_oos_page(spt->vgpu, oos_page); |
1326 | if (ret) | 1226 | if (ret) |
1327 | return ret; | 1227 | return ret; |
1328 | } else | 1228 | } else |
1329 | oos_page = container_of(gtt->oos_page_free_list_head.next, | 1229 | oos_page = container_of(gtt->oos_page_free_list_head.next, |
1330 | struct intel_vgpu_oos_page, list); | 1230 | struct intel_vgpu_oos_page, list); |
1331 | return attach_oos_page(vgpu, oos_page, gpt); | 1231 | return attach_oos_page(oos_page, spt); |
1332 | } | 1232 | } |
1333 | 1233 | ||
1334 | static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu, | 1234 | static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt) |
1335 | struct intel_vgpu_guest_page *gpt) | ||
1336 | { | 1235 | { |
1337 | struct intel_vgpu_oos_page *oos_page = gpt->oos_page; | 1236 | struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; |
1338 | 1237 | ||
1339 | if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n")) | 1238 | if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n")) |
1340 | return -EINVAL; | 1239 | return -EINVAL; |
1341 | 1240 | ||
1342 | trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id, | 1241 | trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id, |
1343 | gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type); | 1242 | spt, spt->guest_page.type); |
1344 | 1243 | ||
1345 | list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head); | 1244 | list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head); |
1346 | return intel_gvt_hypervisor_disable_page_track(vgpu, &gpt->track); | 1245 | return intel_gvt_hypervisor_disable_page_track(spt->vgpu, &spt->guest_page.track); |
1347 | } | 1246 | } |
1348 | 1247 | ||
1349 | /** | 1248 | /** |
@@ -1368,7 +1267,7 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1368 | list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) { | 1267 | list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) { |
1369 | oos_page = container_of(pos, | 1268 | oos_page = container_of(pos, |
1370 | struct intel_vgpu_oos_page, vm_list); | 1269 | struct intel_vgpu_oos_page, vm_list); |
1371 | ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page); | 1270 | ret = ppgtt_set_guest_page_sync(oos_page->spt); |
1372 | if (ret) | 1271 | if (ret) |
1373 | return ret; | 1272 | return ret; |
1374 | } | 1273 | } |
@@ -1379,10 +1278,9 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1379 | * The heart of PPGTT shadow page table. | 1278 | * The heart of PPGTT shadow page table. |
1380 | */ | 1279 | */ |
1381 | static int ppgtt_handle_guest_write_page_table( | 1280 | static int ppgtt_handle_guest_write_page_table( |
1382 | struct intel_vgpu_guest_page *gpt, | 1281 | struct intel_vgpu_ppgtt_spt *spt, |
1383 | struct intel_gvt_gtt_entry *we, unsigned long index) | 1282 | struct intel_gvt_gtt_entry *we, unsigned long index) |
1384 | { | 1283 | { |
1385 | struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); | ||
1386 | struct intel_vgpu *vgpu = spt->vgpu; | 1284 | struct intel_vgpu *vgpu = spt->vgpu; |
1387 | int type = spt->shadow_page.type; | 1285 | int type = spt->shadow_page.type; |
1388 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; | 1286 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; |
@@ -1400,12 +1298,12 @@ static int ppgtt_handle_guest_write_page_table(
1400 | ppgtt_get_shadow_entry(spt, &old_se, index); | 1298 | ppgtt_get_shadow_entry(spt, &old_se, index); |
1401 | 1299 | ||
1402 | if (new_present) { | 1300 | if (new_present) { |
1403 | ret = ppgtt_handle_guest_entry_add(gpt, we, index); | 1301 | ret = ppgtt_handle_guest_entry_add(spt, we, index); |
1404 | if (ret) | 1302 | if (ret) |
1405 | goto fail; | 1303 | goto fail; |
1406 | } | 1304 | } |
1407 | 1305 | ||
1408 | ret = ppgtt_handle_guest_entry_removal(gpt, &old_se, index); | 1306 | ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index); |
1409 | if (ret) | 1307 | if (ret) |
1410 | goto fail; | 1308 | goto fail; |
1411 | 1309 | ||
@@ -1423,12 +1321,11 @@ fail:
1423 | 1321 | ||
1424 | 1322 | ||
1425 | 1323 | ||
1426 | static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt) | 1324 | static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt) |
1427 | { | 1325 | { |
1428 | return enable_out_of_sync | 1326 | return enable_out_of_sync |
1429 | && gtt_type_is_pte_pt( | 1327 | && gtt_type_is_pte_pt(spt->guest_page.type) |
1430 | guest_page_to_ppgtt_spt(gpt)->guest_page_type) | 1328 | && spt->guest_page.write_cnt >= 2; |
1431 | && gpt->write_cnt >= 2; | ||
1432 | } | 1329 | } |
1433 | 1330 | ||
1434 | static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt, | 1331 | static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt, |
@@ -1468,8 +1365,8 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1468 | GTT_ENTRY_NUM_IN_ONE_PAGE) { | 1365 | GTT_ENTRY_NUM_IN_ONE_PAGE) { |
1469 | ppgtt_get_guest_entry(spt, &ge, index); | 1366 | ppgtt_get_guest_entry(spt, &ge, index); |
1470 | 1367 | ||
1471 | ret = ppgtt_handle_guest_write_page_table( | 1368 | ret = ppgtt_handle_guest_write_page_table(spt, |
1472 | &spt->guest_page, &ge, index); | 1369 | &ge, index); |
1473 | if (ret) | 1370 | if (ret) |
1474 | return ret; | 1371 | return ret; |
1475 | clear_bit(index, spt->post_shadow_bitmap); | 1372 | clear_bit(index, spt->post_shadow_bitmap); |
@@ -1480,10 +1377,9 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1480 | } | 1377 | } |
1481 | 1378 | ||
1482 | static int ppgtt_handle_guest_write_page_table_bytes( | 1379 | static int ppgtt_handle_guest_write_page_table_bytes( |
1483 | struct intel_vgpu_guest_page *gpt, | 1380 | struct intel_vgpu_ppgtt_spt *spt, |
1484 | u64 pa, void *p_data, int bytes) | 1381 | u64 pa, void *p_data, int bytes) |
1485 | { | 1382 | { |
1486 | struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt); | ||
1487 | struct intel_vgpu *vgpu = spt->vgpu; | 1383 | struct intel_vgpu *vgpu = spt->vgpu; |
1488 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; | 1384 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; |
1489 | const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; | 1385 | const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; |
@@ -1498,7 +1394,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(
1498 | ops->test_pse(&we); | 1394 | ops->test_pse(&we); |
1499 | 1395 | ||
1500 | if (bytes == info->gtt_entry_size) { | 1396 | if (bytes == info->gtt_entry_size) { |
1501 | ret = ppgtt_handle_guest_write_page_table(gpt, &we, index); | 1397 | ret = ppgtt_handle_guest_write_page_table(spt, &we, index); |
1502 | if (ret) | 1398 | if (ret) |
1503 | return ret; | 1399 | return ret; |
1504 | } else { | 1400 | } else { |
@@ -1506,7 +1402,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(
1506 | int type = spt->shadow_page.type; | 1402 | int type = spt->shadow_page.type; |
1507 | 1403 | ||
1508 | ppgtt_get_shadow_entry(spt, &se, index); | 1404 | ppgtt_get_shadow_entry(spt, &se, index); |
1509 | ret = ppgtt_handle_guest_entry_removal(gpt, &se, index); | 1405 | ret = ppgtt_handle_guest_entry_removal(spt, &se, index); |
1510 | if (ret) | 1406 | if (ret) |
1511 | return ret; | 1407 | return ret; |
1512 | ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); | 1408 | ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); |
@@ -1518,17 +1414,17 @@ static int ppgtt_handle_guest_write_page_table_bytes(
1518 | if (!enable_out_of_sync) | 1414 | if (!enable_out_of_sync) |
1519 | return 0; | 1415 | return 0; |
1520 | 1416 | ||
1521 | gpt->write_cnt++; | 1417 | spt->guest_page.write_cnt++; |
1522 | 1418 | ||
1523 | if (gpt->oos_page) | 1419 | if (spt->guest_page.oos_page) |
1524 | ops->set_entry(gpt->oos_page->mem, &we, index, | 1420 | ops->set_entry(spt->guest_page.oos_page->mem, &we, index, |
1525 | false, 0, vgpu); | 1421 | false, 0, vgpu); |
1526 | 1422 | ||
1527 | if (can_do_out_of_sync(gpt)) { | 1423 | if (can_do_out_of_sync(spt)) { |
1528 | if (!gpt->oos_page) | 1424 | if (!spt->guest_page.oos_page) |
1529 | ppgtt_allocate_oos_page(vgpu, gpt); | 1425 | ppgtt_allocate_oos_page(spt); |
1530 | 1426 | ||
1531 | ret = ppgtt_set_guest_page_oos(vgpu, gpt); | 1427 | ret = ppgtt_set_guest_page_oos(spt); |
1532 | if (ret < 0) | 1428 | if (ret < 0) |
1533 | return ret; | 1429 | return ret; |
1534 | } | 1430 | } |
@@ -1557,8 +1453,8 @@ static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
1557 | se.val64 = 0; | 1453 | se.val64 = 0; |
1558 | ppgtt_set_shadow_root_entry(mm, &se, index); | 1454 | ppgtt_set_shadow_root_entry(mm, &se, index); |
1559 | 1455 | ||
1560 | trace_gpt_change(vgpu->id, "destroy root pointer", | 1456 | trace_spt_guest_change(vgpu->id, "destroy root pointer", |
1561 | NULL, se.type, se.val64, index); | 1457 | NULL, se.type, se.val64, index); |
1562 | } | 1458 | } |
1563 | 1459 | ||
1564 | mm->ppgtt_mm.shadowed = false; | 1460 | mm->ppgtt_mm.shadowed = false; |
@@ -1586,8 +1482,8 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
1586 | if (!ops->test_present(&ge)) | 1482 | if (!ops->test_present(&ge)) |
1587 | continue; | 1483 | continue; |
1588 | 1484 | ||
1589 | trace_gpt_change(vgpu->id, __func__, NULL, | 1485 | trace_spt_guest_change(vgpu->id, __func__, NULL, |
1590 | ge.type, ge.val64, index); | 1486 | ge.type, ge.val64, index); |
1591 | 1487 | ||
1592 | spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); | 1488 | spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); |
1593 | if (IS_ERR(spt)) { | 1489 | if (IS_ERR(spt)) { |
@@ -1598,8 +1494,8 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
1598 | ppgtt_generate_shadow_entry(&se, spt, &ge); | 1494 | ppgtt_generate_shadow_entry(&se, spt, &ge); |
1599 | ppgtt_set_shadow_root_entry(mm, &se, index); | 1495 | ppgtt_set_shadow_root_entry(mm, &se, index); |
1600 | 1496 | ||
1601 | trace_gpt_change(vgpu->id, "populate root pointer", | 1497 | trace_spt_guest_change(vgpu->id, "populate root pointer", |
1602 | NULL, se.type, se.val64, index); | 1498 | NULL, se.type, se.val64, index); |
1603 | } | 1499 | } |
1604 | 1500 | ||
1605 | return 0; | 1501 | return 0; |
@@ -1793,7 +1689,7 @@ static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
1793 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; | 1689 | struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; |
1794 | struct intel_vgpu_ppgtt_spt *s; | 1690 | struct intel_vgpu_ppgtt_spt *s; |
1795 | 1691 | ||
1796 | s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); | 1692 | s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); |
1797 | if (!s) | 1693 | if (!s) |
1798 | return -ENXIO; | 1694 | return -ENXIO; |
1799 | 1695 | ||
@@ -2030,7 +1926,7 @@ int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
2030 | if (t) { | 1926 | if (t) { |
2031 | if (unlikely(vgpu->failsafe)) { | 1927 | if (unlikely(vgpu->failsafe)) { |
2032 | /* remove write protection to prevent furture traps */ | 1928 | /* remove write protection to prevent furture traps */ |
2033 | intel_vgpu_clean_page_track(vgpu, t); | 1929 | intel_gvt_hypervisor_disable_page_track(vgpu, t); |
2034 | } else { | 1930 | } else { |
2035 | ret = t->handler(t, pa, p_data, bytes); | 1931 | ret = t->handler(t, pa, p_data, bytes); |
2036 | if (ret) { | 1932 | if (ret) { |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 652a76ef6706..a522bfe490f9 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -205,16 +205,6 @@ extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
205 | extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, | 205 | extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu, |
206 | int page_table_level, void *root_entry); | 206 | int page_table_level, void *root_entry); |
207 | 207 | ||
208 | struct intel_vgpu_oos_page; | ||
209 | |||
210 | struct intel_vgpu_shadow_page { | ||
211 | void *vaddr; | ||
212 | struct page *page; | ||
213 | int type; | ||
214 | struct hlist_node node; | ||
215 | unsigned long mfn; | ||
216 | }; | ||
217 | |||
218 | struct intel_vgpu_page_track { | 208 | struct intel_vgpu_page_track { |
219 | struct hlist_node node; | 209 | struct hlist_node node; |
220 | bool tracked; | 210 | bool tracked; |
@@ -223,14 +213,8 @@ struct intel_vgpu_page_track {
223 | void *data; | 213 | void *data; |
224 | }; | 214 | }; |
225 | 215 | ||
226 | struct intel_vgpu_guest_page { | ||
227 | struct intel_vgpu_page_track track; | ||
228 | unsigned long write_cnt; | ||
229 | struct intel_vgpu_oos_page *oos_page; | ||
230 | }; | ||
231 | |||
232 | struct intel_vgpu_oos_page { | 216 | struct intel_vgpu_oos_page { |
233 | struct intel_vgpu_guest_page *guest_page; | 217 | struct intel_vgpu_ppgtt_spt *spt; |
234 | struct list_head list; | 218 | struct list_head list; |
235 | struct list_head vm_list; | 219 | struct list_head vm_list; |
236 | int id; | 220 | int id; |
@@ -239,28 +223,31 @@ struct intel_vgpu_oos_page {
239 | 223 | ||
240 | #define GTT_ENTRY_NUM_IN_ONE_PAGE 512 | 224 | #define GTT_ENTRY_NUM_IN_ONE_PAGE 512 |
241 | 225 | ||
226 | /* Represent a vgpu shadow page table. */ | ||
242 | struct intel_vgpu_ppgtt_spt { | 227 | struct intel_vgpu_ppgtt_spt { |
243 | struct intel_vgpu_shadow_page shadow_page; | ||
244 | struct intel_vgpu_guest_page guest_page; | ||
245 | int guest_page_type; | ||
246 | atomic_t refcount; | 228 | atomic_t refcount; |
247 | struct intel_vgpu *vgpu; | 229 | struct intel_vgpu *vgpu; |
230 | struct hlist_node node; | ||
231 | |||
232 | struct { | ||
233 | intel_gvt_gtt_type_t type; | ||
234 | void *vaddr; | ||
235 | struct page *page; | ||
236 | unsigned long mfn; | ||
237 | } shadow_page; | ||
238 | |||
239 | struct { | ||
240 | intel_gvt_gtt_type_t type; | ||
241 | unsigned long gfn; | ||
242 | unsigned long write_cnt; | ||
243 | struct intel_vgpu_page_track track; | ||
244 | struct intel_vgpu_oos_page *oos_page; | ||
245 | } guest_page; | ||
246 | |||
248 | DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE); | 247 | DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE); |
249 | struct list_head post_shadow_list; | 248 | struct list_head post_shadow_list; |
250 | }; | 249 | }; |
251 | 250 | ||
252 | int intel_vgpu_init_page_track(struct intel_vgpu *vgpu, | ||
253 | struct intel_vgpu_page_track *t, | ||
254 | unsigned long gfn, | ||
255 | int (*handler)(void *gp, u64, void *, int), | ||
256 | void *data); | ||
257 | |||
258 | void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu, | ||
259 | struct intel_vgpu_page_track *t); | ||
260 | |||
261 | struct intel_vgpu_page_track *intel_vgpu_find_tracked_page( | ||
262 | struct intel_vgpu *vgpu, unsigned long gfn); | ||
263 | |||
264 | int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu); | 251 | int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu); |
265 | 252 | ||
266 | int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu); | 253 | int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu); |
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 5a060dacdb26..fc7831a62121 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -168,7 +168,7 @@ TRACE_EVENT(spt_change,
168 | TP_printk("%s", __entry->buf) | 168 | TP_printk("%s", __entry->buf) |
169 | ); | 169 | ); |
170 | 170 | ||
171 | TRACE_EVENT(gpt_change, | 171 | TRACE_EVENT(spt_guest_change, |
172 | TP_PROTO(int id, const char *tag, void *spt, int type, u64 v, | 172 | TP_PROTO(int id, const char *tag, void *spt, int type, u64 v, |
173 | unsigned long index), | 173 | unsigned long index), |
174 | 174 | ||