diff options
author | Changbin Du <changbin.du@intel.com> | 2018-05-14 22:35:39 -0400 |
---|---|---|
committer | Zhenyu Wang <zhenyuw@linux.intel.com> | 2018-07-08 22:23:32 -0400 |
commit | 155521c93e468211673206e1871b53d26a44a82d (patch) | |
tree | d81f3657575703cb525a7505d5bfabc1837d1bfd | |
parent | c3e697635fcc9173e1d7116d9ebfd2fd0887177d (diff) |
drm/i915/gvt: Split ppgtt_alloc_spt into two parts
We need an interface to allocate a pure shadow page which doesn't have
a guest page associated with it. Such a shadow page is used to shadow a 2M
huge GTT entry.
Signed-off-by: Changbin Du <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
-rw-r--r-- | drivers/gpu/drm/i915/gvt/gtt.c | 62 |
1 file changed, 40 insertions, 22 deletions
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index d34dc9ab66e1..15f6908fc648 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
@@ -735,10 +735,12 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt) | |||
735 | 735 | ||
736 | radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn); | 736 | radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn); |
737 | 737 | ||
738 | if (spt->guest_page.oos_page) | 738 | if (spt->guest_page.gfn) { |
739 | detach_oos_page(spt->vgpu, spt->guest_page.oos_page); | 739 | if (spt->guest_page.oos_page) |
740 | detach_oos_page(spt->vgpu, spt->guest_page.oos_page); | ||
740 | 741 | ||
741 | intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn); | 742 | intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn); |
743 | } | ||
742 | 744 | ||
743 | list_del_init(&spt->post_shadow_list); | 745 | list_del_init(&spt->post_shadow_list); |
744 | free_spt(spt); | 746 | free_spt(spt); |
@@ -799,9 +801,9 @@ static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn( | |||
799 | 801 | ||
800 | static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); | 802 | static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt); |
801 | 803 | ||
804 | /* Allocate shadow page table without guest page. */ | ||
802 | static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt( | 805 | static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt( |
803 | struct intel_vgpu *vgpu, int type, unsigned long gfn, | 806 | struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type) |
804 | bool guest_pde_ips) | ||
805 | { | 807 | { |
806 | struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; | 808 | struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev; |
807 | struct intel_vgpu_ppgtt_spt *spt = NULL; | 809 | struct intel_vgpu_ppgtt_spt *spt = NULL; |
@@ -836,27 +838,12 @@ retry: | |||
836 | spt->shadow_page.vaddr = page_address(spt->shadow_page.page); | 838 | spt->shadow_page.vaddr = page_address(spt->shadow_page.page); |
837 | spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT; | 839 | spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT; |
838 | 840 | ||
839 | /* | ||
840 | * Init guest_page. | ||
841 | */ | ||
842 | spt->guest_page.type = type; | ||
843 | spt->guest_page.gfn = gfn; | ||
844 | spt->guest_page.pde_ips = guest_pde_ips; | ||
845 | |||
846 | ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn, | ||
847 | ppgtt_write_protection_handler, spt); | ||
848 | if (ret) | ||
849 | goto err_unmap_dma; | ||
850 | |||
851 | ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt); | 841 | ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt); |
852 | if (ret) | 842 | if (ret) |
853 | goto err_unreg_page_track; | 843 | goto err_unmap_dma; |
854 | 844 | ||
855 | trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); | ||
856 | return spt; | 845 | return spt; |
857 | 846 | ||
858 | err_unreg_page_track: | ||
859 | intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn); | ||
860 | err_unmap_dma: | 847 | err_unmap_dma: |
861 | dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); | 848 | dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
862 | err_free_spt: | 849 | err_free_spt: |
@@ -864,6 +851,37 @@ err_free_spt: | |||
864 | return ERR_PTR(ret); | 851 | return ERR_PTR(ret); |
865 | } | 852 | } |
866 | 853 | ||
854 | /* Allocate shadow page table associated with specific gfn. */ | ||
855 | static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn( | ||
856 | struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type, | ||
857 | unsigned long gfn, bool guest_pde_ips) | ||
858 | { | ||
859 | struct intel_vgpu_ppgtt_spt *spt; | ||
860 | int ret; | ||
861 | |||
862 | spt = ppgtt_alloc_spt(vgpu, type); | ||
863 | if (IS_ERR(spt)) | ||
864 | return spt; | ||
865 | |||
866 | /* | ||
867 | * Init guest_page. | ||
868 | */ | ||
869 | ret = intel_vgpu_register_page_track(vgpu, gfn, | ||
870 | ppgtt_write_protection_handler, spt); | ||
871 | if (ret) { | ||
872 | ppgtt_free_spt(spt); | ||
873 | return ERR_PTR(ret); | ||
874 | } | ||
875 | |||
876 | spt->guest_page.type = type; | ||
877 | spt->guest_page.gfn = gfn; | ||
878 | spt->guest_page.pde_ips = guest_pde_ips; | ||
879 | |||
880 | trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); | ||
881 | |||
882 | return spt; | ||
883 | } | ||
884 | |||
867 | #define pt_entry_size_shift(spt) \ | 885 | #define pt_entry_size_shift(spt) \ |
868 | ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift) | 886 | ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift) |
869 | 887 | ||
@@ -1021,7 +1039,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( | |||
1021 | if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY) | 1039 | if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY) |
1022 | ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we); | 1040 | ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we); |
1023 | 1041 | ||
1024 | spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we), ips); | 1042 | spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips); |
1025 | if (IS_ERR(spt)) { | 1043 | if (IS_ERR(spt)) { |
1026 | ret = PTR_ERR(spt); | 1044 | ret = PTR_ERR(spt); |
1027 | goto fail; | 1045 | goto fail; |