Diffstat (limited to 'drivers/gpu/drm/i915/gvt/gtt.c')
-rw-r--r--	drivers/gpu/drm/i915/gvt/gtt.c	70
1 file changed, 50 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 47dec4acf7ff..28c92346db0e 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -606,21 +606,33 @@ struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
 static inline int init_shadow_page(struct intel_vgpu *vgpu,
 		struct intel_vgpu_shadow_page *p, int type)
 {
+	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(kdev, daddr)) {
+		gvt_err("fail to map dma addr\n");
+		return -EINVAL;
+	}
+
 	p->vaddr = page_address(p->page);
 	p->type = type;
 
 	INIT_HLIST_NODE(&p->node);
 
-	p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
-	if (p->mfn == INTEL_GVT_INVALID_ADDR)
-		return -EFAULT;
-
+	p->mfn = daddr >> GTT_PAGE_SHIFT;
 	hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
 	return 0;
 }
 
-static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
+static inline void clean_shadow_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_shadow_page *p)
 {
+	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+
+	dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
+			PCI_DMA_BIDIRECTIONAL);
+
 	if (!hlist_unhashed(&p->node))
 		hash_del(&p->node);
 }
@@ -670,7 +682,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
 	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
 
-	clean_shadow_page(&spt->shadow_page);
+	clean_shadow_page(spt->vgpu, &spt->shadow_page);
 	intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
 	list_del_init(&spt->post_shadow_list);
 
@@ -1875,8 +1887,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 	int page_entry_num = GTT_PAGE_SIZE >>
 				vgpu->gvt->device_info.gtt_entry_size_shift;
 	void *scratch_pt;
-	unsigned long mfn;
 	int i;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
 
 	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
 		return -EINVAL;
@@ -1887,16 +1900,18 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		return -ENOMEM;
 	}
 
-	mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
-	if (mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
-		free_page((unsigned long)scratch_pt);
-		return -EFAULT;
+	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
+			4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr)) {
+		gvt_err("fail to dmamap scratch_pt\n");
+		__free_page(virt_to_page(scratch_pt));
+		return -ENOMEM;
 	}
-	gtt->scratch_pt[type].page_mfn = mfn;
+	gtt->scratch_pt[type].page_mfn =
+		(unsigned long)(daddr >> GTT_PAGE_SHIFT);
 	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
 	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
-			vgpu->id, type, mfn);
+			vgpu->id, type, gtt->scratch_pt[type].page_mfn);
 
 	/* Build the tree by full filled the scratch pt with the entries which
 	 * point to the next level scratch pt or scratch page. The
@@ -1930,9 +1945,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 {
 	int i;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
 
 	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
 		if (vgpu->gtt.scratch_pt[i].page != NULL) {
+			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
+						GTT_PAGE_SHIFT);
+			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
 			__free_page(vgpu->gtt.scratch_pt[i].page);
 			vgpu->gtt.scratch_pt[i].page = NULL;
 			vgpu->gtt.scratch_pt[i].page_mfn = 0;
@@ -2192,6 +2212,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
 	void *page;
+	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
 
 	gvt_dbg_core("init gtt\n");
 
@@ -2209,14 +2231,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		gvt_err("fail to allocate scratch ggtt page\n");
 		return -ENOMEM;
 	}
-	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
 
-	gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
-	if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate scratch ggtt page\n");
-		__free_page(gvt->gtt.scratch_ggtt_page);
-		return -EFAULT;
+	daddr = dma_map_page(dev, virt_to_page(page), 0,
+			4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr)) {
+		gvt_err("fail to dmamap scratch ggtt page\n");
+		__free_page(virt_to_page(page));
+		return -ENOMEM;
 	}
+	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
+	gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
 
 	if (enable_out_of_sync) {
 		ret = setup_spt_oos(gvt);
@@ -2239,6 +2263,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
+	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
+					GTT_PAGE_SHIFT);
+
+	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+
 	__free_page(gvt->gtt.scratch_ggtt_page);
 
 	if (enable_out_of_sync)
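Every hunk above makes the same substitution: intel_gvt_hypervisor_virt_to_mfn() is replaced by a DMA-API mapping, and the bus address shifted by GTT_PAGE_SHIFT takes the place of the hypervisor-provided MFN. Below is a minimal sketch of that map/unmap pairing, not code from the patch: the example_* helpers and EXAMPLE_PAGE_SHIFT are illustrative names, and DMA_BIDIRECTIONAL is the generic spelling of the PCI_DMA_BIDIRECTIONAL alias the diff uses.

	/* Sketch of the pattern the patch adopts: map one 4KB page for
	 * bidirectional DMA, keep the bus address as a page-frame number,
	 * and rebuild that address to unmap on teardown.
	 */
	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/mm.h>

	#define EXAMPLE_PAGE_SHIFT 12	/* stands in for GTT_PAGE_SHIFT */

	/* Hypothetical helper: returns 0 and fills *mfn on success. */
	static int example_map_page(struct device *dev, struct page *page,
				    unsigned long *mfn)
	{
		dma_addr_t daddr;

		daddr = dma_map_page(dev, page, 0, 4096, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, daddr))
			return -ENOMEM;

		/* The diff stores daddr >> GTT_PAGE_SHIFT wherever an MFN
		 * from intel_gvt_hypervisor_virt_to_mfn() was stored before.
		 */
		*mfn = (unsigned long)(daddr >> EXAMPLE_PAGE_SHIFT);
		return 0;
	}

	/* Hypothetical helper: rebuild the bus address, release the mapping. */
	static void example_unmap_page(struct device *dev, unsigned long mfn)
	{
		dma_unmap_page(dev, (dma_addr_t)mfn << EXAMPLE_PAGE_SHIFT,
			       4096, DMA_BIDIRECTIONAL);
	}

Because the frame number is derived from the DMA address rather than a hypervisor call, the unmap path must shift it back with the same constant before calling dma_unmap_page(), which is exactly what clean_shadow_page(), release_scratch_page_tree(), and intel_gvt_clean_gtt() do above.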