Diffstat (limited to 'drivers/gpu/drm/i915/gvt/gtt.c')

 drivers/gpu/drm/i915/gvt/gtt.c | 82 ++++++++++++++++++++++++++++++++++++++++++------------------------------------------
 1 file changed, 42 insertions(+), 40 deletions(-)
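
This patch converts the error messages in gtt.c from gvt_err() to gvt_vgpu_err(), so each message is automatically tagged with the id of the vGPU that hit the failure instead of every call site formatting vgpu->id by hand. Below is a minimal sketch of how such a macro can work; it assumes a variable named "vgpu" is in scope at each call site (which is why one hunk adds a local "struct intel_vgpu *vgpu" to ppgtt_invalidate_shadow_page()), and the real definition in the driver's debug header may differ in detail:

        /* Sketch only: a vGPU-aware error macro. It expands against a
         * "vgpu" pointer that must already be visible at the call site,
         * so the caller never passes vgpu->id explicitly.
         */
        #define gvt_vgpu_err(fmt, args...) \
                pr_err("gvt: vgpu %d: " fmt, vgpu->id, ##args)
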
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6a5ff23ded90..b832bea64e03 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 {
 	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
 			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
-		gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
-				vgpu->id, addr, size);
+		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
+				addr, size);
 		return false;
 	}
 	return true;
@@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
 
 	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
 	if (mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate gfn: 0x%lx\n", gfn);
+		gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn);
 		return -ENXIO;
 	}
 
@@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
 
 	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(kdev, daddr)) {
-		gvt_err("fail to map dma addr\n");
+		gvt_vgpu_err("fail to map dma addr\n");
 		return -EINVAL;
 	}
 
@@ -735,7 +735,7 @@ retry:
 	if (reclaim_one_mm(vgpu->gvt))
 		goto retry;
 
-	gvt_err("fail to allocate ppgtt shadow page\n");
+	gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
 	return ERR_PTR(-ENOMEM);
 }
 
@@ -750,14 +750,14 @@ retry:
 	 */
 	ret = init_shadow_page(vgpu, &spt->shadow_page, type);
 	if (ret) {
-		gvt_err("fail to initialize shadow page for spt\n");
+		gvt_vgpu_err("fail to initialize shadow page for spt\n");
 		goto err;
 	}
 
 	ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
 			gfn, ppgtt_write_protection_handler, NULL);
 	if (ret) {
-		gvt_err("fail to initialize guest page for spt\n");
+		gvt_vgpu_err("fail to initialize guest page for spt\n");
 		goto err;
 	}
 
@@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
 	if (p)
 		return shadow_page_to_ppgtt_spt(p);
 
-	gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
-		vgpu->id, mfn);
+	gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn);
 	return NULL;
 }
 
@@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
 	}
 	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
 	if (!s) {
-		gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
-				vgpu->id, ops->get_pfn(e));
+		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
+				ops->get_pfn(e));
 		return -ENXIO;
 	}
 	return ppgtt_invalidate_shadow_page(s);
@@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
 
 static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
+	struct intel_vgpu *vgpu = spt->vgpu;
 	struct intel_gvt_gtt_entry e;
 	unsigned long index;
 	int ret;
@@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 
 	for_each_present_shadow_entry(spt, &e, index) {
 		if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
-			gvt_err("GVT doesn't support pse bit for now\n");
+			gvt_vgpu_err("GVT doesn't support pse bit for now\n");
 			return -EINVAL;
 		}
 		ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
@@ -868,8 +868,8 @@ release:
 	ppgtt_free_shadow_page(spt);
 	return 0;
 fail:
-	gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
-			spt->vgpu->id, spt, e.val64, e.type);
+	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
+			spt, e.val64, e.type);
 	return ret;
 }
 
@@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
 	}
 	return s;
 fail:
-	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-			vgpu->id, s, we->val64, we->type);
+	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+			s, we->val64, we->type);
 	return ERR_PTR(ret);
 }
 
@@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 
 	for_each_present_guest_entry(spt, &ge, i) {
 		if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
-			gvt_err("GVT doesn't support pse bit now\n");
+			gvt_vgpu_err("GVT doesn't support pse bit now\n");
 			ret = -EINVAL;
 			goto fail;
 		}
@@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 	}
 	return 0;
 fail:
-	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-			vgpu->id, spt, ge.val64, ge.type);
+	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+			spt, ge.val64, ge.type);
 	return ret;
 }
 
@@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
 		struct intel_vgpu_ppgtt_spt *s =
 			ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
 		if (!s) {
-			gvt_err("fail to find guest page\n");
+			gvt_vgpu_err("fail to find guest page\n");
 			ret = -ENXIO;
 			goto fail;
 		}
@@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
 	ppgtt_set_shadow_entry(spt, &e, index);
 	return 0;
 fail:
-	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-			vgpu->id, spt, e.val64, e.type);
+	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
+			spt, e.val64, e.type);
 	return ret;
 }
 
@@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
 	}
 	return 0;
 fail:
-	gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
-		spt, we->val64, we->type);
+	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
+		spt, we->val64, we->type);
 	return ret;
 }
 
@@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table(
 	}
 	return 0;
 fail:
-	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
-			vgpu->id, spt, we->val64, we->type);
+	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
+			spt, we->val64, we->type);
 	return ret;
 }
 
@@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm)
 
 		spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
 		if (IS_ERR(spt)) {
-			gvt_err("fail to populate guest root pointer\n");
+			gvt_vgpu_err("fail to populate guest root pointer\n");
 			ret = PTR_ERR(spt);
 			goto fail;
 		}
@@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
 
 	ret = gtt->mm_alloc_page_table(mm);
 	if (ret) {
-		gvt_err("fail to allocate page table for mm\n");
+		gvt_vgpu_err("fail to allocate page table for mm\n");
 		goto fail;
 	}
 
@@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
 	}
 	return mm;
 fail:
-	gvt_err("fail to create mm\n");
+	gvt_vgpu_err("fail to create mm\n");
 	if (mm)
 		intel_gvt_mm_unreference(mm);
 	return ERR_PTR(ret);
@@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 			mm->page_table_level, gma, gpa);
 	return gpa;
 err:
-	gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
+	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
 	return INTEL_GVT_INVALID_ADDR;
 }
 
@@ -1836,13 +1836,16 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	if (ops->test_present(&e)) {
 		ret = gtt_entry_p2m(vgpu, &e, &m);
 		if (ret) {
-			gvt_err("vgpu%d: fail to translate guest gtt entry\n",
-					vgpu->id);
-			return ret;
+			gvt_vgpu_err("fail to translate guest gtt entry\n");
+			/* The guest driver may read/write the entry while
+			 * partially updating it; p2m will fail in that case,
+			 * so set the shadow entry to point to a scratch page.
+			 */
+			ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
 		}
 	} else {
 		m = e;
-		m.val64 = 0;
+		ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
 	}
 
 	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
@@ -1893,14 +1896,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 
 	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!scratch_pt) {
-		gvt_err("fail to allocate scratch page\n");
+		gvt_vgpu_err("fail to allocate scratch page\n");
 		return -ENOMEM;
 	}
 
 	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
 			4096, PCI_DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, daddr)) {
-		gvt_err("fail to dmamap scratch_pt\n");
+		gvt_vgpu_err("fail to dmamap scratch_pt\n");
 		__free_page(virt_to_page(scratch_pt));
 		return -ENOMEM;
 	}
@@ -2003,7 +2006,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
 			NULL, 1, 0);
 	if (IS_ERR(ggtt_mm)) {
-		gvt_err("fail to create mm for ggtt.\n");
+		gvt_vgpu_err("fail to create mm for ggtt.\n");
 		return PTR_ERR(ggtt_mm);
 	}
 
@@ -2076,7 +2079,6 @@ static int setup_spt_oos(struct intel_gvt *gvt)
 	for (i = 0; i < preallocated_oos_pages; i++) {
 		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
 		if (!oos_page) {
-			gvt_err("fail to pre-allocate oos page\n");
 			ret = -ENOMEM;
 			goto fail;
 		}
@@ -2166,7 +2168,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
 		mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
 				pdp, page_table_level, 0);
 		if (IS_ERR(mm)) {
-			gvt_err("fail to create mm\n");
+			gvt_vgpu_err("fail to create mm\n");
 			return PTR_ERR(mm);
 		}
 	}
@@ -2196,7 +2198,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 
 	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
 	if (!mm) {
-		gvt_err("fail to find ppgtt instance.\n");
+		gvt_vgpu_err("fail to find ppgtt instance.\n");
 		return -EINVAL;
 	}
 	intel_gvt_mm_unreference(mm);
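
Beyond the message conversion, the emulate_gtt_mmio_write() hunk also changes the failure path: when a present guest GTT entry cannot be translated (for example because the guest is half-way through a partial update of the 64-bit entry), the write is no longer rejected with an error; the shadow entry is instead pointed at the vGPU's scratch page, and non-present entries get the same treatment, so stray accesses through such entries hit harmless scratch memory. (The one message in setup_spt_oos() is dropped outright, since no vGPU is in scope in that function.) The resulting control flow, condensed here as a sketch rather than a drop-in excerpt of the function:

        /* Sketch of the post-patch GGTT write emulation logic;
         * simplified, variable declarations omitted.
         */
        if (ops->test_present(&e)) {
                ret = gtt_entry_p2m(vgpu, &e, &m);
                if (ret) {
                        gvt_vgpu_err("fail to translate guest gtt entry\n");
                        /* Partial guest updates can make p2m fail; fall
                         * back to the scratch page rather than failing
                         * the emulated MMIO write.
                         */
                        ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
                }
        } else {
                /* Non-present entries also map to the scratch page. */
                m = e;
                ops->set_pfn(&m, gvt->gtt.scratch_ggtt_mfn);
        }
        ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);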
