author		Jani Nikula <jani.nikula@intel.com>	2017-02-16 04:58:16 -0500
committer	Jani Nikula <jani.nikula@intel.com>	2017-02-16 04:58:37 -0500
commit		33b7bfdf918af4dc6585afe6d21f5e6b7613de1b (patch)
tree		7d12270ae0c0fd110b70f73c9b0b86b73dc4a2be
parent		39a75ac4a55df8ee0a2f6059400c7153c8fd7d95 (diff)
parent		4a0b3444da3ce1090d0f894f4e343756a94ab8c3 (diff)
Merge tag 'gvt-next-2017-02-15' of https://github.com/01org/gvt-linux into drm-intel-next-fixes
gvt-next-2017-02-15
- Chuanxiao's IOMMU workaround fix
- debug message cleanup from Changbin
- oops fix in the failure path of workload submission during GPU reset, from Changbin
- other misc fixes
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
-rw-r--r--	drivers/gpu/drm/i915/gvt/aperture_gm.c	15
-rw-r--r--	drivers/gpu/drm/i915/gvt/cmd_parser.c	20
-rw-r--r--	drivers/gpu/drm/i915/gvt/display.c	12
-rw-r--r--	drivers/gpu/drm/i915/gvt/display.h	1
-rw-r--r--	drivers/gpu/drm/i915/gvt/execlist.c	2
-rw-r--r--	drivers/gpu/drm/i915/gvt/gtt.c	70
-rw-r--r--	drivers/gpu/drm/i915/gvt/gvt.c	7
-rw-r--r--	drivers/gpu/drm/i915/gvt/interrupt.c	57
-rw-r--r--	drivers/gpu/drm/i915/gvt/kvmgt.c	70
-rw-r--r--	drivers/gpu/drm/i915/gvt/render.c	17
-rw-r--r--	drivers/gpu/drm/i915/gvt/sched_policy.c	1
-rw-r--r--	drivers/gpu/drm/i915/gvt/scheduler.c	5
-rw-r--r--	drivers/gpu/drm/i915/gvt/vgpu.c	1
13 files changed, 179 insertions, 99 deletions
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 7311aeab16f7..3b6caaca9751 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -49,20 +49,21 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 	if (high_gm) {
 		node = &vgpu->gm.high_gm_node;
 		size = vgpu_hidden_sz(vgpu);
-		start = gvt_hidden_gmadr_base(gvt);
-		end = gvt_hidden_gmadr_end(gvt);
+		start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
+		end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
 		flags = PIN_HIGH;
 	} else {
 		node = &vgpu->gm.low_gm_node;
 		size = vgpu_aperture_sz(vgpu);
-		start = gvt_aperture_gmadr_base(gvt);
-		end = gvt_aperture_gmadr_end(gvt);
+		start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
+		end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
 		flags = PIN_MAPPABLE;
 	}
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
-				  size, 4096, I915_COLOR_UNEVICTABLE,
+				  size, I915_GTT_PAGE_SIZE,
+				  I915_COLOR_UNEVICTABLE,
 				  start, end, flags);
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	if (ret)
@@ -254,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	if (request > avail)
 		goto no_enough_resource;
 
-	vgpu_aperture_sz(vgpu) = request;
+	vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);
 
 	item = "high GM space";
 	max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
@@ -265,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	if (request > avail)
 		goto no_enough_resource;
 
-	vgpu_hidden_sz(vgpu) = request;
+	vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);
 
 	item = "fence";
 	max = gvt_fence_sz(gvt) - HOST_FENCE;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 9a4b23c3ee97..7bb11a555b76 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1135,6 +1135,8 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 	u32 dword2 = cmd_val(s, 2);
 	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
 
+	info->plane = PRIMARY_PLANE;
+
 	switch (plane) {
 	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
 		info->pipe = PIPE_A;
@@ -1148,12 +1150,28 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		info->pipe = PIPE_C;
 		info->event = PRIMARY_C_FLIP_DONE;
 		break;
+
+	case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
+		info->pipe = PIPE_A;
+		info->event = SPRITE_A_FLIP_DONE;
+		info->plane = SPRITE_PLANE;
+		break;
+	case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
+		info->pipe = PIPE_B;
+		info->event = SPRITE_B_FLIP_DONE;
+		info->plane = SPRITE_PLANE;
+		break;
+	case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
+		info->pipe = PIPE_C;
+		info->event = SPRITE_C_FLIP_DONE;
+		info->plane = SPRITE_PLANE;
+		break;
+
 	default:
 		gvt_err("unknown plane code %d\n", plane);
 		return -EINVAL;
 	}
 
-	info->pipe = PRIMARY_PLANE;
 	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
 	info->tile_val = (dword1 & GENMASK(2, 0));
 	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 1a4430724069..6d8fde880c39 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -333,3 +333,15 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu)
 	else
 		return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
 }
+
+/**
+ * intel_vgpu_reset_display- reset vGPU virtual display emulation
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset vGPU virtual display emulation stuffs
+ *
+ */
+void intel_vgpu_reset_display(struct intel_vgpu *vgpu)
+{
+	emulate_monitor_status_change(vgpu);
+}
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 7a60cb848268..8b234ea961f6 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -158,6 +158,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
 void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
 
 int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f32bb6f6495c..136c6e77561a 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -515,7 +515,7 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 
 static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-	if (wa_ctx->indirect_ctx.size == 0)
+	if (!wa_ctx->indirect_ctx.obj)
 		return;
 
 	i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 47dec4acf7ff..28c92346db0e 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -606,21 +606,33 @@ struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
 static inline int init_shadow_page(struct intel_vgpu *vgpu,
 		struct intel_vgpu_shadow_page *p, int type)
 {
+	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(kdev, daddr)) {
+		gvt_err("fail to map dma addr\n");
+		return -EINVAL;
+	}
+
 	p->vaddr = page_address(p->page);
 	p->type = type;
 
 	INIT_HLIST_NODE(&p->node);
 
-	p->mfn = intel_gvt_hypervisor_virt_to_mfn(p->vaddr);
-	if (p->mfn == INTEL_GVT_INVALID_ADDR)
-		return -EFAULT;
-
+	p->mfn = daddr >> GTT_PAGE_SHIFT;
 	hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
 	return 0;
 }
 
-static inline void clean_shadow_page(struct intel_vgpu_shadow_page *p)
+static inline void clean_shadow_page(struct intel_vgpu *vgpu,
+		struct intel_vgpu_shadow_page *p)
 {
+	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+
+	dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
+			PCI_DMA_BIDIRECTIONAL);
+
 	if (!hlist_unhashed(&p->node))
 		hash_del(&p->node);
 }
@@ -670,7 +682,7 @@ static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
 	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);
 
-	clean_shadow_page(&spt->shadow_page);
+	clean_shadow_page(spt->vgpu, &spt->shadow_page);
 	intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
 	list_del_init(&spt->post_shadow_list);
 
@@ -1875,8 +1887,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 	int page_entry_num = GTT_PAGE_SIZE >>
 				vgpu->gvt->device_info.gtt_entry_size_shift;
 	void *scratch_pt;
-	unsigned long mfn;
 	int i;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
 
 	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
 		return -EINVAL;
@@ -1887,16 +1900,18 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		return -ENOMEM;
 	}
 
-	mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
-	if (mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
-		free_page((unsigned long)scratch_pt);
-		return -EFAULT;
+	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
+			4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr)) {
+		gvt_err("fail to dmamap scratch_pt\n");
+		__free_page(virt_to_page(scratch_pt));
+		return -ENOMEM;
 	}
-	gtt->scratch_pt[type].page_mfn = mfn;
+	gtt->scratch_pt[type].page_mfn =
+		(unsigned long)(daddr >> GTT_PAGE_SHIFT);
 	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
 	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
-			vgpu->id, type, mfn);
+			vgpu->id, type, gtt->scratch_pt[type].page_mfn);
 
 	/* Build the tree by full filled the scratch pt with the entries which
 	 * point to the next level scratch pt or scratch page. The
@@ -1930,9 +1945,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 {
 	int i;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
 
 	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
 		if (vgpu->gtt.scratch_pt[i].page != NULL) {
+			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
+					GTT_PAGE_SHIFT);
+			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
 			__free_page(vgpu->gtt.scratch_pt[i].page);
 			vgpu->gtt.scratch_pt[i].page = NULL;
 			vgpu->gtt.scratch_pt[i].page_mfn = 0;
@@ -2192,6 +2212,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
 	void *page;
+	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
 
 	gvt_dbg_core("init gtt\n");
 
@@ -2209,14 +2231,16 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		gvt_err("fail to allocate scratch ggtt page\n");
 		return -ENOMEM;
 	}
-	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
 
-	gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
-	if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate scratch ggtt page\n");
-		__free_page(gvt->gtt.scratch_ggtt_page);
-		return -EFAULT;
+	daddr = dma_map_page(dev, virt_to_page(page), 0,
+			4096, PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr)) {
+		gvt_err("fail to dmamap scratch ggtt page\n");
+		__free_page(virt_to_page(page));
+		return -ENOMEM;
 	}
+	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
+	gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
 
 	if (enable_out_of_sync) {
 		ret = setup_spt_oos(gvt);
@@ -2239,6 +2263,12 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
+	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
+					GTT_PAGE_SHIFT);
+
+	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+
 	__free_page(gvt->gtt.scratch_ggtt_page);
 
 	if (enable_out_of_sync)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 9a636a2c2077..3b9d59e457ba 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -75,13 +75,6 @@ int intel_gvt_init_host(void)
 	if (xen_domain() && !xen_initial_domain())
 		return -ENODEV;
 
-#ifdef CONFIG_INTEL_IOMMU
-	if (intel_iommu_gfx_mapped) {
-		gvt_err("Hardware IOMMU compatibility not yet supported, try to boot with intel_iommu=igfx_off\n");
-		return -ENODEV;
-	}
-#endif
-
 	/* Try to load MPT modules for hypervisors */
 	if (xen_initial_domain()) {
 		/* In Xen dom0 */
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index f7be02ac4be1..92bb247e3478 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -176,26 +176,15 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
-	u32 changed, masked, unmasked;
 	u32 imr = *(u32 *)p_data;
 
-	gvt_dbg_irq("write IMR %x with val %x\n",
-		reg, imr);
-
-	gvt_dbg_irq("old vIMR %x\n", vgpu_vreg(vgpu, reg));
-
-	/* figure out newly masked/unmasked bits */
-	changed = vgpu_vreg(vgpu, reg) ^ imr;
-	masked = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
-	unmasked = masked ^ changed;
-
-	gvt_dbg_irq("changed %x, masked %x, unmasked %x\n",
-		changed, masked, unmasked);
+	gvt_dbg_irq("write IMR %x, new %08x, old %08x, changed %08x\n",
+		reg, imr, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ imr);
 
 	vgpu_vreg(vgpu, reg) = imr;
 
 	ops->check_pending_irq(vgpu);
-	gvt_dbg_irq("IRQ: new vIMR %x\n", vgpu_vreg(vgpu, reg));
+
 	return 0;
 }
 
@@ -217,14 +206,11 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
-	u32 changed, enabled, disabled;
 	u32 ier = *(u32 *)p_data;
 	u32 virtual_ier = vgpu_vreg(vgpu, reg);
 
-	gvt_dbg_irq("write master irq reg %x with val %x\n",
-		reg, ier);
-
-	gvt_dbg_irq("old vreg %x\n", vgpu_vreg(vgpu, reg));
+	gvt_dbg_irq("write MASTER_IRQ %x, new %08x, old %08x, changed %08x\n",
+		reg, ier, virtual_ier, virtual_ier ^ ier);
 
 	/*
 	 * GEN8_MASTER_IRQ is a special irq register,
@@ -236,16 +222,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
 	vgpu_vreg(vgpu, reg) &= ~GEN8_MASTER_IRQ_CONTROL;
 	vgpu_vreg(vgpu, reg) |= ier;
 
-	/* figure out newly enabled/disable bits */
-	changed = virtual_ier ^ ier;
-	enabled = (virtual_ier & changed) ^ changed;
-	disabled = enabled ^ changed;
-
-	gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
-		changed, enabled, disabled);
-
 	ops->check_pending_irq(vgpu);
-	gvt_dbg_irq("new vreg %x\n", vgpu_vreg(vgpu, reg));
+
 	return 0;
 }
 
@@ -268,21 +246,11 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_irq_ops *ops = gvt->irq.ops;
 	struct intel_gvt_irq_info *info;
-	u32 changed, enabled, disabled;
 	u32 ier = *(u32 *)p_data;
 
-	gvt_dbg_irq("write IER %x with val %x\n",
-		reg, ier);
-
-	gvt_dbg_irq("old vIER %x\n", vgpu_vreg(vgpu, reg));
+	gvt_dbg_irq("write IER %x, new %08x, old %08x, changed %08x\n",
+		reg, ier, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ ier);
 
-	/* figure out newly enabled/disable bits */
-	changed = vgpu_vreg(vgpu, reg) ^ ier;
-	enabled = (vgpu_vreg(vgpu, reg) & changed) ^ changed;
-	disabled = enabled ^ changed;
-
-	gvt_dbg_irq("changed %x, enabled %x, disabled %x\n",
-		changed, enabled, disabled);
 	vgpu_vreg(vgpu, reg) = ier;
 
 	info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
@@ -293,7 +261,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
 	update_upstream_irq(vgpu, info);
 
 	ops->check_pending_irq(vgpu);
-	gvt_dbg_irq("new vIER %x\n", vgpu_vreg(vgpu, reg));
+
 	return 0;
 }
 
@@ -317,7 +285,8 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
 			iir_to_regbase(reg));
 	u32 iir = *(u32 *)p_data;
 
-	gvt_dbg_irq("write IIR %x with val %x\n", reg, iir);
+	gvt_dbg_irq("write IIR %x, new %08x, old %08x, changed %08x\n",
+		reg, iir, vgpu_vreg(vgpu, reg), vgpu_vreg(vgpu, reg) ^ iir);
 
 	if (WARN_ON(!info))
 		return -EINVAL;
@@ -619,6 +588,10 @@ static void gen8_init_irq(
 	SET_BIT_INFO(irq, 3, PRIMARY_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
 	SET_BIT_INFO(irq, 3, PRIMARY_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
 	SET_BIT_INFO(irq, 3, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
+
+	SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A);
+	SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B);
+	SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
 }
 
 /* GEN8 interrupt PCU events */
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 080ca77abd22..10c3a4b95a92 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -77,7 +77,7 @@ struct kvmgt_guest_info {
 struct gvt_dma {
 	struct rb_node node;
 	gfn_t gfn;
-	kvm_pfn_t pfn;
+	unsigned long iova;
 };
 
 static inline bool handle_valid(unsigned long handle)
@@ -89,6 +89,35 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
 static void intel_vgpu_release_work(struct work_struct *work);
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
 
+static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
+		unsigned long *iova)
+{
+	struct page *page;
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	page = pfn_to_page(pfn);
+	if (is_error_page(page))
+		return -EFAULT;
+
+	daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
+			PCI_DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, daddr))
+		return -ENOMEM;
+
+	*iova = (unsigned long)(daddr >> PAGE_SHIFT);
+	return 0;
+}
+
+static void gvt_dma_unmap_iova(struct intel_vgpu *vgpu, unsigned long iova)
+{
+	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+	dma_addr_t daddr;
+
+	daddr = (dma_addr_t)(iova << PAGE_SHIFT);
+	dma_unmap_page(dev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+}
+
 static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct rb_node *node = vgpu->vdev.cache.rb_node;
@@ -111,21 +140,22 @@ out:
 	return ret;
 }
 
-static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
+static unsigned long gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
-	kvm_pfn_t pfn;
+	unsigned long iova;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 
 	entry = __gvt_cache_find(vgpu, gfn);
-	pfn = (entry == NULL) ? 0 : entry->pfn;
+	iova = (entry == NULL) ? INTEL_GVT_INVALID_ADDR : entry->iova;
 
 	mutex_unlock(&vgpu->vdev.cache_lock);
-	return pfn;
+	return iova;
 }
 
-static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
+static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+		unsigned long iova)
 {
 	struct gvt_dma *new, *itr;
 	struct rb_node **link = &vgpu->vdev.cache.rb_node, *parent = NULL;
@@ -135,7 +165,7 @@ static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
 		return;
 
 	new->gfn = gfn;
-	new->pfn = pfn;
+	new->iova = iova;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while (*link) {
@@ -182,6 +212,7 @@ static void gvt_cache_remove(struct intel_vgpu *vgpu, gfn_t gfn)
 	}
 
 	g1 = gfn;
+	gvt_dma_unmap_iova(vgpu, this->iova);
 	rc = vfio_unpin_pages(dev, &g1, 1);
 	WARN_ON(rc != 1);
 	__gvt_cache_remove_entry(vgpu, this);
@@ -204,6 +235,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
 	mutex_lock(&vgpu->vdev.cache_lock);
 	while ((node = rb_first(&vgpu->vdev.cache))) {
 		dma = rb_entry(node, struct gvt_dma, node);
+		gvt_dma_unmap_iova(vgpu, dma->iova);
 		gfn = dma->gfn;
 
 		vfio_unpin_pages(dev, &gfn, 1);
@@ -965,11 +997,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
 		sparse->areas[0].offset =
 				PAGE_ALIGN(vgpu_aperture_offset(vgpu));
 		sparse->areas[0].size = vgpu_aperture_sz(vgpu);
-		if (!caps.buf) {
-			kfree(caps.buf);
-			caps.buf = NULL;
-			caps.size = 0;
-		}
 		break;
 
 	case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
@@ -1353,7 +1380,7 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
 
 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 {
-	unsigned long pfn;
+	unsigned long iova, pfn;
 	struct kvmgt_guest_info *info;
 	struct device *dev;
 	int rc;
@@ -1362,9 +1389,9 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		return INTEL_GVT_INVALID_ADDR;
 
 	info = (struct kvmgt_guest_info *)handle;
-	pfn = gvt_cache_find(info->vgpu, gfn);
-	if (pfn != 0)
-		return pfn;
+	iova = gvt_cache_find(info->vgpu, gfn);
+	if (iova != INTEL_GVT_INVALID_ADDR)
+		return iova;
 
 	pfn = INTEL_GVT_INVALID_ADDR;
 	dev = mdev_dev(info->vgpu->vdev.mdev);
@@ -1373,9 +1400,16 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
 		gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc);
 		return INTEL_GVT_INVALID_ADDR;
 	}
+	/* transfer to host iova for GFX to use DMA */
+	rc = gvt_dma_map_iova(info->vgpu, pfn, &iova);
+	if (rc) {
+		gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn);
+		vfio_unpin_pages(dev, &gfn, 1);
+		return INTEL_GVT_INVALID_ADDR;
+	}
 
-	gvt_cache_add(info->vgpu, gfn, pfn);
-	return pfn;
+	gvt_cache_add(info->vgpu, gfn, iova);
+	return iova;
 }
 
 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 44136b1f3aab..2b3a642284b6 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -236,12 +236,18 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 	}
 }
 
+#define CTX_CONTEXT_CONTROL_VAL	0x03
+
 void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct render_mmio *mmio;
 	u32 v;
 	int i, array_size;
+	u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
+	u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+	u32 inhibit_mask =
+		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 
 	if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
 		mmio = gen9_render_mmio_list;
@@ -257,6 +263,17 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
 			continue;
 
 		mmio->value = I915_READ(mmio->reg);
+
+		/*
+		 * if it is an inhibit context, load in_context mmio
+		 * into HW by mmio write. If it is not, skip this mmio
+		 * write.
+		 */
+		if (mmio->in_context &&
+		    ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
+		    i915.enable_execlists)
+			continue;
+
 		if (mmio->mask)
 			v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
 		else
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 678b0be85376..06c9584ac5f0 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -125,7 +125,6 @@ static void tbs_sched_func(struct work_struct *work)
 		vgpu_data = scheduler->current_vgpu->sched_data;
 		head = &vgpu_data->list;
 	} else {
-		gvt_dbg_sched("no current vgpu search from q head\n");
 		head = &sched_data->runq_head;
 	}
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 7ea68a75dc46..d6b6d0efdd1a 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -169,7 +169,8 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
 		ring_id, workload);
 
-	shadow_ctx->desc_template = workload->ctx_desc.addressing_mode <<
+	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
+	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
 				    GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
 	mutex_lock(&dev_priv->drm.struct_mutex);
@@ -456,7 +457,7 @@ static int workload_thread(void *priv)
 	}
 
 complete:
-	gvt_dbg_sched("will complete workload %p\n, status: %d\n",
+	gvt_dbg_sched("will complete workload %p, status: %d\n",
 		workload, workload->status);
 
 	if (workload->req)
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 67d471cee79e..95a97aa0051e 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -385,6 +385,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 	intel_vgpu_reset_resource(vgpu);
 	intel_vgpu_reset_mmio(vgpu);
 	populate_pvinfo_page(vgpu);
+	intel_vgpu_reset_display(vgpu);
 
 	if (dmlr)
 		intel_vgpu_reset_cfg_space(vgpu);