author     Dave Airlie <airlied@redhat.com>        2018-09-13 19:33:05 -0400
committer  Dave Airlie <airlied@redhat.com>        2018-09-13 19:33:16 -0400
commit     db7f06d49037859febb14c396bdc57ef250bedc1 (patch)
tree       e8b03cca39989f65ea13a4220fe003ad775c7862
parent     2887e5ce15ddaa2f9a19e66f7462bbf0fe6867e0 (diff)
parent     17dc7af70e89db773a7213f0b4270c69236a63ab (diff)

Merge tag 'drm-intel-fixes-2018-09-11' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes
This contains a regression fix for video playback on gen 2 hardware, an IPS
timeout error suppression on Broadwell, and a bucket of GVT fixes:
"Most critical one is to fix KVM's mm reference when we access guest memory,
an issue raised by Linus [1], and another one with a virtual opregion fix."
[1] - https://lists.freedesktop.org/archives/intel-gvt-dev/2018-August/004130.html
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180911223229.GA30328@intel.com
-rw-r--r--   drivers/gpu/drm/i915/gvt/kvmgt.c     |  10
-rw-r--r--   drivers/gpu/drm/i915/gvt/opregion.c  |  20
-rw-r--r--   drivers/gpu/drm/i915/intel_display.c |   8
-rw-r--r--   drivers/gpu/drm/i915/intel_overlay.c | 228
4 files changed, 98 insertions(+), 168 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index a45f46d8537f..c7afee37b2b8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -32,6 +32,7 @@
 #include <linux/device.h>
 #include <linux/mm.h>
 #include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
@@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
 	info = (struct kvmgt_guest_info *)handle;
 	kvm = info->kvm;
 
-	if (kthread)
+	if (kthread) {
+		if (!mmget_not_zero(kvm->mm))
+			return -EFAULT;
 		use_mm(kvm->mm);
+	}
 
 	idx = srcu_read_lock(&kvm->srcu);
 	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
		      kvm_read_guest(kvm, gpa, buf, len);
 	srcu_read_unlock(&kvm->srcu, idx);
 
-	if (kthread)
+	if (kthread) {
 		unuse_mm(kvm->mm);
+		mmput(kvm->mm);
+	}
 
 	return ret;
 }
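The kvmgt.c hunk above is the KVM mm-reference fix called out in the pull message: when kvmgt_rw_gpa() runs from a kernel thread it now pins the guest's mm with mmget_not_zero() before borrowing it via use_mm(), and releases it with mmput() after unuse_mm(), so the mm cannot be torn down while guest memory is being accessed. A minimal sketch of just that pattern, with the surrounding vGPU bookkeeping stripped out (the helper name below is illustrative, not driver code):

#include <linux/sched/mm.h>	/* mmget_not_zero(), mmput() */
#include <linux/mmu_context.h>	/* use_mm(), unuse_mm() */
#include <linux/kvm_host.h>	/* struct kvm, kvm_read_guest(), kvm_write_guest() */

/* Illustrative only: read or write guest physical memory from a kthread. */
static int example_rw_gpa(struct kvm *kvm, unsigned long gpa,
			  void *buf, unsigned long len, bool write)
{
	int idx, ret;

	/* A kthread has no mm of its own; take a reference on the guest
	 * process' mm so it cannot be freed while we borrow it. */
	if (!mmget_not_zero(kvm->mm))
		return -EFAULT;
	use_mm(kvm->mm);

	idx = srcu_read_lock(&kvm->srcu);
	ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
		      kvm_read_guest(kvm, gpa, buf, len);
	srcu_read_unlock(&kvm->srcu, idx);

	unuse_mm(kvm->mm);
	mmput(kvm->mm);		/* balances mmget_not_zero() */

	return ret;
}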
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index fa75a2eead90..b0d3a43ccd03 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -42,8 +42,6 @@
 #define DEVICE_TYPE_EFP3	0x20
 #define DEVICE_TYPE_EFP4	0x10
 
-#define DEV_SIZE	38
-
 struct opregion_header {
 	u8 signature[16];
 	u32 size;
@@ -63,6 +61,10 @@ struct bdb_data_header {
 	u16 size; /* data size */
 } __packed;
 
+/* For supporting windows guest with opregion, here hardcode the emulated
+ * bdb header version as '186', and the corresponding child_device_config
+ * length should be '33' but not '38'.
+ */
 struct efp_child_device_config {
 	u16 handle;
 	u16 device_type;
@@ -109,12 +111,6 @@ struct efp_child_device_config {
 	u8 mipi_bridge_type; /* 171 */
 	u16 device_class_ext;
 	u8 dvo_function;
-	u8 dp_usb_type_c:1; /* 195 */
-	u8 skip6:7;
-	u8 dp_usb_type_c_2x_gpio_index; /* 195 */
-	u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
-	u8 iboost_dp:4; /* 196 */
-	u8 iboost_hdmi:4; /* 196 */
 } __packed;
 
 struct vbt {
@@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v)
 	v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 
 	strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
-	v->bdb_header.version = 186; /* child_dev_size = 38 */
+	v->bdb_header.version = 186; /* child_dev_size = 33 */
 	v->bdb_header.header_size = sizeof(v->bdb_header);
 
 	v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
@@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v)
 
 	/* child device */
 	num_child = 4; /* each port has one child */
+	v->general_definitions.child_dev_size =
+		sizeof(struct efp_child_device_config);
 	v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
 	/* size will include child devices */
 	v->general_definitions_header.size =
-		sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
-	v->general_definitions.child_dev_size = DEV_SIZE;
+		sizeof(struct bdb_general_definitions) +
+		num_child * v->general_definitions.child_dev_size;
 
 	/* portA */
 	v->child0.handle = DEVICE_TYPE_EFP1;
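The opregion change drops the fields that a version-186 BDB child_device_config does not define and derives child_dev_size from sizeof() on the emulated struct instead of the hand-maintained DEV_SIZE constant, so the advertised per-child stride and the struct layout can no longer drift apart (33 bytes here rather than 38). A standalone sketch of that sizeof-derived-size idea, using a deliberately trimmed, hypothetical layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, trimmed stand-in for the emulated child device config:
 * only fields the advertised BDB version actually defines belong here. */
struct child_device_config {
	uint16_t handle;
	uint16_t device_type;
	uint8_t  rest[29];		/* remaining v186 fields: 33 bytes total */
} __attribute__((packed));

int main(void)
{
	const unsigned int num_child = 4;	/* one child per emulated port */
	/* Derive the stride from the struct itself, not a separate constant. */
	const size_t child_dev_size = sizeof(struct child_device_config);

	printf("child_dev_size = %zu, children block = %zu bytes\n",
	       child_dev_size, num_child * child_dev_size);
	return 0;
}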
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4a3c8ee9a973..d2951096bca0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5079,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
 		mutex_lock(&dev_priv->pcu_lock);
 		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
 		mutex_unlock(&dev_priv->pcu_lock);
-		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
+		/*
+		 * Wait for PCODE to finish disabling IPS. The BSpec specified
+		 * 42ms timeout value leads to occasional timeouts so use 100ms
+		 * instead.
+		 */
 		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
-					    42))
+					    100))
 			DRM_ERROR("Timed out waiting for IPS disable\n");
 	} else {
 		I915_WRITE(IPS_CTL, 0);
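The display fix only widens the timeout that hsw_disable_ips() passes to intel_wait_for_register(), which keeps polling IPS_CTL until the IPS_ENABLE bit reads back as zero or the deadline expires. For readers unfamiliar with that helper, here is a self-contained, user-space approximation of the same bounded-poll pattern; the register read is stubbed out and all names below are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for the real MMIO read; pretends the bit clears after a few polls. */
static uint32_t read_status_reg(void)
{
	static int calls;
	return (++calls < 5) ? 0x1 : 0x0;
}

/* Poll until (reg & mask) == 0 or timeout_ms elapses; returns true on success. */
static bool wait_for_bit_clear(uint32_t mask, unsigned int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		long elapsed_ms;

		if (!(read_status_reg() & mask))
			return true;		/* bit cleared in time */
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (now.tv_sec - start.tv_sec) * 1000L +
			     (now.tv_nsec - start.tv_nsec) / 1000000L;
		if (elapsed_ms > (long)timeout_ms)
			return false;		/* deadline passed */
	}
}

int main(void)
{
	if (!wait_for_bit_clear(0x1, 100))	/* 100 ms, as in the hunk above */
		fprintf(stderr, "Timed out waiting for IPS disable\n");
	return 0;
}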
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index c2f10d899329..443dfaefd7a6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -181,8 +181,9 @@ struct intel_overlay {
 	u32 brightness, contrast, saturation;
 	u32 old_xscale, old_yscale;
 	/* register access */
-	u32 flip_addr;
 	struct drm_i915_gem_object *reg_bo;
+	struct overlay_registers __iomem *regs;
+	u32 flip_addr;
 	/* flip handling */
 	struct i915_gem_active last_flip;
 };
@@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
			  PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
 }
 
-static struct overlay_registers __iomem *
-intel_overlay_map_regs(struct intel_overlay *overlay)
-{
-	struct drm_i915_private *dev_priv = overlay->i915;
-	struct overlay_registers __iomem *regs;
-
-	if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
-		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
-	else
-		regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
-					 overlay->flip_addr,
-					 PAGE_SIZE);
-
-	return regs;
-}
-
-static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
-				     struct overlay_registers __iomem *regs)
-{
-	if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
-		io_mapping_unmap(regs);
-}
-
 static void intel_overlay_submit_request(struct intel_overlay *overlay,
					 struct i915_request *rq,
					 i915_gem_retire_fn retire)
@@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
				      struct drm_i915_gem_object *new_bo,
				      struct put_image_params *params)
 {
-	int ret, tmp_width;
-	struct overlay_registers __iomem *regs;
-	bool scale_changed = false;
+	struct overlay_registers __iomem *regs = overlay->regs;
 	struct drm_i915_private *dev_priv = overlay->i915;
 	u32 swidth, swidthsw, sheight, ostride;
 	enum pipe pipe = overlay->crtc->pipe;
+	bool scale_changed = false;
 	struct i915_vma *vma;
+	int ret, tmp_width;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 	WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
 	if (!overlay->active) {
 		u32 oconfig;
-		regs = intel_overlay_map_regs(overlay);
-		if (!regs) {
-			ret = -ENOMEM;
-			goto out_unpin;
-		}
+
 		oconfig = OCONF_CC_OUT_8BIT;
 		if (IS_GEN4(dev_priv))
 			oconfig |= OCONF_CSC_MODE_BT709;
 		oconfig |= pipe == 0 ?
 			OCONF_PIPE_A : OCONF_PIPE_B;
 		iowrite32(oconfig, &regs->OCONFIG);
-		intel_overlay_unmap_regs(overlay, regs);
 
 		ret = intel_overlay_on(overlay);
 		if (ret != 0)
 			goto out_unpin;
 	}
 
-	regs = intel_overlay_map_regs(overlay);
-	if (!regs) {
-		ret = -ENOMEM;
-		goto out_unpin;
-	}
-
 	iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
 	iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
@@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
 	iowrite32(overlay_cmd_reg(params), &regs->OCMD);
 
-	intel_overlay_unmap_regs(overlay, regs);
-
 	ret = intel_overlay_continue(overlay, vma, scale_changed);
 	if (ret)
 		goto out_unpin;
@@ -901,7 +866,6 @@ out_pin_section:
 int intel_overlay_switch_off(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct overlay_registers __iomem *regs;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
 	if (ret != 0)
 		return ret;
 
-	regs = intel_overlay_map_regs(overlay);
-	iowrite32(0, &regs->OCMD);
-	intel_overlay_unmap_regs(overlay, regs);
+	iowrite32(0, &overlay->regs->OCMD);
 
 	return intel_overlay_off(overlay);
 }
@@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
 	struct drm_intel_overlay_attrs *attrs = data;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_overlay *overlay;
-	struct overlay_registers __iomem *regs;
 	int ret;
 
 	overlay = dev_priv->overlay;
@@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
 		overlay->contrast   = attrs->contrast;
 		overlay->saturation = attrs->saturation;
 
-		regs = intel_overlay_map_regs(overlay);
-		if (!regs) {
-			ret = -ENOMEM;
-			goto out_unlock;
-		}
-
-		update_reg_attrs(overlay, regs);
-
-		intel_overlay_unmap_regs(overlay, regs);
+		update_reg_attrs(overlay, overlay->regs);
 
 		if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
			if (IS_GEN2(dev_priv))
@@ -1386,12 +1339,47 @@ out_unlock:
 	return ret;
 }
 
+static int get_registers(struct intel_overlay *overlay, bool use_phys)
+{
+	struct drm_i915_gem_object *obj;
+	struct i915_vma *vma;
+	int err;
+
+	obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
+	if (obj == NULL)
+		obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto err_put_bo;
+	}
+
+	if (use_phys)
+		overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+	else
+		overlay->flip_addr = i915_ggtt_offset(vma);
+	overlay->regs = i915_vma_pin_iomap(vma);
+	i915_vma_unpin(vma);
+
+	if (IS_ERR(overlay->regs)) {
+		err = PTR_ERR(overlay->regs);
+		goto err_put_bo;
+	}
+
+	overlay->reg_bo = obj;
+	return 0;
+
+err_put_bo:
+	i915_gem_object_put(obj);
+	return err;
+}
+
 void intel_setup_overlay(struct drm_i915_private *dev_priv)
 {
 	struct intel_overlay *overlay;
-	struct drm_i915_gem_object *reg_bo;
-	struct overlay_registers __iomem *regs;
-	struct i915_vma *vma = NULL;
 	int ret;
 
 	if (!HAS_OVERLAY(dev_priv))
@@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 	if (!overlay)
 		return;
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	if (WARN_ON(dev_priv->overlay))
-		goto out_free;
-
 	overlay->i915 = dev_priv;
 
-	reg_bo = NULL;
-	if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
-		reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
-	if (reg_bo == NULL)
-		reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
-	if (IS_ERR(reg_bo))
-		goto out_free;
-	overlay->reg_bo = reg_bo;
-
-	if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
-		ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
-		if (ret) {
-			DRM_ERROR("failed to attach phys overlay regs\n");
-			goto out_free_bo;
-		}
-		overlay->flip_addr = reg_bo->phys_handle->busaddr;
-	} else {
-		vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
-					       0, PAGE_SIZE, PIN_MAPPABLE);
-		if (IS_ERR(vma)) {
-			DRM_ERROR("failed to pin overlay register bo\n");
-			ret = PTR_ERR(vma);
-			goto out_free_bo;
-		}
-		overlay->flip_addr = i915_ggtt_offset(vma);
-
-		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
-		if (ret) {
-			DRM_ERROR("failed to move overlay register bo into the GTT\n");
-			goto out_unpin_bo;
-		}
-	}
-
-	/* init all values */
 	overlay->color_key = 0x0101fe;
 	overlay->color_key_enabled = true;
 	overlay->brightness = -19;
@@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 
 	init_request_active(&overlay->last_flip, NULL);
 
-	regs = intel_overlay_map_regs(overlay);
-	if (!regs)
-		goto out_unpin_bo;
+	mutex_lock(&dev_priv->drm.struct_mutex);
+
+	ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+	if (ret)
+		goto out_free;
+
+	ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
+	if (ret)
+		goto out_reg_bo;
 
-	memset_io(regs, 0, sizeof(struct overlay_registers));
-	update_polyphase_filter(regs);
-	update_reg_attrs(overlay, regs);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 
-	intel_overlay_unmap_regs(overlay, regs);
+	memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
+	update_polyphase_filter(overlay->regs);
+	update_reg_attrs(overlay, overlay->regs);
 
 	dev_priv->overlay = overlay;
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-	DRM_INFO("initialized overlay support\n");
+	DRM_INFO("Initialized overlay support.\n");
 	return;
 
-out_unpin_bo:
-	if (vma)
-		i915_vma_unpin(vma);
-out_free_bo:
-	i915_gem_object_put(reg_bo);
+out_reg_bo:
+	i915_gem_object_put(overlay->reg_bo);
 out_free:
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 	kfree(overlay);
-	return;
 }
 
 void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
 {
-	if (!dev_priv->overlay)
+	struct intel_overlay *overlay;
+
+	overlay = fetch_and_zero(&dev_priv->overlay);
+	if (!overlay)
 		return;
 
-	/* The bo's should be free'd by the generic code already.
+	/*
+	 * The bo's should be free'd by the generic code already.
 	 * Furthermore modesetting teardown happens beforehand so the
-	 * hardware should be off already */
-	WARN_ON(dev_priv->overlay->active);
+	 * hardware should be off already.
+	 */
+	WARN_ON(overlay->active);
+
+	i915_gem_object_put(overlay->reg_bo);
 
-	i915_gem_object_put(dev_priv->overlay->reg_bo);
-	kfree(dev_priv->overlay);
+	kfree(overlay);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -1498,37 +1455,11 @@ struct intel_overlay_error_state {
 	u32 isr;
 };
 
-static struct overlay_registers __iomem *
-intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
-{
-	struct drm_i915_private *dev_priv = overlay->i915;
-	struct overlay_registers __iomem *regs;
-
-	if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
-		/* Cast to make sparse happy, but it's wc memory anyway, so
-		 * equivalent to the wc io mapping on X86. */
-		regs = (struct overlay_registers __iomem *)
-			overlay->reg_bo->phys_handle->vaddr;
-	else
-		regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
-						overlay->flip_addr);
-
-	return regs;
-}
-
-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
-					    struct overlay_registers __iomem *regs)
-{
-	if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
-		io_mapping_unmap_atomic(regs);
-}
-
 struct intel_overlay_error_state *
 intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
 {
 	struct intel_overlay *overlay = dev_priv->overlay;
 	struct intel_overlay_error_state *error;
-	struct overlay_registers __iomem *regs;
 
 	if (!overlay || !overlay->active)
 		return NULL;
@@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
 	error->isr = I915_READ(ISR);
 	error->base = overlay->flip_addr;
 
-	regs = intel_overlay_map_regs_atomic(overlay);
-	if (!regs)
-		goto err;
-
-	memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
-	intel_overlay_unmap_regs_atomic(overlay, regs);
+	memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
 
 	return error;
-
-err:
-	kfree(error);
-	return NULL;
 }
 
 void
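The common thread in the intel_overlay.c changes above is that the overlay register page is now mapped exactly once: get_registers() pins the stolen (or internal) object and caches the iomap in overlay->regs at setup time, so every later user dereferences that cached pointer and the per-call intel_overlay_map_regs()/unmap pairs, along with their -ENOMEM error paths, disappear. A hypothetical user-space sketch of that map-once-and-cache shape (all names below are illustrative, and calloc() stands in for i915_vma_pin_iomap()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct overlay_regs {
	uint32_t OCMD;
	uint32_t OCONFIG;
};

struct overlay {
	struct overlay_regs *regs;	/* mapped once, valid for the overlay's lifetime */
};

/* Stand-in for get_registers(): acquire the mapping a single time at setup. */
static int overlay_get_registers(struct overlay *ov)
{
	ov->regs = calloc(1, sizeof(*ov->regs));
	return ov->regs ? 0 : -1;
}

/* Later users just dereference the cached mapping; no map/unmap pair and
 * no per-call allocation failure to handle, mirroring the hunks above. */
static void overlay_switch_off(struct overlay *ov)
{
	ov->regs->OCMD = 0;
}

int main(void)
{
	struct overlay ov;

	if (overlay_get_registers(&ov))
		return 1;
	overlay_switch_off(&ov);
	printf("OCMD = %u\n", (unsigned int)ov.regs->OCMD);
	free(ov.regs);
	return 0;
}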