author     Linus Torvalds <torvalds@linux-foundation.org>  2017-06-01 00:53:49 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-06-01 00:53:49 -0400
commit     a37484638ca5e0aa7c205ecb91c9ace92e83c32c
tree       a996c83b6c1261edfbdb51b4d50606edbbb7b150
parent     d602fb6844940b23afb64d4bf70bb963c15571ee
parent     400129f0a3ae989c30b37104bbc23b35c9d7a9a4
Merge tag 'drm-fixes-for-v4.12-rc4' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
"This is the main set of fixes for rc4, one amdgpu fix, some exynos
regression fixes, some msm fixes and some i915 and GVT fixes.
I've got a second regression fix for some DP chips that might be a
bit large, but I think we'd like to land it now, I'll send it along
tomorrow, once you are happy with this set"
* tag 'drm-fixes-for-v4.12-rc4' of git://people.freedesktop.org/~airlied/linux: (24 commits)
drm/amdgpu: Program ring for vce instance 1 at its register space
drm/exynos: clean up description of exynos_drm_crtc
drm/exynos: dsi: Remove bridge node reference in removal
drm/exynos: dsi: Fix the parse_dt function
drm/exynos: Merge pre/postclose hooks
drm/msm: Fix the check for the command size
drm/msm: Take the mutex before calling msm_gem_new_impl
drm/msm: for array in-fences, check if all backing fences are from our own context before waiting
drm/msm: constify irq_domain_ops
drm/msm/mdp5: release hwpipe(s) for unused planes
drm/msm: Reuse dma_fence_release.
drm/msm: Expose our reservation object when exporting a dmabuf.
drm/msm/gpu: check legacy clk names in get_clocks()
drm/msm/mdp5: use __drm_atomic_helper_plane_duplicate_state()
drm/msm: select PM_OPP
drm/i915: Stop pretending to mask/unmask LPE audio interrupts
drm/i915/selftests: Silence compiler warning in igt_ctx_exec
Revert "drm/i915: Restore lost "Initialized i915" welcome message"
drm/i915/gvt: clean up unsubmited workloads before destroying kmem cache
drm/i915/gvt: Disable compression workaround for Gen9
...
24 files changed, 169 insertions(+), 154 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index fb0819359909..90332f55cfba 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle,
 static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
+	u32 v;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (adev->vce.harvest_config == 0 ||
+		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
 	if (ring == &adev->vce.ring[0])
-		return RREG32(mmVCE_RB_RPTR);
+		v = RREG32(mmVCE_RB_RPTR);
 	else if (ring == &adev->vce.ring[1])
-		return RREG32(mmVCE_RB_RPTR2);
+		v = RREG32(mmVCE_RB_RPTR2);
 	else
-		return RREG32(mmVCE_RB_RPTR3);
+		v = RREG32(mmVCE_RB_RPTR3);
+
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return v;
 }
 
 /**
@@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
+	u32 v;
+
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (adev->vce.harvest_config == 0 ||
+		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
 
 	if (ring == &adev->vce.ring[0])
-		return RREG32(mmVCE_RB_WPTR);
+		v = RREG32(mmVCE_RB_WPTR);
 	else if (ring == &adev->vce.ring[1])
-		return RREG32(mmVCE_RB_WPTR2);
+		v = RREG32(mmVCE_RB_WPTR2);
 	else
-		return RREG32(mmVCE_RB_WPTR3);
+		v = RREG32(mmVCE_RB_WPTR3);
+
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+	mutex_unlock(&adev->grbm_idx_mutex);
+
+	return v;
 }
 
 /**
@@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 
+	mutex_lock(&adev->grbm_idx_mutex);
+	if (adev->vce.harvest_config == 0 ||
+		adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
+	else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
+		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
+
 	if (ring == &adev->vce.ring[0])
 		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
 	else if (ring == &adev->vce.ring[1])
 		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
 	else
 		WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+
+	WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
+	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
 static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
@@ -231,33 +267,38 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 	struct amdgpu_ring *ring;
 	int idx, r;
 
-	ring = &adev->vce.ring[0];
-	WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
-
-	ring = &adev->vce.ring[1];
-	WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
-
-	ring = &adev->vce.ring[2];
-	WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
-	WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
-	WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
-	WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
-
 	mutex_lock(&adev->grbm_idx_mutex);
 	for (idx = 0; idx < 2; ++idx) {
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
 
 		WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
+
+		/* Program instance 0 reg space for two instances or instance 0 case
+		program instance 1 reg space for only instance 1 available case */
+		if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) {
+			ring = &adev->vce.ring[0];
+			WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
+			WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+			WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+			ring = &adev->vce.ring[1];
+			WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
+			WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+			WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+			ring = &adev->vce.ring[2];
+			WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr));
+			WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr);
+			WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr));
+			WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4);
+		}
+
 		vce_v3_0_mc_resume(adev, idx);
 		WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
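
Note: the three ring accessors above now repeat the same select/restore dance around GRBM_GFX_INDEX. A hypothetical refactoring, not part of this patch (the helper names below are invented; the macros and fields are the ones the patch itself uses), could keep that pattern in one place:

	/* Hypothetical helpers (not in the patch): select the register space of
	 * the surviving VCE instance under grbm_idx_mutex, and restore the
	 * default index when done. */
	static void vce_v3_0_select_ring_regs(struct amdgpu_device *adev)
	{
		mutex_lock(&adev->grbm_idx_mutex);
		if (adev->vce.harvest_config == 0 ||
		    adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1)
			WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
		else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0)
			WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
	}

	static void vce_v3_0_restore_ring_regs(struct amdgpu_device *adev)
	{
		WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
		mutex_unlock(&adev->grbm_idx_mutex);
	}

Each accessor would then bracket its RREG32/WREG32 calls with these two helpers instead of open-coding the sequence.
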
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 09d3c4c3c858..50294a7bd29d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -82,14 +82,9 @@ err_file_priv_free:
 	return ret;
 }
 
-static void exynos_drm_preclose(struct drm_device *dev,
-					struct drm_file *file)
-{
-	exynos_drm_subdrv_close(dev, file);
-}
-
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
+	exynos_drm_subdrv_close(dev, file);
 	kfree(file->driver_priv);
 	file->driver_priv = NULL;
 }
@@ -145,7 +140,6 @@ static struct drm_driver exynos_drm_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME
 			   | DRIVER_ATOMIC | DRIVER_RENDER,
 	.open = exynos_drm_open,
-	.preclose = exynos_drm_preclose,
 	.lastclose = exynos_drm_lastclose,
 	.postclose = exynos_drm_postclose,
 	.gem_free_object_unlocked = exynos_drm_gem_free_object,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index cb3176930596..39c740572034 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -160,12 +160,9 @@ struct exynos_drm_clk {
  * drm framework doesn't support multiple irq yet.
  * we can refer to the crtc to current hardware interrupt occurred through
  * this pipe value.
- * @enabled: if the crtc is enabled or not
- * @event: vblank event that is currently queued for flip
- * @wait_update: wait all pending planes updates to finish
- * @pending_update: number of pending plane updates in this crtc
 * @ops: pointer to callbacks for exynos drm specific functionality
 * @ctx: A pointer to the crtc's implementation specific context
+ * @pipe_clk: A pointer to the crtc's pipeline clock.
 */
 struct exynos_drm_crtc {
	struct drm_crtc base;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index fc4fda738906..d404de86d5f9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
 {
 	struct device *dev = dsi->dev;
 	struct device_node *node = dev->of_node;
-	struct device_node *ep;
 	int ret;
 
 	ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency",
@@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
 	if (ret < 0)
 		return ret;
 
-	ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0);
-	if (!ep) {
-		dev_err(dev, "no output port with endpoint specified\n");
-		return -EINVAL;
-	}
-
-	ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency",
+	ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency",
				     &dsi->burst_clk_rate);
 	if (ret < 0)
-		goto end;
+		return ret;
 
-	ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
+	ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency",
				     &dsi->esc_clk_rate);
 	if (ret < 0)
-		goto end;
-
-	of_node_put(ep);
+		return ret;
 
 	dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0);
 	if (!dsi->bridge_node)
 		return -EINVAL;
 
-end:
-	of_node_put(ep);
-
-	return ret;
+	return 0;
 }
 
 static int exynos_dsi_bind(struct device *dev, struct device *master,
@@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
 static int exynos_dsi_remove(struct platform_device *pdev)
 {
+	struct exynos_dsi *dsi = platform_get_drvdata(pdev);
+
+	of_node_put(dsi->bridge_node);
+
 	pm_runtime_disable(&pdev->dev);
 
 	component_del(&pdev->dev, &exynos_dsi_component_ops);
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index dca989eb2d42..24fe04d6307b 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_engine_cs *engine;
+	struct intel_vgpu_workload *pos, *n;
+	unsigned int tmp;
+
+	/* free the unsubmited workloads in the queues. */
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+		list_for_each_entry_safe(pos, n,
+			&vgpu->workload_q_head[engine->id], list) {
+			list_del_init(&pos->list);
+			free_workload(pos);
+		}
+	}
+}
+
 void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 {
+	clean_workloads(vgpu, ALL_ENGINES);
 	kmem_cache_destroy(vgpu->workloads);
 }
 
@@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_engine_cs *engine;
-	struct intel_vgpu_workload *pos, *n;
 	unsigned int tmp;
 
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
-		/* free the unsubmited workload in the queue */
-		list_for_each_entry_safe(pos, n,
-			&vgpu->workload_q_head[engine->id], list) {
-			list_del_init(&pos->list);
-			free_workload(pos);
-		}
-
+	clean_workloads(vgpu, engine_mask);
+	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
 		init_vgpu_execlist(vgpu, engine->id);
-	}
 }
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index c995e540ff96..0ffd69654592 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	i915_reg_t reg = {.reg = offset};
+	u32 v = *(u32 *)p_data;
+
+	if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
+		return intel_vgpu_default_mmio_write(vgpu,
+			offset, p_data, bytes);
 
 	switch (offset) {
 	case 0x4ddc:
-		vgpu_vreg(vgpu, offset) = 0x8000003c;
-		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */
-		I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+		vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
 		break;
 	case 0x42080:
-		vgpu_vreg(vgpu, offset) = 0x8000;
-		/* WaCompressedResourceDisplayNewHashMode:skl */
-		I915_WRITE(reg, vgpu_vreg(vgpu, offset));
+		/* bypass WaCompressedResourceDisplayNewHashMode */
+		vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
+		break;
+	case 0xe194:
+		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+		vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
+		break;
+	case 0x7014:
+		/* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
+		vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
 		break;
 	default:
 		return -EINVAL;
@@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
 		NULL, NULL);
-	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+		 skl_misc_ctl_write);
 	MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
@@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(0x6e570, D_BDW_PLUS);
 	MMIO_D(0x65f10, D_BDW_PLUS);
 
-	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+		 skl_misc_ctl_write);
 	MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3036d4835b0f..c994fe6e65b2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1272,10 +1272,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	dev_priv->ipc_enabled = false;
 
-	/* Everything is in place, we can now relax! */
-	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
-		 driver.name, driver.major, driver.minor, driver.patchlevel,
-		 driver.date, pci_name(pdev), dev_priv->drm.primary->index);
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
 		DRM_INFO("DRM_I915_DEBUG enabled\n");
 	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a0563e18d753..50b8f1139ff9 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2313,7 +2313,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 	    appgtt->base.allocate_va_range) {
 		ret = appgtt->base.allocate_va_range(&appgtt->base,
 						     vma->node.start,
-						     vma->node.size);
+						     vma->size);
 		if (ret)
 			goto err_pages;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 129ed303a6c4..57d9f7f4ef15 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -59,9 +59,6 @@ static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
 		return;
 
 	mutex_unlock(&dev->struct_mutex);
-
-	/* expedite the RCU grace period to free some request slabs */
-	synchronize_rcu_expedited();
 }
 
 static bool any_vma_pinned(struct drm_i915_gem_object *obj)
@@ -274,8 +271,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 				I915_SHRINK_ACTIVE);
 	intel_runtime_pm_put(dev_priv);
 
-	synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
-
 	return freed;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fd97fe00cd0d..190f6aa5d15e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2953,7 +2953,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 	u32 pipestat_mask;
 	u32 enable_mask;
 	enum pipe pipe;
-	u32 val;
 
 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
 			PIPE_CRC_DONE_INTERRUPT_STATUS;
@@ -2964,18 +2963,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
 
 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+		I915_LPE_PIPE_A_INTERRUPT |
+		I915_LPE_PIPE_B_INTERRUPT;
+
 	if (IS_CHERRYVIEW(dev_priv))
-		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
+			I915_LPE_PIPE_C_INTERRUPT;
 
 	WARN_ON(dev_priv->irq_mask != ~0);
 
-	val = (I915_LPE_PIPE_A_INTERRUPT |
-		I915_LPE_PIPE_B_INTERRUPT |
-		I915_LPE_PIPE_C_INTERRUPT);
-
-	enable_mask |= val;
-
 	dev_priv->irq_mask = ~enable_mask;
 
 	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5a7c63e64381..65b837e96fe6 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8280,7 +8280,7 @@ enum {
 
 /* MIPI DSI registers */
 
-#define _MIPI_PORT(port, a, c)	((port) ? c : a)	/* ports A and C only */
+#define _MIPI_PORT(port, a, c)	(((port) == PORT_A) ? a : c)	/* ports A and C only */
 #define _MMIO_MIPI(port, a, c)	_MMIO(_MIPI_PORT(port, a, c))
 
 #define MIPIO_TXESC_CLK_DIV1	_MMIO(0x160004)
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 668f00480d97..292fedf30b00 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -149,44 +149,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
 
 static void lpe_audio_irq_unmask(struct irq_data *d)
 {
-	struct drm_i915_private *dev_priv = d->chip_data;
-	unsigned long irqflags;
-	u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-		I915_LPE_PIPE_B_INTERRUPT);
-
-	if (IS_CHERRYVIEW(dev_priv))
-		val |= I915_LPE_PIPE_C_INTERRUPT;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-	dev_priv->irq_mask &= ~val;
-	I915_WRITE(VLV_IIR, val);
-	I915_WRITE(VLV_IIR, val);
-	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-	POSTING_READ(VLV_IMR);
-
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static void lpe_audio_irq_mask(struct irq_data *d)
 {
-	struct drm_i915_private *dev_priv = d->chip_data;
-	unsigned long irqflags;
-	u32 val = (I915_LPE_PIPE_A_INTERRUPT |
-		I915_LPE_PIPE_B_INTERRUPT);
-
-	if (IS_CHERRYVIEW(dev_priv))
-		val |= I915_LPE_PIPE_C_INTERRUPT;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-
-	dev_priv->irq_mask |= val;
-	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
-	I915_WRITE(VLV_IIR, val);
-	I915_WRITE(VLV_IIR, val);
-	POSTING_READ(VLV_IIR);
-
-	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 static struct irq_chip lpe_audio_irqchip = {
@@ -330,8 +296,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
 
 	desc = irq_to_desc(dev_priv->lpe_audio.irq);
 
-	lpe_audio_irq_mask(&desc->irq_data);
-
 	lpe_audio_platdev_destroy(dev_priv);
 
 	irq_free_desc(dev_priv->lpe_audio.irq);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c8f7c631fc1f..dac4e003c1f3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1989,7 +1989,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
 	ce->ring = ring;
 	ce->state = vma;
-	ce->initialised = engine->init_context == NULL;
+	ce->initialised |= engine->init_context == NULL;
 
 	return 0;
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 1afb8b06e3e1..12b85b3278cd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
 static int igt_ctx_exec(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct drm_i915_gem_object *obj;
+	struct drm_i915_gem_object *obj = NULL;
 	struct drm_file *file;
 	IGT_TIMEOUT(end_time);
 	LIST_HEAD(objects);
@@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg)
 		}
 
 		for_each_engine(engine, i915, id) {
-			if (dw == 0) {
+			if (!obj) {
 				obj = create_test_object(ctx, file, &objects);
 				if (IS_ERR(obj)) {
 					err = PTR_ERR(obj);
@@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg)
 				goto out_unlock;
 			}
 
-			if (++dw == max_dwords(obj))
+			if (++dw == max_dwords(obj)) {
+				obj = NULL;
 				dw = 0;
+			}
 			ndwords++;
 		}
 		ncontexts++;
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 5b8e23d051f2..0a31cd6d01ce 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -13,6 +13,7 @@ config DRM_MSM
 	select QCOM_SCM
 	select SND_SOC_HDMI_CODEC if SND_SOC
 	select SYNC_FILE
+	select PM_OPP
 	default y
 	help
 	  DRM/KMS driver for MSM/snapdragon.
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
index f8f48d014978..9c34d7824988 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
@@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
 	return 0;
 }
 
-static struct irq_domain_ops mdss_hw_irqdomain_ops = {
+static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
 	.map = mdss_hw_irqdomain_map,
 	.xlate = irq_domain_xlate_onecell,
 };
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index a38c5fe6cc19..7d3741215387 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
 
 	mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
 			sizeof(*mdp5_state), GFP_KERNEL);
+	if (!mdp5_state)
+		return NULL;
 
-	if (mdp5_state && mdp5_state->base.fb)
-		drm_framebuffer_reference(mdp5_state->base.fb);
+	__drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);
 
 	return &mdp5_state->base;
 }
@@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
 			mdp5_pipe_release(state->state, old_hwpipe);
 			mdp5_pipe_release(state->state, old_right_hwpipe);
 		}
+	} else {
+		mdp5_pipe_release(state->state, mdp5_state->hwpipe);
+		mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
+		mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 87b5695d4034..9d498eb81906 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -830,6 +830,7 @@ static struct drm_driver msm_driver = {
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export   = drm_gem_prime_export,
 	.gem_prime_import   = drm_gem_prime_import,
+	.gem_prime_res_obj  = msm_gem_prime_res_obj,
 	.gem_prime_pin      = msm_gem_prime_pin,
 	.gem_prime_unpin    = msm_gem_prime_unpin,
 	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 28b6f9ba5066..1b26ca626528 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 3f299c537b77..a2f89bac9c16 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
 }
 
 struct msm_fence {
-	struct msm_fence_context *fctx;
 	struct dma_fence base;
+	struct msm_fence_context *fctx;
 };
 
 static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
@@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence)
 	return fence_completed(f->fctx, f->base.seqno);
 }
 
-static void msm_fence_release(struct dma_fence *fence)
-{
-	struct msm_fence *f = to_msm_fence(fence);
-	kfree_rcu(f, base.rcu);
-}
-
 static const struct dma_fence_ops msm_fence_ops = {
 	.get_driver_name = msm_fence_get_driver_name,
 	.get_timeline_name = msm_fence_get_timeline_name,
 	.enable_signaling = msm_fence_enable_signaling,
 	.signaled = msm_fence_signaled,
 	.wait = dma_fence_default_wait,
-	.release = msm_fence_release,
+	.release = dma_fence_free,
 };
 
 struct dma_fence *
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 68e509b3b9e4..50289a23baf8 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
 	struct msm_gem_object *msm_obj;
 	bool use_vram = false;
 
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
 	switch (flags & MSM_BO_CACHE_MASK) {
 	case MSM_BO_UNCACHED:
 	case MSM_BO_CACHED:
@@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 
 	size = PAGE_ALIGN(dmabuf->size);
 
+	/* Take mutex so we can modify the inactive list in msm_gem_new_impl */
+	mutex_lock(&dev->struct_mutex);
 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
+	mutex_unlock(&dev->struct_mutex);
+
 	if (ret)
 		goto fail;
 
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 60bb290700ce..13403c6da6c7 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
 	if (!obj->import_attach)
 		msm_gem_put_pages(obj);
 }
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	return msm_obj->resv;
+}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 1c545ebe6a5a..7832e6421d25 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		if (!in_fence)
 			return -EINVAL;
 
-		/* TODO if we get an array-fence due to userspace merging multiple
-		 * fences, we need a way to determine if all the backing fences
-		 * are from our own context..
+		/*
+		 * Wait if the fence is from a foreign context, or if the fence
+		 * array contains any fence from a foreign context.
 		 */
-
-		if (in_fence->context != gpu->fctx->context) {
+		if (!dma_fence_match_context(in_fence, gpu->fctx->context)) {
 			ret = dma_fence_wait(in_fence, true);
 			if (ret)
 				return ret;
@@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 			goto out;
 		}
 
-		if ((submit_cmd.size + submit_cmd.submit_offset) >=
-				msm_obj->base.size) {
+		if (!submit_cmd.size ||
+			((submit_cmd.size + submit_cmd.submit_offset) >
+				msm_obj->base.size)) {
 			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
 			ret = -EINVAL;
 			goto out;
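
Note: the new in-fence check relies on dma_fence_match_context() to cover fence arrays created when userspace merges several fences. A rough sketch of the equivalent logic, written here for illustration rather than copied from the kernel implementation, looks like this:

	#include <linux/dma-fence.h>
	#include <linux/dma-fence-array.h>

	/* Sketch only: treat an array fence as "ours" when every backing fence
	 * was created on the given context; a plain fence just compares contexts. */
	static bool all_fences_from_context(struct dma_fence *fence, u64 context)
	{
		struct dma_fence_array *array = to_dma_fence_array(fence);
		unsigned int i;

		if (!array)
			return fence->context == context;

		for (i = 0; i < array->num_fences; i++)
			if (array->fences[i]->context != context)
				return false;

		return true;
	}

With that check in place, the submit path only blocks in dma_fence_wait() when at least one backing fence really does come from a foreign context.
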
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 97b9c38c6b3f..0fdc88d79ca8 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 		gpu->grp_clks[i] = get_clock(dev, name);
 
 		/* Remember the key clocks that we need to control later */
-		if (!strcmp(name, "core"))
+		if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
 			gpu->core_clk = gpu->grp_clks[i];
-		else if (!strcmp(name, "rbbmtimer"))
+		else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
 			gpu->rbbmtimer_clk = gpu->grp_clks[i];
 
 		++i;
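
Note: this change makes get_clocks() accept both the new clock names and the legacy "*_clk" names still found in older device trees. A small helper along these lines (hypothetical, not in the patch) would generalize the same matching:

	#include <linux/string.h>

	/* Hypothetical helper (not in the patch): true if "name" equals "base"
	 * or the legacy form "base_clk" used by older device trees. */
	static bool msm_clk_name_matches(const char *name, const char *base)
	{
		size_t n = strlen(base);

		if (strncmp(name, base, n) != 0)
			return false;

		return name[n] == '\0' || strcmp(name + n, "_clk") == 0;
	}

get_clocks() could then call msm_clk_name_matches(name, "core") and msm_clk_name_matches(name, "rbbmtimer") instead of the paired strcmp() checks.
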
