Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 89
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 69
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 173
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cikd.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/dw_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/ps8622.c | 27
-rw-r--r--  drivers/gpu/drm/bridge/ptn3460.c | 34
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 3
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 26
-rw-r--r--  drivers/gpu/drm/drm_cache.c | 5
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 67
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 17
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 4
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 22
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 660
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 173
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp_core.c | 21
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 201
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dpi.c | 26
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 429
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 49
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 539
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 35
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 39
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 323
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.c | 21
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 81
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c | 490
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 137
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.h | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 125
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 32
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 118
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 197
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 121
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 2
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 5
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 5
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 168
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 15
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h | 420
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 3
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 12
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 34
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 9
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 31
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 43
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 61
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 163
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 120
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 79
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_phy.c | 315
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 12
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.c | 164
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll.h | 89
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c | 652
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h | 12
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.xml.h | 101
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_aux.c | 12
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_ctrl.c | 9
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 99
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 43
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 32
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 39
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 9
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 3
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 22
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 398
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 20
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 128
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 11
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 3
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 24
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_common.xml.h | 16
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_format.c | 6
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 46
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 27
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 52
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 5
-rw-r--r--  drivers/gpu/drm/panel/panel-ld9040.c | 10
-rw-r--r--  drivers/gpu/drm/panel/panel-s6e8aa0.c | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 370
-rw-r--r--  drivers/gpu/drm/radeon/cik_sdma.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/dce3_1_afmt.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 392
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 155
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_audio.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 109
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 81
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 336
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 17
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_group.h | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 31
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.c | 15
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/dpaux.c | 39
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 12
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 25
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drm_bus.c | 44
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 9
179 files changed, 7490 insertions(+), 2858 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 22866d1c3d69..01657830b470 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -425,6 +425,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 			      unsigned irq_type);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 		      struct amdgpu_fence **fence);
+int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
+			  uint64_t seq, struct amdgpu_fence **fence);
 void amdgpu_fence_process(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
@@ -435,9 +437,6 @@ int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
 int amdgpu_fence_wait_any(struct amdgpu_device *adev,
 			  struct amdgpu_fence **fences,
 			  bool intr);
-long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
-				   u64 *target_seq, bool intr,
-				   long timeout);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
@@ -1622,6 +1621,7 @@ struct amdgpu_vce {
 	unsigned		fb_version;
 	atomic_t		handles[AMDGPU_MAX_VCE_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_VCE_HANDLES];
+	uint32_t		img_size[AMDGPU_MAX_VCE_HANDLES];
 	struct delayed_work	idle_work;
 	const struct firmware	*fw;	/* VCE firmware */
 	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 36d34e0afbc3..f82a2dd83874 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -30,6 +30,7 @@
 
 #include <drm/drmP.h>
 #include "amdgpu.h"
+#include "amdgpu_trace.h"
 
 static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
 				 struct amdgpu_bo_list **result,
@@ -124,6 +125,8 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 			gws_obj = entry->robj;
 		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
 			oa_obj = entry->robj;
+
+		trace_amdgpu_bo_list_set(list, entry->robj);
 	}
 
 	for (i = 0; i < list->num_entries; ++i)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index f09b2cba40ca..d63135bf29c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -181,8 +181,6 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		}
 		p->chunks[i].chunk_id = user_chunk.chunk_id;
 		p->chunks[i].length_dw = user_chunk.length_dw;
-		if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_IB)
-			p->num_ibs++;
 
 		size = p->chunks[i].length_dw;
 		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -199,7 +197,12 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 			goto out;
 		}
 
-		if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_FENCE) {
+		switch (p->chunks[i].chunk_id) {
+		case AMDGPU_CHUNK_ID_IB:
+			p->num_ibs++;
+			break;
+
+		case AMDGPU_CHUNK_ID_FENCE:
 			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
 			if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
 				uint32_t handle;
@@ -221,6 +224,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 				r = -EINVAL;
 				goto out;
 			}
+			break;
+
+		case AMDGPU_CHUNK_ID_DEPENDENCIES:
+			break;
+
+		default:
+			r = -EINVAL;
+			goto out;
 		}
 	}
 
@@ -445,8 +456,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
-	for (i = 0; i < parser->num_ibs; i++)
-		amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+	if (parser->ibs)
+		for (i = 0; i < parser->num_ibs; i++)
+			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
 	kfree(parser->ibs);
 	if (parser->uf.bo)
 		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
@@ -654,6 +666,55 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 	return 0;
 }
 
+static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
+				  struct amdgpu_cs_parser *p)
+{
+	struct amdgpu_ib *ib;
+	int i, j, r;
+
+	if (!p->num_ibs)
+		return 0;
+
+	/* Add dependencies to first IB */
+	ib = &p->ibs[0];
+	for (i = 0; i < p->nchunks; ++i) {
+		struct drm_amdgpu_cs_chunk_dep *deps;
+		struct amdgpu_cs_chunk *chunk;
+		unsigned num_deps;
+
+		chunk = &p->chunks[i];
+
+		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
+			continue;
+
+		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
+		num_deps = chunk->length_dw * 4 /
+			sizeof(struct drm_amdgpu_cs_chunk_dep);
+
+		for (j = 0; j < num_deps; ++j) {
+			struct amdgpu_fence *fence;
+			struct amdgpu_ring *ring;
+
+			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
+					       deps[j].ip_instance,
+					       deps[j].ring, &ring);
+			if (r)
+				return r;
+
+			r = amdgpu_fence_recreate(ring, p->filp,
+						  deps[j].handle,
+						  &fence);
+			if (r)
+				return r;
+
+			amdgpu_sync_fence(&ib->sync, fence);
+			amdgpu_fence_unref(&fence);
+		}
+	}
+
+	return 0;
+}
+
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
@@ -688,11 +749,16 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			else
 				DRM_ERROR("Failed to process the buffer list %d!\n", r);
 		}
-	} else {
+	}
+
+	if (!r) {
 		reserved_buffers = true;
 		r = amdgpu_cs_ib_fill(adev, &parser);
 	}
 
+	if (!r)
+		r = amdgpu_cs_dependencies(adev, &parser);
+
 	if (r) {
 		amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
 		up_read(&adev->exclusive_lock);
@@ -730,9 +796,9 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 {
 	union drm_amdgpu_wait_cs *wait = data;
 	struct amdgpu_device *adev = dev->dev_private;
-	uint64_t seq[AMDGPU_MAX_RINGS] = {0};
-	struct amdgpu_ring *ring = NULL;
 	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
+	struct amdgpu_fence *fence = NULL;
+	struct amdgpu_ring *ring = NULL;
 	struct amdgpu_ctx *ctx;
 	long r;
 
@@ -745,9 +811,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
 	if (r)
 		return r;
 
-	seq[ring->idx] = wait->in.handle;
+	r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence);
+	if (r)
+		return r;
 
-	r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout);
+	r = fence_wait_timeout(&fence->base, true, timeout);
+	amdgpu_fence_unref(&fence);
 	amdgpu_ctx_put(ctx);
 	if (r < 0)
 		return r;
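
A note on the new AMDGPU_CHUNK_ID_DEPENDENCIES path above: each dependency entry names a ring (ip_type/ip_instance/ring) plus the fence sequence number ("handle") of an earlier submission, and amdgpu_cs_dependencies() recreates that fence and adds it to the first IB's sync object. The sketch below shows how such a chunk could be packed on the userspace side; the local struct is a hand-written stand-in covering only the fields the parser above actually reads, not the authoritative UAPI definition from amdgpu_drm.h.

#include <stdint.h>
#include <string.h>

/* Hypothetical mirror of struct drm_amdgpu_cs_chunk_dep: only the four
 * fields dereferenced by amdgpu_cs_dependencies() above are modeled. */
struct cs_chunk_dep {
	uint32_t ip_type;	/* which engine, e.g. GFX/SDMA/VCE */
	uint32_t ip_instance;	/* instance of that engine */
	uint32_t ring;		/* ring index within the instance */
	uint64_t handle;	/* fence seq number of the earlier submit */
};

/* Pack one dependency entry; the kernel computes the entry count as
 * length_dw * 4 / sizeof(struct drm_amdgpu_cs_chunk_dep). */
static uint32_t pack_dep(struct cs_chunk_dep *dep, uint32_t ip_type,
			 uint32_t ip_instance, uint32_t ring, uint64_t seq)
{
	memset(dep, 0, sizeof(*dep));
	dep->ip_type = ip_type;
	dep->ip_instance = ip_instance;
	dep->ring = ring;
	dep->handle = seq;
	return (uint32_t)(sizeof(*dep) / 4); /* chunk lengths are in dwords */
}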
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index fec487d1c870..ba46be361c9b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1191,7 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		return -EINVAL;
 	}
 
-
+	adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
+	if (adev->ip_block_enabled == NULL)
+		return -ENOMEM;
 
 	if (adev->ip_blocks == NULL) {
 		DRM_ERROR("No IP blocks found!\n");
@@ -1575,8 +1577,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	amdgpu_fence_driver_fini(adev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_fini(adev);
-	if (adev->ip_block_enabled)
-		kfree(adev->ip_block_enabled);
+	kfree(adev->ip_block_enabled);
 	adev->ip_block_enabled = NULL;
 	adev->accel_working = false;
 	/* free i2c buses */
@@ -2000,4 +2001,10 @@ int amdgpu_debugfs_init(struct drm_minor *minor)
 void amdgpu_debugfs_cleanup(struct drm_minor *minor)
 {
 }
+#else
+static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+	return 0;
+}
+static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
 #endif
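
Two details worth noting in the amdgpu_device.c hunks: kfree(NULL) is defined to be a no-op, so dropping the NULL check is safe, and the new #else branch supplies no-op stubs so callers need no #ifdef of their own. A standalone sketch of the same compile-out pattern (CONFIG_DEBUG_FS here is just a stand-in macro, not wired to Kconfig):

#include <stdio.h>
#include <stdlib.h>

#ifdef CONFIG_DEBUG_FS
static int regs_init(void) { puts("debugfs registers exposed"); return 0; }
static void regs_cleanup(void) { puts("debugfs registers removed"); }
#else
/* no-op stubs: callers stay unconditional when the feature is compiled out */
static int regs_init(void) { return 0; }
static void regs_cleanup(void) { }
#endif

int main(void)
{
	int r = regs_init();
	free(NULL);	/* like kfree(NULL), free(NULL) is a defined no-op */
	regs_cleanup();
	return r;
}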
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 5c9918d01bf9..a7189a1fa6a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -136,6 +136,38 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 }
 
 /**
+ * amdgpu_fence_recreate - recreate a fence from an user fence
+ *
+ * @ring: ring the fence is associated with
+ * @owner: creator of the fence
+ * @seq: user fence sequence number
+ * @fence: resulting amdgpu fence object
+ *
+ * Recreates a fence command from the user fence sequence number (all asics).
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner,
+			  uint64_t seq, struct amdgpu_fence **fence)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	if (seq > ring->fence_drv.sync_seq[ring->idx])
+		return -EINVAL;
+
+	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+	if ((*fence) == NULL)
+		return -ENOMEM;
+
+	(*fence)->seq = seq;
+	(*fence)->ring = ring;
+	(*fence)->owner = owner;
+	fence_init(&(*fence)->base, &amdgpu_fence_ops,
+		   &adev->fence_queue.lock, adev->fence_context + ring->idx,
+		   (*fence)->seq);
+	return 0;
+}
+
+/**
  * amdgpu_fence_check_signaled - callback from fence_queue
  *
  * this function is called with fence_queue lock held, which is also used
@@ -517,12 +549,14 @@ static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq)
  * the wait timeout, or an error for all other cases.
  * -EDEADLK is returned when a GPU lockup has been detected.
  */
-long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq,
-				   bool intr, long timeout)
+static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
+					  u64 *target_seq, bool intr,
+					  long timeout)
 {
 	uint64_t last_seq[AMDGPU_MAX_RINGS];
 	bool signaled;
-	int i, r;
+	int i;
+	long r;
 
 	if (timeout == 0) {
 		return amdgpu_fence_any_seq_signaled(adev, target_seq);
@@ -1023,7 +1057,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
 
 		amdgpu_fence_process(ring);
 
-		seq_printf(m, "--- ring %d ---\n", i);
+		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
 		seq_printf(m, "Last signaled fence 0x%016llx\n",
 			   (unsigned long long)atomic64_read(&ring->fence_drv.last_seq));
 		seq_printf(m, "Last emitted 0x%016llx\n",
@@ -1031,7 +1065,8 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
 
 		for (j = 0; j < AMDGPU_MAX_RINGS; ++j) {
 			struct amdgpu_ring *other = adev->rings[j];
-			if (i != j && other && other->fence_drv.initialized)
+			if (i != j && other && other->fence_drv.initialized &&
+			    ring->fence_drv.sync_seq[j])
 				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
 					   j, ring->fence_drv.sync_seq[j]);
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 0ec222295fee..ae43b58c9733 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -352,7 +352,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
 	if (((int64_t)timeout_ns) < 0)
 		return MAX_SCHEDULE_TIMEOUT;
 
-	timeout = ktime_sub_ns(ktime_get(), timeout_ns);
+	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
 	if (ktime_to_ns(timeout) < 0)
 		return 0;
 
@@ -496,7 +496,7 @@ error_unreserve:
 error_free:
 	drm_free_large(vm_bos);
 
-	if (r)
+	if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
 
@@ -525,8 +525,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
-			AMDGPU_VM_PAGE_EXECUTABLE);
+	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
+			AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
 	if ((args->flags & invalid_flags)) {
 		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
 			args->flags, invalid_flags);
@@ -579,7 +579,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		break;
 	}
 
-	if (!r)
+	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
 		amdgpu_gem_va_update_vm(adev, bo_va);
 
 	drm_gem_object_unreference_unlocked(gobj);
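
The amdgpu_gem_timeout() fix above is a sign error: the old code computed now - deadline, which is negative for any deadline in the future and so always clamped to zero; the new code computes deadline - now. A minimal userspace analog of the corrected conversion (clock and types are illustrative, not the kernel's ktime API):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Convert an absolute deadline (ns) into the time still remaining,
 * clamped at zero once the deadline has passed. */
static int64_t remaining_ns(int64_t deadline_ns)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	int64_t now = (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
	int64_t rem = deadline_ns - now;	/* deadline - now, not now - deadline */
	return rem < 0 ? 0 : rem;
}

int main(void)
{
	printf("%lld ns left\n", (long long)remaining_ns(0)); /* expired -> 0 */
	return 0;
}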
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index b56dd64bd4ea..961d7265c286 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -30,19 +30,21 @@ TRACE_EVENT(amdgpu_cs,
 	    TP_PROTO(struct amdgpu_cs_parser *p, int i),
 	    TP_ARGS(p, i),
 	    TP_STRUCT__entry(
+			     __field(struct amdgpu_bo_list *, bo_list)
 			     __field(u32, ring)
 			     __field(u32, dw)
 			     __field(u32, fences)
 			     ),
 
 	    TP_fast_assign(
+			   __entry->bo_list = p->bo_list;
 			   __entry->ring = p->ibs[i].ring->idx;
 			   __entry->dw = p->ibs[i].length_dw;
 			   __entry->fences = amdgpu_fence_count_emitted(
 				p->ibs[i].ring);
 			   ),
-	    TP_printk("ring=%u, dw=%u, fences=%u",
-		      __entry->ring, __entry->dw,
+	    TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
+		      __entry->bo_list, __entry->ring, __entry->dw,
 		      __entry->fences)
 );
 
@@ -61,6 +63,54 @@ TRACE_EVENT(amdgpu_vm_grab_id,
 	    TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring)
 );
 
+TRACE_EVENT(amdgpu_vm_bo_map,
+	    TP_PROTO(struct amdgpu_bo_va *bo_va,
+		     struct amdgpu_bo_va_mapping *mapping),
+	    TP_ARGS(bo_va, mapping),
+	    TP_STRUCT__entry(
+			     __field(struct amdgpu_bo *, bo)
+			     __field(long, start)
+			     __field(long, last)
+			     __field(u64, offset)
+			     __field(u32, flags)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->bo = bo_va->bo;
+			   __entry->start = mapping->it.start;
+			   __entry->last = mapping->it.last;
+			   __entry->offset = mapping->offset;
+			   __entry->flags = mapping->flags;
+			   ),
+	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+		      __entry->bo, __entry->start, __entry->last,
+		      __entry->offset, __entry->flags)
+);
+
+TRACE_EVENT(amdgpu_vm_bo_unmap,
+	    TP_PROTO(struct amdgpu_bo_va *bo_va,
+		     struct amdgpu_bo_va_mapping *mapping),
+	    TP_ARGS(bo_va, mapping),
+	    TP_STRUCT__entry(
+			     __field(struct amdgpu_bo *, bo)
+			     __field(long, start)
+			     __field(long, last)
+			     __field(u64, offset)
+			     __field(u32, flags)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->bo = bo_va->bo;
+			   __entry->start = mapping->it.start;
+			   __entry->last = mapping->it.last;
+			   __entry->offset = mapping->offset;
+			   __entry->flags = mapping->flags;
+			   ),
+	    TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+		      __entry->bo, __entry->start, __entry->last,
+		      __entry->offset, __entry->flags)
+);
+
 TRACE_EVENT(amdgpu_vm_bo_update,
 	    TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
 	    TP_ARGS(mapping),
@@ -121,6 +171,21 @@ TRACE_EVENT(amdgpu_vm_flush,
 		      __entry->pd_addr, __entry->ring, __entry->id)
 );
 
+TRACE_EVENT(amdgpu_bo_list_set,
+	    TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
+	    TP_ARGS(list, bo),
+	    TP_STRUCT__entry(
+			     __field(struct amdgpu_bo_list *, list)
+			     __field(struct amdgpu_bo *, bo)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->list = list;
+			   __entry->bo = bo;
+			   ),
+	    TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
+);
+
 DECLARE_EVENT_CLASS(amdgpu_fence_request,
 
 	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index d3706a498293..dd3415d2e45d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -674,7 +674,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 		return 0;
 
 	if (gtt && gtt->userptr) {
-		ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
+		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
 		if (!ttm->sg)
 			return -ENOMEM;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 1127a504f118..d3ca73090e39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -464,28 +464,42 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
  * @p: parser context
  * @lo: address of lower dword
  * @hi: address of higher dword
+ * @size: minimum size
  *
  * Patch relocation inside command stream with real buffer address
  */
-int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi)
+static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
+			       int lo, int hi, unsigned size, uint32_t index)
 {
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_ib *ib = &p->ibs[ib_idx];
 	struct amdgpu_bo *bo;
 	uint64_t addr;
 
+	if (index == 0xffffffff)
+		index = 0;
+
 	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
 	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
+	addr += ((uint64_t)size) * ((uint64_t)index);
 
 	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
 	if (mapping == NULL) {
-		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d\n",
+		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
+			  addr, lo, hi, size, index);
+		return -EINVAL;
+	}
+
+	if ((addr + (uint64_t)size) >
+	    ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+		DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
 			  addr, lo, hi);
 		return -EINVAL;
 	}
 
 	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
 	addr += amdgpu_bo_gpu_offset(bo);
+	addr -= ((uint64_t)size) * ((uint64_t)index);
 
 	ib->ptr[lo] = addr & 0xFFFFFFFF;
 	ib->ptr[hi] = addr >> 32;
@@ -494,6 +508,48 @@ int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int
 }
 
 /**
+ * amdgpu_vce_validate_handle - validate stream handle
+ *
+ * @p: parser context
+ * @handle: handle to validate
+ * @allocated: allocated a new handle?
+ *
+ * Validates the handle and return the found session index or -EINVAL
+ * we we don't have another free session index.
+ */
+static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
+				      uint32_t handle, bool *allocated)
+{
+	unsigned i;
+
+	*allocated = false;
+
+	/* validate the handle */
+	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
+		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
+			if (p->adev->vce.filp[i] != p->filp) {
+				DRM_ERROR("VCE handle collision detected!\n");
+				return -EINVAL;
+			}
+			return i;
+		}
+	}
+
+	/* handle not found try to alloc a new one */
+	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
+		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
+			p->adev->vce.filp[i] = p->filp;
+			p->adev->vce.img_size[i] = 0;
+			*allocated = true;
+			return i;
+		}
+	}
+
+	DRM_ERROR("No more free VCE handles!\n");
+	return -EINVAL;
+}
+
+/**
  * amdgpu_vce_cs_parse - parse and validate the command stream
  *
  * @p: parser context
@@ -501,10 +557,15 @@ int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int
  */
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 {
-	uint32_t handle = 0;
-	bool destroy = false;
-	int i, r, idx = 0;
 	struct amdgpu_ib *ib = &p->ibs[ib_idx];
+	unsigned fb_idx = 0, bs_idx = 0;
+	int session_idx = -1;
+	bool destroyed = false;
+	bool created = false;
+	bool allocated = false;
+	uint32_t tmp, handle = 0;
+	uint32_t *size = &tmp;
+	int i, r = 0, idx = 0;
 
 	amdgpu_vce_note_usage(p->adev);
 
@@ -514,16 +575,44 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 
 		if ((len < 8) || (len & 3)) {
 			DRM_ERROR("invalid VCE command length (%d)!\n", len);
-			return -EINVAL;
+			r = -EINVAL;
+			goto out;
+		}
+
+		if (destroyed) {
+			DRM_ERROR("No other command allowed after destroy!\n");
+			r = -EINVAL;
+			goto out;
 		}
 
 		switch (cmd) {
 		case 0x00000001: // session
 			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+			session_idx = amdgpu_vce_validate_handle(p, handle,
+								 &allocated);
+			if (session_idx < 0)
+				return session_idx;
+			size = &p->adev->vce.img_size[session_idx];
 			break;
 
 		case 0x00000002: // task info
+			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
+			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
+			break;
+
 		case 0x01000001: // create
+			created = true;
+			if (!allocated) {
+				DRM_ERROR("Handle already in use!\n");
+				r = -EINVAL;
+				goto out;
+			}
+
+			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
+				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
+				8 * 3 / 2;
+			break;
+
 		case 0x04000001: // config extension
 		case 0x04000002: // pic control
 		case 0x04000005: // rate control
@@ -534,60 +623,74 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
 			break;
 
 		case 0x03000001: // encode
-			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9);
+			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
+						*size, 0);
 			if (r)
-				return r;
+				goto out;
 
-			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11);
+			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
+						*size / 3, 0);
 			if (r)
-				return r;
+				goto out;
 			break;
 
 		case 0x02000001: // destroy
-			destroy = true;
+			destroyed = true;
 			break;
 
 		case 0x05000001: // context buffer
+			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+						*size * 2, 0);
+			if (r)
+				goto out;
+			break;
+
 		case 0x05000004: // video bitstream buffer
+			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
+			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+						tmp, bs_idx);
+			if (r)
+				goto out;
+			break;
+
 		case 0x05000005: // feedback buffer
-			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2);
+			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+						4096, fb_idx);
 			if (r)
-				return r;
+				goto out;
 			break;
 
 		default:
 			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
-			return -EINVAL;
+			r = -EINVAL;
+			goto out;
 		}
 
+		if (session_idx == -1) {
+			DRM_ERROR("no session command at start of IB\n");
+			r = -EINVAL;
+			goto out;
+		}
+
 		idx += len / 4;
 	}
 
-	if (destroy) {
-		/* IB contains a destroy msg, free the handle */
-		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
-			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
-
-		return 0;
-	}
-
-	/* create or encode, validate the handle */
-	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
-		if (atomic_read(&p->adev->vce.handles[i]) == handle)
-			return 0;
+	if (allocated && !created) {
+		DRM_ERROR("New session without create command!\n");
+		r = -ENOENT;
 	}
 
-	/* handle not found try to alloc a new one */
-	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
-		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
-			p->adev->vce.filp[i] = p->filp;
-			return 0;
-		}
+out:
+	if ((!r && destroyed) || (r && allocated)) {
+		/*
+		 * IB contains a destroy msg or we have allocated an
+		 * handle and got an error, anyway free the handle
+		 */
+		for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
+			atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
 	}
 
-	DRM_ERROR("No more free VCE handles!\n");
-
-	return -EINVAL;
+	return r;
 }
 
 /**
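
The session bookkeeping above claims a handle slot with atomic_cmpxchg(&handles[i], 0, handle), which succeeds exactly when the slot still reads 0 (the kernel primitive returns the old value, so 0 means the claim went through). A small standalone model of the same claim loop using C11 atomics:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16	/* stands in for AMDGPU_MAX_VCE_HANDLES */

static _Atomic uint32_t handles[MAX_HANDLES];

/* Claim the first free slot by atomically swapping 0 -> handle;
 * returns the slot index, or -1 when every slot is taken. */
static int alloc_slot(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint32_t expected = 0;
		if (atomic_compare_exchange_strong(&handles[i], &expected,
						   handle))
			return i;
	}
	return -1;
}

int main(void)
{
	printf("handle 0x123 -> slot %d\n", alloc_slot(0x123));
	printf("handle 0x456 -> slot %d\n", alloc_slot(0x456));
	return 0;
}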
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index b6a9d0956c60..7ccdb5927da5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -33,7 +33,6 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 			       struct amdgpu_fence **fence);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
-int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi);
 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
 bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring,
 				    struct amdgpu_semaphore *semaphore,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 407882b233c7..9a4e3b63f1cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1001,6 +1001,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 	list_add(&mapping->list, &bo_va->mappings);
 	interval_tree_insert(&mapping->it, &vm->va);
+	trace_amdgpu_vm_bo_map(bo_va, mapping);
 
 	bo_va->addr = 0;
 
@@ -1058,6 +1059,7 @@ error_free:
 	mutex_lock(&vm->mutex);
 	list_del(&mapping->list);
 	interval_tree_remove(&mapping->it, &vm->va);
+	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 	kfree(mapping);
 
 error_unlock:
@@ -1099,6 +1101,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 	mutex_lock(&vm->mutex);
 	list_del(&mapping->list);
 	interval_tree_remove(&mapping->it, &vm->va);
+	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
 	if (bo_va->addr) {
 		/* clear the old address */
@@ -1139,6 +1142,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 	list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
 		list_del(&mapping->list);
 		interval_tree_remove(&mapping->it, &vm->va);
+		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 		if (bo_va->addr)
 			list_add(&mapping->list, &vm->freed);
 		else
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 5dab578d6462..341c56681841 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -2256,10 +2256,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 		return -EINVAL;
 	}
 
-	adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
-	if (adev->ip_block_enabled == NULL)
-		return -ENOMEM;
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 220865a44814..d19085a97064 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -552,4 +552,10 @@
 #define VCE_CMD_IB_AUTO		0x00000005
 #define VCE_CMD_SEMAPHORE	0x00000006
 
+/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
+enum {
+	MTYPE_CACHED = 0,
+	MTYPE_NONCACHED = 3
+};
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index e4936a452bc6..f75a31df30bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev)
 	pi->mgcg_cgtt_local1 = 0x0;
 	pi->clock_slow_down_step = 25000;
 	pi->skip_clock_slow_down = 1;
-	pi->enable_nb_ps_policy = 1;
+	pi->enable_nb_ps_policy = 0;
 	pi->caps_power_containment = true;
 	pi->caps_cac = true;
 	pi->didt_enabled = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h
index 782a74107664..99e1afc89629 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h
@@ -46,7 +46,7 @@
 
 /* Do not change the following, it is also defined in SMU8.h */
 #define SMU_EnabledFeatureScoreboard_AcpDpmOn		0x00000001
-#define SMU_EnabledFeatureScoreboard_SclkDpmOn		0x00100000
+#define SMU_EnabledFeatureScoreboard_SclkDpmOn		0x00200000
 #define SMU_EnabledFeatureScoreboard_UvdDpmOn		0x00800000
 #define SMU_EnabledFeatureScoreboard_VceDpmOn		0x01000000
 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5cde635978f9..6e77964f1b64 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3403,19 +3403,25 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
 
 	switch (entry->src_data) {
 	case 0: /* vblank */
-		if (disp_int & interrupt_status_offsets[crtc].vblank) {
+		if (disp_int & interrupt_status_offsets[crtc].vblank)
 			dce_v10_0_crtc_vblank_int_ack(adev, crtc);
-			if (amdgpu_irq_enabled(adev, source, irq_type)) {
-				drm_handle_vblank(adev->ddev, crtc);
-			}
-			DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+		else
+			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+		if (amdgpu_irq_enabled(adev, source, irq_type)) {
+			drm_handle_vblank(adev->ddev, crtc);
 		}
+		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
 		break;
 	case 1: /* vline */
-		if (disp_int & interrupt_status_offsets[crtc].vline) {
+		if (disp_int & interrupt_status_offsets[crtc].vline)
 			dce_v10_0_crtc_vline_int_ack(adev, crtc);
-			DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-		}
+		else
+			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
 		break;
 	default:
 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 95efd98b202d..7f7abb0e0be5 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3402,19 +3402,25 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
 
 	switch (entry->src_data) {
 	case 0: /* vblank */
-		if (disp_int & interrupt_status_offsets[crtc].vblank) {
+		if (disp_int & interrupt_status_offsets[crtc].vblank)
 			dce_v11_0_crtc_vblank_int_ack(adev, crtc);
-			if (amdgpu_irq_enabled(adev, source, irq_type)) {
-				drm_handle_vblank(adev->ddev, crtc);
-			}
-			DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+		else
+			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+		if (amdgpu_irq_enabled(adev, source, irq_type)) {
+			drm_handle_vblank(adev->ddev, crtc);
 		}
+		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
 		break;
 	case 1: /* vline */
-		if (disp_int & interrupt_status_offsets[crtc].vline) {
+		if (disp_int & interrupt_status_offsets[crtc].vline)
 			dce_v11_0_crtc_vline_int_ack(adev, crtc);
-			DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-		}
+		else
+			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
 		break;
 	default:
 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 72c27ac915f2..08387dfd98a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -3237,19 +3237,25 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
 
 	switch (entry->src_data) {
 	case 0: /* vblank */
-		if (disp_int & interrupt_status_offsets[crtc].vblank) {
+		if (disp_int & interrupt_status_offsets[crtc].vblank)
 			WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
-			if (amdgpu_irq_enabled(adev, source, irq_type)) {
-				drm_handle_vblank(adev->ddev, crtc);
-			}
-			DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+		else
+			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+		if (amdgpu_irq_enabled(adev, source, irq_type)) {
+			drm_handle_vblank(adev->ddev, crtc);
 		}
+		DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
 		break;
 	case 1: /* vline */
-		if (disp_int & interrupt_status_offsets[crtc].vline) {
+		if (disp_int & interrupt_status_offsets[crtc].vline)
 			WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
-			DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-		}
+		else
+			DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+		DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
 		break;
 	default:
 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
@@ -3379,7 +3385,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
 	uint32_t disp_int, mask, int_control, tmp;
 	unsigned hpd;
 
-	if (entry->src_data > 6) {
+	if (entry->src_data >= adev->mode_info.num_hpd) {
 		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
 		return 0;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index cb7907447b81..2c188fb9fd22 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2010,6 +2010,46 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev,
 }
 
 /**
+ * gmc_v7_0_init_compute_vmid - gart enable
+ *
+ * @rdev: amdgpu_device pointer
+ *
+ * Initialize compute vmid sh_mem registers
+ *
+ */
+#define DEFAULT_SH_MEM_BASES	(0x6000)
+#define FIRST_COMPUTE_VMID	(8)
+#define LAST_COMPUTE_VMID	(16)
+static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev)
+{
+	int i;
+	uint32_t sh_mem_config;
+	uint32_t sh_mem_bases;
+
+	/*
+	 * Configure apertures:
+	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
+	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
+	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
+	 */
+	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
+	sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
+	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
+	mutex_lock(&adev->srbm_mutex);
+	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+		cik_srbm_select(adev, 0, 0, 0, i);
+		/* CP and shaders */
+		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+		WREG32(mmSH_MEM_APE1_BASE, 1);
+		WREG32(mmSH_MEM_APE1_LIMIT, 0);
+		WREG32(mmSH_MEM_BASES, sh_mem_bases);
+	}
+	cik_srbm_select(adev, 0, 0, 0, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
+/**
  * gfx_v7_0_gpu_init - setup the 3D engine
  *
  * @adev: amdgpu_device pointer
@@ -2230,6 +2270,8 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
 	cik_srbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 
+	gmc_v7_0_init_compute_vmid(adev);
+
 	WREG32(mmSX_DEBUG_1, 0x20);
 
 	WREG32(mmTA_CNTL_AUX, 0x00010000);
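
On the aperture arithmetic in gmc_v7_0_init_compute_vmid(): SH_MEM_BASES packs two 16-bit bases (private and shared), and the comment in the hunk implies each selects the top 16 bits of a 64-bit GPU virtual address, so 0x6000 lands the apertures at 0x60000000'00000000. A quick standalone check of that reading (the bit placement is an assumption drawn from the comment, not from register documentation):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base = 0x6000;				/* DEFAULT_SH_MEM_BASES */
	uint32_t sh_mem_bases = base | (base << 16);	/* private | shared */
	uint64_t aperture = (uint64_t)base << 48;	/* assumed bits 63:48 */

	printf("SH_MEM_BASES   = 0x%08x\n", sh_mem_bases);
	printf("aperture start = 0x%016llx\n",
	       (unsigned long long)aperture);		/* 0x6000000000000000 */
	return 0;
}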
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 14242bd33363..7b683fb2173c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1894,6 +1894,51 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev,
 	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
+/**
+ * gmc_v8_0_init_compute_vmid - gart enable
+ *
+ * @rdev: amdgpu_device pointer
+ *
+ * Initialize compute vmid sh_mem registers
+ *
+ */
+#define DEFAULT_SH_MEM_BASES	(0x6000)
+#define FIRST_COMPUTE_VMID	(8)
+#define LAST_COMPUTE_VMID	(16)
+static void gmc_v8_0_init_compute_vmid(struct amdgpu_device *adev)
+{
+	int i;
+	uint32_t sh_mem_config;
+	uint32_t sh_mem_bases;
+
+	/*
+	 * Configure apertures:
+	 * LDS:         0x60000000'00000000 - 0x60000001'00000000 (4GB)
+	 * Scratch:     0x60000001'00000000 - 0x60000002'00000000 (4GB)
+	 * GPUVM:       0x60010000'00000000 - 0x60020000'00000000 (1TB)
+	 */
+	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
+
+	sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 <<
+			SH_MEM_CONFIG__ADDRESS_MODE__SHIFT |
+			SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT |
+			MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT |
+			SH_MEM_CONFIG__PRIVATE_ATC_MASK;
+
+	mutex_lock(&adev->srbm_mutex);
+	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+		vi_srbm_select(adev, 0, 0, 0, i);
+		/* CP and shaders */
+		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
+		WREG32(mmSH_MEM_APE1_BASE, 1);
+		WREG32(mmSH_MEM_APE1_LIMIT, 0);
+		WREG32(mmSH_MEM_BASES, sh_mem_bases);
+	}
+	vi_srbm_select(adev, 0, 0, 0, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
 static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 {
 	u32 gb_addr_config;
@@ -2113,6 +2158,8 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 	vi_srbm_select(adev, 0, 0, 0, 0);
 	mutex_unlock(&adev->srbm_mutex);
 
+	gmc_v8_0_init_compute_vmid(adev);
+
 	mutex_lock(&adev->grbm_idx_mutex);
 	/*
 	 * making sure that the following register writes will be broadcasted
@@ -3081,7 +3128,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
 		WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
 		       AMDGPU_DOORBELL_KIQ << 2);
 		WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
-		       AMDGPU_DOORBELL_MEC_RING7 << 2);
+		       0x7FFFF << 2);
 	}
 	tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
 	tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3097,6 +3144,12 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
3097 WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 3144 WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL,
3098 mqd->cp_hqd_pq_doorbell_control); 3145 mqd->cp_hqd_pq_doorbell_control);
3099 3146
3147 /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
3148 ring->wptr = 0;
3149 mqd->cp_hqd_pq_wptr = ring->wptr;
3150 WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
3151 mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
3152
3100 /* set the vmid for the queue */ 3153 /* set the vmid for the queue */
3101 mqd->cp_hqd_vmid = 0; 3154 mqd->cp_hqd_vmid = 0;
3102 WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid); 3155 WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid);
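The SH_MEM_BASES write above packs two 16-bit aperture selectors into one register (the low half for the private aperture, the high half for the shared aperture), each selecting the top 16 bits of a 64-bit address. A standalone sketch of that arithmetic, assuming the field layout implied by the comment (bits 63:48 of the aperture come from the 16-bit base):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DEFAULT_SH_MEM_BASES 0x6000

	int main(void)
	{
		/* Both 16-bit halves of SH_MEM_BASES carry the same base. */
		uint32_t sh_mem_bases = DEFAULT_SH_MEM_BASES |
					(DEFAULT_SH_MEM_BASES << 16);

		/* The base selects bits 63:48 of the aperture address. */
		uint64_t aperture = (uint64_t)DEFAULT_SH_MEM_BASES << 48;

		printf("SH_MEM_BASES  = 0x%08x\n", sh_mem_bases);	/* 0x60006000 */
		printf("aperture base = 0x%016" PRIx64 "\n", aperture);	/* matches the LDS base above */
		return 0;
	}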
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index e3c1fde75363..7bb37b93993f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -439,6 +439,31 @@ static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
439} 439}
440 440
441/** 441/**
 442 * sdma_v3_0_ctx_switch_enable - enable/disable the async dma engines context switch
443 *
444 * @adev: amdgpu_device pointer
445 * @enable: enable/disable the DMA MEs context switch.
446 *
447 * Halt or unhalt the async dma engines context switch (VI).
448 */
449static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
450{
451 u32 f32_cntl;
452 int i;
453
454 for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
455 f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
456 if (enable)
457 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
458 AUTO_CTXSW_ENABLE, 1);
459 else
460 f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
461 AUTO_CTXSW_ENABLE, 0);
462 WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
463 }
464}
465
466/**
442 * sdma_v3_0_enable - stop the async dma engines 467 * sdma_v3_0_enable - stop the async dma engines
443 * 468 *
444 * @adev: amdgpu_device pointer 469 * @adev: amdgpu_device pointer
@@ -648,6 +673,8 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
648 673
649 /* unhalt the MEs */ 674 /* unhalt the MEs */
650 sdma_v3_0_enable(adev, true); 675 sdma_v3_0_enable(adev, true);
676 /* enable sdma ring preemption */
677 sdma_v3_0_ctx_switch_enable(adev, true);
651 678
652 /* start the gfx rings and rlc compute queues */ 679 /* start the gfx rings and rlc compute queues */
653 r = sdma_v3_0_gfx_resume(adev); 680 r = sdma_v3_0_gfx_resume(adev);
@@ -1079,6 +1106,7 @@ static int sdma_v3_0_hw_fini(void *handle)
1079{ 1106{
1080 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1107 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1081 1108
1109 sdma_v3_0_ctx_switch_enable(adev, false);
1082 sdma_v3_0_enable(adev, false); 1110 sdma_v3_0_enable(adev, false);
1083 1111
1084 return 0; 1112 return 0;
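sdma_v3_0_ctx_switch_enable() is a textbook read-modify-write: read SDMA0_CNTL, flip the one field, write it back, once per engine instance. As a sketch of how such REG_SET_FIELD-style macros typically expand (the mask and shift values here are illustrative, not the generated amdgpu register headers):

	#include <stdint.h>

	/* Illustrative field definition; the real values come from the
	 * generated VI register headers. */
	#define SDMA0_CNTL__AUTO_CTXSW_ENABLE__SHIFT	18
	#define SDMA0_CNTL__AUTO_CTXSW_ENABLE_MASK	(1u << 18)

	/* Equivalent of REG_SET_FIELD(reg, SDMA0_CNTL, AUTO_CTXSW_ENABLE, val):
	 * clear the field, then insert the new value at its shift. */
	static inline uint32_t set_auto_ctxsw(uint32_t reg, uint32_t val)
	{
		reg &= ~SDMA0_CNTL__AUTO_CTXSW_ENABLE_MASK;
		reg |= (val << SDMA0_CNTL__AUTO_CTXSW_ENABLE__SHIFT) &
		       SDMA0_CNTL__AUTO_CTXSW_ENABLE_MASK;
		return reg;
	}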
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 90fc93c2c1d0..fa5a4448531d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1189,10 +1189,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
1189 return -EINVAL; 1189 return -EINVAL;
1190 } 1190 }
1191 1191
1192 adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL);
1193 if (adev->ip_block_enabled == NULL)
1194 return -ENOMEM;
1195
1196 return 0; 1192 return 0;
1197} 1193}
1198 1194
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 96c904b3acb7..c991973019d0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -553,7 +553,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
553 /* Validate arguments */ 553 /* Validate arguments */
554 554
555 if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) || 555 if ((args->buf_size_in_bytes > MAX_ALLOWED_AW_BUFF_SIZE) ||
556 (args->buf_size_in_bytes <= sizeof(*args)) || 556 (args->buf_size_in_bytes <= sizeof(*args) + sizeof(int) * 2) ||
557 (cmd_from_user == NULL)) 557 (cmd_from_user == NULL))
558 return -EINVAL; 558 return -EINVAL;
559 559
@@ -590,7 +590,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
590 /* skip over the addresses buffer */ 590 /* skip over the addresses buffer */
591 args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points; 591 args_idx += sizeof(aw_info.watch_address) * aw_info.num_watch_points;
592 592
593 if (args_idx >= args->buf_size_in_bytes) { 593 if (args_idx >= args->buf_size_in_bytes - sizeof(*args)) {
594 kfree(args_buff); 594 kfree(args_buff);
595 return -EINVAL; 595 return -EINVAL;
596 } 596 }
@@ -614,7 +614,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
614 args_idx += sizeof(aw_info.watch_mask); 614 args_idx += sizeof(aw_info.watch_mask);
615 } 615 }
616 616
617 if (args_idx > args->buf_size_in_bytes) { 617 if (args_idx >= args->buf_size_in_bytes - sizeof(args)) {
618 kfree(args_buff); 618 kfree(args_buff);
619 return -EINVAL; 619 return -EINVAL;
620 } 620 }
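The tightened checks matter because this ioctl copies a variable-length buffer from user space and then walks it with a running index; both the declared size and every read inside the walk have to stay bounded. The shape of the pattern, reduced to a standalone sketch (a generic record walk, not the kfd structures):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	#define MAX_BUF 4096

	/* Walk a caller-supplied buffer of 'count' fixed-size records,
	 * checking the running index against the declared size before
	 * every read, as kfd_ioctl_dbg_address_watch() does. */
	static bool validate_records(const uint8_t *buf, size_t buf_size,
				     size_t count, size_t rec_size)
	{
		size_t idx = 0;

		if (buf_size > MAX_BUF || count == 0 || rec_size == 0)
			return false;

		while (count--) {
			/* rec_size is small, so idx + rec_size cannot wrap here */
			if (idx + rec_size > buf_size)	/* would read past the end */
				return false;
			idx += rec_size;
		}
		return true;
	}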
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 8a1f999daa24..9be007081b72 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -420,6 +420,12 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
420 pqm_uninit(&p->pqm); 420 pqm_uninit(&p->pqm);
421 421
422 pdd = kfd_get_process_device_data(dev, p); 422 pdd = kfd_get_process_device_data(dev, p);
423
424 if (!pdd) {
425 mutex_unlock(&p->mutex);
426 return;
427 }
428
423 if (pdd->reset_wavefronts) { 429 if (pdd->reset_wavefronts) {
424 dbgdev_wave_reset_wavefronts(pdd->dev, p); 430 dbgdev_wave_reset_wavefronts(pdd->dev, p);
425 pdd->reset_wavefronts = false; 431 pdd->reset_wavefronts = false;
@@ -431,8 +437,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
431 * We don't call amd_iommu_unbind_pasid() here 437 * We don't call amd_iommu_unbind_pasid() here
432 * because the IOMMU called us. 438 * because the IOMMU called us.
433 */ 439 */
434 if (pdd) 440 pdd->bound = false;
435 pdd->bound = false;
436 441
437 mutex_unlock(&p->mutex); 442 mutex_unlock(&p->mutex);
438} 443}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index e469c4b2e8cc..c25728bc388a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
684 dev->node_props.cpu_core_id_base); 684 dev->node_props.cpu_core_id_base);
685 sysfs_show_32bit_prop(buffer, "simd_id_base", 685 sysfs_show_32bit_prop(buffer, "simd_id_base",
686 dev->node_props.simd_id_base); 686 dev->node_props.simd_id_base);
687 sysfs_show_32bit_prop(buffer, "capability",
688 dev->node_props.capability);
689 sysfs_show_32bit_prop(buffer, "max_waves_per_simd", 687 sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
690 dev->node_props.max_waves_per_simd); 688 dev->node_props.max_waves_per_simd);
691 sysfs_show_32bit_prop(buffer, "lds_size_in_kb", 689 sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
@@ -736,6 +734,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
736 dev->gpu->kfd2kgd->get_fw_version( 734 dev->gpu->kfd2kgd->get_fw_version(
737 dev->gpu->kgd, 735 dev->gpu->kgd,
738 KGD_ENGINE_MEC1)); 736 KGD_ENGINE_MEC1));
737 sysfs_show_32bit_prop(buffer, "capability",
738 dev->node_props.capability);
739 } 739 }
740 740
741 return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute", 741 return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
diff --git a/drivers/gpu/drm/bridge/dw_hdmi.c b/drivers/gpu/drm/bridge/dw_hdmi.c
index 49cafb61d290..816d104ca4da 100644
--- a/drivers/gpu/drm/bridge/dw_hdmi.c
+++ b/drivers/gpu/drm/bridge/dw_hdmi.c
@@ -1395,7 +1395,7 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
1395 struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, 1395 struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
1396 connector); 1396 connector);
1397 struct edid *edid; 1397 struct edid *edid;
1398 int ret; 1398 int ret = 0;
1399 1399
1400 if (!hdmi->ddc) 1400 if (!hdmi->ddc)
1401 return 0; 1401 return 0;
@@ -1412,7 +1412,7 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
1412 dev_dbg(hdmi->dev, "failed to get edid\n"); 1412 dev_dbg(hdmi->dev, "failed to get edid\n");
1413 } 1413 }
1414 1414
1415 return 0; 1415 return ret;
1416} 1416}
1417 1417
1418static enum drm_mode_status 1418static enum drm_mode_status
@@ -1457,7 +1457,7 @@ static struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
1457 .best_encoder = dw_hdmi_connector_best_encoder, 1457 .best_encoder = dw_hdmi_connector_best_encoder,
1458}; 1458};
1459 1459
1460struct drm_bridge_funcs dw_hdmi_bridge_funcs = { 1460static struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
1461 .enable = dw_hdmi_bridge_enable, 1461 .enable = dw_hdmi_bridge_enable,
1462 .disable = dw_hdmi_bridge_disable, 1462 .disable = dw_hdmi_bridge_disable,
1463 .pre_enable = dw_hdmi_bridge_nop, 1463 .pre_enable = dw_hdmi_bridge_nop,
diff --git a/drivers/gpu/drm/bridge/ps8622.c b/drivers/gpu/drm/bridge/ps8622.c
index e895aa7ea353..1a6607beb29f 100644
--- a/drivers/gpu/drm/bridge/ps8622.c
+++ b/drivers/gpu/drm/bridge/ps8622.c
@@ -18,6 +18,7 @@
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/fb.h> 19#include <linux/fb.h>
20#include <linux/gpio.h> 20#include <linux/gpio.h>
21#include <linux/gpio/consumer.h>
21#include <linux/i2c.h> 22#include <linux/i2c.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/of.h> 24#include <linux/of.h>
@@ -31,6 +32,7 @@
31#include "drmP.h" 32#include "drmP.h"
32#include "drm_crtc.h" 33#include "drm_crtc.h"
33#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
35#include "drm_atomic_helper.h"
34 36
35/* Brightness scale on the Parade chip */ 37/* Brightness scale on the Parade chip */
36#define PS8622_MAX_BRIGHTNESS 0xff 38#define PS8622_MAX_BRIGHTNESS 0xff
@@ -498,10 +500,13 @@ static void ps8622_connector_destroy(struct drm_connector *connector)
498} 500}
499 501
500static const struct drm_connector_funcs ps8622_connector_funcs = { 502static const struct drm_connector_funcs ps8622_connector_funcs = {
501 .dpms = drm_helper_connector_dpms, 503 .dpms = drm_atomic_helper_connector_dpms,
502 .fill_modes = drm_helper_probe_single_connector_modes, 504 .fill_modes = drm_helper_probe_single_connector_modes,
503 .detect = ps8622_detect, 505 .detect = ps8622_detect,
504 .destroy = ps8622_connector_destroy, 506 .destroy = ps8622_connector_destroy,
507 .reset = drm_atomic_helper_connector_reset,
508 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
509 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
505}; 510};
506 511
507static int ps8622_attach(struct drm_bridge *bridge) 512static int ps8622_attach(struct drm_bridge *bridge)
@@ -581,31 +586,21 @@ static int ps8622_probe(struct i2c_client *client,
581 ps8622->v12 = NULL; 586 ps8622->v12 = NULL;
582 } 587 }
583 588
584 ps8622->gpio_slp = devm_gpiod_get(dev, "sleep"); 589 ps8622->gpio_slp = devm_gpiod_get(dev, "sleep", GPIOD_OUT_HIGH);
585 if (IS_ERR(ps8622->gpio_slp)) { 590 if (IS_ERR(ps8622->gpio_slp)) {
586 ret = PTR_ERR(ps8622->gpio_slp); 591 ret = PTR_ERR(ps8622->gpio_slp);
587 dev_err(dev, "cannot get gpio_slp %d\n", ret); 592 dev_err(dev, "cannot get gpio_slp %d\n", ret);
588 return ret; 593 return ret;
589 } 594 }
590 ret = gpiod_direction_output(ps8622->gpio_slp, 1);
591 if (ret) {
592 dev_err(dev, "cannot configure gpio_slp\n");
593 return ret;
594 }
595 595
596 ps8622->gpio_rst = devm_gpiod_get(dev, "reset");
597 if (IS_ERR(ps8622->gpio_rst)) {
598 ret = PTR_ERR(ps8622->gpio_rst);
599 dev_err(dev, "cannot get gpio_rst %d\n", ret);
600 return ret;
601 }
602 /* 596 /*
603 * Assert the reset pin high to avoid the bridge being 597 * Assert the reset pin high to avoid the bridge being
604 * initialized prematurely 598 * initialized prematurely
605 */ 599 */
606 ret = gpiod_direction_output(ps8622->gpio_rst, 1); 600 ps8622->gpio_rst = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
607 if (ret) { 601 if (IS_ERR(ps8622->gpio_rst)) {
608 dev_err(dev, "cannot configure gpio_rst\n"); 602 ret = PTR_ERR(ps8622->gpio_rst);
603 dev_err(dev, "cannot get gpio_rst %d\n", ret);
609 return ret; 604 return ret;
610 } 605 }
611 606
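Both bridge drivers in this series migrate to the three-argument devm_gpiod_get(), which requests the line and programs its initial direction and level in one call, so the separate gpiod_direction_output() step and its error path disappear. A minimal consumer sketch, using a hypothetical "enable" line:

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static int example_get_gpios(struct device *dev)
	{
		struct gpio_desc *enable;

		/* Request the line and drive it high in one step; with the
		 * older API this took a separate gpiod_direction_output(). */
		enable = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
		if (IS_ERR(enable))
			return PTR_ERR(enable);

		gpiod_set_value(enable, 0);	/* later: de-assert */
		return 0;
	}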
diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
index 9d2f053382e1..1b1bf2384815 100644
--- a/drivers/gpu/drm/bridge/ptn3460.c
+++ b/drivers/gpu/drm/bridge/ptn3460.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/gpio.h> 17#include <linux/gpio.h>
18#include <linux/gpio/consumer.h>
18#include <linux/i2c.h> 19#include <linux/i2c.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <linux/of.h> 21#include <linux/of.h>
@@ -23,10 +24,9 @@
23 24
24#include <drm/drm_panel.h> 25#include <drm/drm_panel.h>
25 26
26#include "bridge/ptn3460.h"
27
28#include "drm_crtc.h" 27#include "drm_crtc.h"
29#include "drm_crtc_helper.h" 28#include "drm_crtc_helper.h"
29#include "drm_atomic_helper.h"
30#include "drm_edid.h" 30#include "drm_edid.h"
31#include "drmP.h" 31#include "drmP.h"
32 32
@@ -259,10 +259,13 @@ static void ptn3460_connector_destroy(struct drm_connector *connector)
259} 259}
260 260
261static struct drm_connector_funcs ptn3460_connector_funcs = { 261static struct drm_connector_funcs ptn3460_connector_funcs = {
262 .dpms = drm_helper_connector_dpms, 262 .dpms = drm_atomic_helper_connector_dpms,
263 .fill_modes = drm_helper_probe_single_connector_modes, 263 .fill_modes = drm_helper_probe_single_connector_modes,
264 .detect = ptn3460_detect, 264 .detect = ptn3460_detect,
265 .destroy = ptn3460_connector_destroy, 265 .destroy = ptn3460_connector_destroy,
266 .reset = drm_atomic_helper_connector_reset,
267 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
268 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
266}; 269};
267 270
268static int ptn3460_bridge_attach(struct drm_bridge *bridge) 271static int ptn3460_bridge_attach(struct drm_bridge *bridge)
@@ -330,32 +333,23 @@ static int ptn3460_probe(struct i2c_client *client,
330 333
331 ptn_bridge->client = client; 334 ptn_bridge->client = client;
332 335
333 ptn_bridge->gpio_pd_n = devm_gpiod_get(&client->dev, "powerdown"); 336 ptn_bridge->gpio_pd_n = devm_gpiod_get(&client->dev, "powerdown",
337 GPIOD_OUT_HIGH);
334 if (IS_ERR(ptn_bridge->gpio_pd_n)) { 338 if (IS_ERR(ptn_bridge->gpio_pd_n)) {
335 ret = PTR_ERR(ptn_bridge->gpio_pd_n); 339 ret = PTR_ERR(ptn_bridge->gpio_pd_n);
336 dev_err(dev, "cannot get gpio_pd_n %d\n", ret); 340 dev_err(dev, "cannot get gpio_pd_n %d\n", ret);
337 return ret; 341 return ret;
338 } 342 }
339 343
340 ret = gpiod_direction_output(ptn_bridge->gpio_pd_n, 1);
341 if (ret) {
342 DRM_ERROR("cannot configure gpio_pd_n\n");
343 return ret;
344 }
345
346 ptn_bridge->gpio_rst_n = devm_gpiod_get(&client->dev, "reset");
347 if (IS_ERR(ptn_bridge->gpio_rst_n)) {
348 ret = PTR_ERR(ptn_bridge->gpio_rst_n);
349 DRM_ERROR("cannot get gpio_rst_n %d\n", ret);
350 return ret;
351 }
352 /* 344 /*
353 * Request the reset pin low to avoid the bridge being 345 * Request the reset pin low to avoid the bridge being
354 * initialized prematurely 346 * initialized prematurely
355 */ 347 */
356 ret = gpiod_direction_output(ptn_bridge->gpio_rst_n, 0); 348 ptn_bridge->gpio_rst_n = devm_gpiod_get(&client->dev, "reset",
357 if (ret) { 349 GPIOD_OUT_LOW);
358 DRM_ERROR("cannot configure gpio_rst_n\n"); 350 if (IS_ERR(ptn_bridge->gpio_rst_n)) {
351 ret = PTR_ERR(ptn_bridge->gpio_rst_n);
352 DRM_ERROR("cannot get gpio_rst_n %d\n", ret);
359 return ret; 353 return ret;
360 } 354 }
361 355
@@ -389,7 +383,7 @@ static int ptn3460_remove(struct i2c_client *client)
389} 383}
390 384
391static const struct i2c_device_id ptn3460_i2c_table[] = { 385static const struct i2c_device_id ptn3460_i2c_table[] = {
392 {"nxp,ptn3460", 0}, 386 {"ptn3460", 0},
393 {}, 387 {},
394}; 388};
395MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table); 389MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table);
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index c7e59b074e62..f6f2fb58eb37 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1216,8 +1216,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
1216 1216
1217 if (!state->allow_modeset) { 1217 if (!state->allow_modeset) {
1218 for_each_crtc_in_state(state, crtc, crtc_state, i) { 1218 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1219 if (crtc_state->mode_changed || 1219 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
1220 crtc_state->active_changed) {
1221 DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n", 1220 DRM_DEBUG_ATOMIC("[CRTC:%d] requires full modeset\n",
1222 crtc->base.id); 1221 crtc->base.id);
1223 return -EINVAL; 1222 return -EINVAL;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 536ae4da4665..5b59d5ad7d1c 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -331,12 +331,6 @@ mode_fixup(struct drm_atomic_state *state)
331 return 0; 331 return 0;
332} 332}
333 333
334static bool
335needs_modeset(struct drm_crtc_state *state)
336{
337 return state->mode_changed || state->active_changed;
338}
339
340/** 334/**
341 * drm_atomic_helper_check_modeset - validate state object for modeset changes 335 * drm_atomic_helper_check_modeset - validate state object for modeset changes
342 * @dev: DRM device 336 * @dev: DRM device
@@ -414,7 +408,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
414 crtc_state->active_changed = true; 408 crtc_state->active_changed = true;
415 } 409 }
416 410
417 if (!needs_modeset(crtc_state)) 411 if (!drm_atomic_crtc_needs_modeset(crtc_state))
418 continue; 412 continue;
419 413
420 DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n", 414 DRM_DEBUG_ATOMIC("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
@@ -564,7 +558,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
564 old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)]; 558 old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
565 559
566 if (!old_crtc_state->active || 560 if (!old_crtc_state->active ||
567 !needs_modeset(old_conn_state->crtc->state)) 561 !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
568 continue; 562 continue;
569 563
570 encoder = old_conn_state->best_encoder; 564 encoder = old_conn_state->best_encoder;
@@ -601,7 +595,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
601 const struct drm_crtc_helper_funcs *funcs; 595 const struct drm_crtc_helper_funcs *funcs;
602 596
603 /* Shut down everything that needs a full modeset. */ 597 /* Shut down everything that needs a full modeset. */
604 if (!needs_modeset(crtc->state)) 598 if (!drm_atomic_crtc_needs_modeset(crtc->state))
605 continue; 599 continue;
606 600
607 if (!old_crtc_state->active) 601 if (!old_crtc_state->active)
@@ -792,7 +786,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
792 const struct drm_crtc_helper_funcs *funcs; 786 const struct drm_crtc_helper_funcs *funcs;
793 787
794 /* Need to filter out CRTCs where only planes change. */ 788 /* Need to filter out CRTCs where only planes change. */
795 if (!needs_modeset(crtc->state)) 789 if (!drm_atomic_crtc_needs_modeset(crtc->state))
796 continue; 790 continue;
797 791
798 if (!crtc->state->active) 792 if (!crtc->state->active)
@@ -819,7 +813,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
819 continue; 813 continue;
820 814
821 if (!connector->state->crtc->state->active || 815 if (!connector->state->crtc->state->active ||
822 !needs_modeset(connector->state->crtc->state)) 816 !drm_atomic_crtc_needs_modeset(connector->state->crtc->state))
823 continue; 817 continue;
824 818
825 encoder = connector->state->best_encoder; 819 encoder = connector->state->best_encoder;
@@ -1561,10 +1555,14 @@ static int update_output_state(struct drm_atomic_state *state,
1561 if (crtc == set->crtc) 1555 if (crtc == set->crtc)
1562 continue; 1556 continue;
1563 1557
1564 crtc_state->enable = 1558 if (!drm_atomic_connectors_for_crtc(state, crtc)) {
1565 drm_atomic_connectors_for_crtc(state, crtc); 1559 ret = drm_atomic_set_mode_prop_for_crtc(crtc_state,
1566 if (!crtc_state->enable) 1560 NULL);
1561 if (ret < 0)
1562 return ret;
1563
1567 crtc_state->active = false; 1564 crtc_state->active = false;
1565 }
1568 } 1566 }
1569 1567
1570 return 0; 1568 return 0;
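The local needs_modeset() copy is dropped so core and helpers share one definition of "needs a full modeset". At this point in the series the core helper is assumed to reduce to the same two flags the local copy tested; the real definition lives in include/drm/drm_atomic.h and has since grown additional conditions:

	/* Sketch of the core helper as used here: a CRTC needs a full
	 * modeset when its mode or its active state changed. */
	static inline bool
	drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
	{
		return state->mode_changed || state->active_changed;
	}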
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 9a62d7a53553..6743ff7dccfa 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -130,11 +130,12 @@ drm_clflush_virt_range(void *addr, unsigned long length)
130{ 130{
131#if defined(CONFIG_X86) 131#if defined(CONFIG_X86)
132 if (cpu_has_clflush) { 132 if (cpu_has_clflush) {
133 const int size = boot_cpu_data.x86_clflush_size;
133 void *end = addr + length; 134 void *end = addr + length;
135 addr = (void *)(((unsigned long)addr) & -size);
134 mb(); 136 mb();
135 for (; addr < end; addr += boot_cpu_data.x86_clflush_size) 137 for (; addr < end; addr += size)
136 clflushopt(addr); 138 clflushopt(addr);
137 clflushopt(end - 1);
138 mb(); 139 mb();
139 return; 140 return;
140 } 141 }
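clflush affects the whole cache line containing the given address, so rounding the start down to a line boundary lets the loop cover the full range by itself, and the old separate clflushopt(end - 1) tail flush becomes unnecessary. A standalone check of the alignment arithmetic:

	#include <stdio.h>

	int main(void)
	{
		const unsigned long size = 64;	/* x86_clflush_size */
		unsigned long addr = 0x1007;	/* unaligned start */
		unsigned long end  = addr + 200;

		/* (addr & -size) clears the low bits: 0x1007 -> 0x1000. */
		for (addr &= -size; addr < end; addr += size)
			printf("flush line 0x%lx\n", addr);
		/* prints 0x1000, 0x1040, 0x1080, 0x10c0: the four lines
		 * spanned by [0x1007, 0x10ce], no tail flush needed */
		return 0;
	}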
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 77f87b23a6e7..b9ba06176eb1 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -3255,6 +3255,24 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
3255 r->modifier[i], i); 3255 r->modifier[i], i);
3256 return -EINVAL; 3256 return -EINVAL;
3257 } 3257 }
3258
3259 /* modifier specific checks: */
3260 switch (r->modifier[i]) {
3261 case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
3262 /* NOTE: the pitch restriction may be lifted later if it turns
3263 * out that no hw has this restriction:
3264 */
3265 if (r->pixel_format != DRM_FORMAT_NV12 ||
3266 width % 128 || height % 32 ||
3267 r->pitches[i] % 128) {
3268 DRM_DEBUG_KMS("bad modifier data for plane %d\n", i);
3269 return -EINVAL;
3270 }
3271 break;
3272
3273 default:
3274 break;
3275 }
3258 } 3276 }
3259 3277
3260 for (i = num_planes; i < 4; i++) { 3278 for (i = num_planes; i < 4; i++) {
@@ -4714,7 +4732,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
4714 return 0; 4732 return 0;
4715 4733
4716 if (edid) 4734 if (edid)
4717 size = EDID_LENGTH + (1 + edid->extensions); 4735 size = EDID_LENGTH * (1 + edid->extensions);
4718 4736
4719 ret = drm_property_replace_global_blob(dev, 4737 ret = drm_property_replace_global_blob(dev,
4720 &connector->edid_blob_ptr, 4738 &connector->edid_blob_ptr,
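The EDID fix is plain arithmetic: an EDID is EDID_LENGTH (128) bytes for the base block plus another 128 bytes per extension block, so the blob size is 128 * (1 + extensions); the previous '+' sized a one-extension EDID at 130 bytes and truncated it. Worked out:

	#include <stdio.h>

	#define EDID_LENGTH 128

	int main(void)
	{
		int extensions = 1;	/* e.g. one CEA extension block */

		printf("wrong: %d bytes\n", EDID_LENGTH + (1 + extensions)); /* 130 */
		printf("right: %d bytes\n", EDID_LENGTH * (1 + extensions)); /* 256 */
		return 0;
	}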
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 132581ca4ad8..778bbb6425b8 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -867,8 +867,16 @@ static void drm_dp_destroy_port(struct kref *kref)
867 port->vcpi.num_slots = 0; 867 port->vcpi.num_slots = 0;
868 868
869 kfree(port->cached_edid); 869 kfree(port->cached_edid);
870 if (port->connector) 870
871 (*port->mgr->cbs->destroy_connector)(mgr, port->connector); 871 /* we can't destroy the connector here, as
872 we might be holding the mode_config.mutex
873 from an EDID retrieval */
874 if (port->connector) {
875 mutex_lock(&mgr->destroy_connector_lock);
876 list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
877 mutex_unlock(&mgr->destroy_connector_lock);
878 schedule_work(&mgr->destroy_connector_work);
879 }
872 drm_dp_port_teardown_pdt(port, port->pdt); 880 drm_dp_port_teardown_pdt(port, port->pdt);
873 881
874 if (!port->input && port->vcpi.vcpi > 0) 882 if (!port->input && port->vcpi.vcpi > 0)
@@ -1163,6 +1171,8 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
1163 struct drm_dp_mst_port *port; 1171 struct drm_dp_mst_port *port;
1164 int i; 1172 int i;
1165 /* find the port by iterating down */ 1173 /* find the port by iterating down */
1174
1175 mutex_lock(&mgr->lock);
1166 mstb = mgr->mst_primary; 1176 mstb = mgr->mst_primary;
1167 1177
1168 for (i = 0; i < lct - 1; i++) { 1178 for (i = 0; i < lct - 1; i++) {
@@ -1182,6 +1192,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
1182 } 1192 }
1183 } 1193 }
1184 kref_get(&mstb->kref); 1194 kref_get(&mstb->kref);
1195 mutex_unlock(&mgr->lock);
1185 return mstb; 1196 return mstb;
1186} 1197}
1187 1198
@@ -1189,7 +1200,7 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1189 struct drm_dp_mst_branch *mstb) 1200 struct drm_dp_mst_branch *mstb)
1190{ 1201{
1191 struct drm_dp_mst_port *port; 1202 struct drm_dp_mst_port *port;
1192 1203 struct drm_dp_mst_branch *mstb_child;
1193 if (!mstb->link_address_sent) { 1204 if (!mstb->link_address_sent) {
1194 drm_dp_send_link_address(mgr, mstb); 1205 drm_dp_send_link_address(mgr, mstb);
1195 mstb->link_address_sent = true; 1206 mstb->link_address_sent = true;
@@ -1204,17 +1215,31 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1204 if (!port->available_pbn) 1215 if (!port->available_pbn)
1205 drm_dp_send_enum_path_resources(mgr, mstb, port); 1216 drm_dp_send_enum_path_resources(mgr, mstb, port);
1206 1217
1207 if (port->mstb) 1218 if (port->mstb) {
1208 drm_dp_check_and_send_link_address(mgr, port->mstb); 1219 mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
1220 if (mstb_child) {
1221 drm_dp_check_and_send_link_address(mgr, mstb_child);
1222 drm_dp_put_mst_branch_device(mstb_child);
1223 }
1224 }
1209 } 1225 }
1210} 1226}
1211 1227
1212static void drm_dp_mst_link_probe_work(struct work_struct *work) 1228static void drm_dp_mst_link_probe_work(struct work_struct *work)
1213{ 1229{
1214 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); 1230 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
1231 struct drm_dp_mst_branch *mstb;
1215 1232
1216 drm_dp_check_and_send_link_address(mgr, mgr->mst_primary); 1233 mutex_lock(&mgr->lock);
1217 1234 mstb = mgr->mst_primary;
1235 if (mstb) {
1236 kref_get(&mstb->kref);
1237 }
1238 mutex_unlock(&mgr->lock);
1239 if (mstb) {
1240 drm_dp_check_and_send_link_address(mgr, mstb);
1241 drm_dp_put_mst_branch_device(mstb);
1242 }
1218} 1243}
1219 1244
1220static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 1245static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -2632,6 +2657,30 @@ static void drm_dp_tx_work(struct work_struct *work)
2632 mutex_unlock(&mgr->qlock); 2657 mutex_unlock(&mgr->qlock);
2633} 2658}
2634 2659
2660static void drm_dp_destroy_connector_work(struct work_struct *work)
2661{
2662 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
2663 struct drm_connector *connector;
2664
2665 /*
2666 * Not a regular list traverse as we have to drop the destroy
2667 * connector lock before destroying the connector, to avoid AB->BA
2668 * ordering between this lock and the config mutex.
2669 */
2670 for (;;) {
2671 mutex_lock(&mgr->destroy_connector_lock);
2672 connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
2673 if (!connector) {
2674 mutex_unlock(&mgr->destroy_connector_lock);
2675 break;
2676 }
2677 list_del(&connector->destroy_list);
2678 mutex_unlock(&mgr->destroy_connector_lock);
2679
2680 mgr->cbs->destroy_connector(mgr, connector);
2681 }
2682}
2683
2635/** 2684/**
2636 * drm_dp_mst_topology_mgr_init - initialise a topology manager 2685 * drm_dp_mst_topology_mgr_init - initialise a topology manager
2637 * @mgr: manager struct to initialise 2686 * @mgr: manager struct to initialise
@@ -2651,10 +2700,13 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2651 mutex_init(&mgr->lock); 2700 mutex_init(&mgr->lock);
2652 mutex_init(&mgr->qlock); 2701 mutex_init(&mgr->qlock);
2653 mutex_init(&mgr->payload_lock); 2702 mutex_init(&mgr->payload_lock);
2703 mutex_init(&mgr->destroy_connector_lock);
2654 INIT_LIST_HEAD(&mgr->tx_msg_upq); 2704 INIT_LIST_HEAD(&mgr->tx_msg_upq);
2655 INIT_LIST_HEAD(&mgr->tx_msg_downq); 2705 INIT_LIST_HEAD(&mgr->tx_msg_downq);
2706 INIT_LIST_HEAD(&mgr->destroy_connector_list);
2656 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); 2707 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
2657 INIT_WORK(&mgr->tx_work, drm_dp_tx_work); 2708 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
2709 INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
2658 init_waitqueue_head(&mgr->tx_waitq); 2710 init_waitqueue_head(&mgr->tx_waitq);
2659 mgr->dev = dev; 2711 mgr->dev = dev;
2660 mgr->aux = aux; 2712 mgr->aux = aux;
@@ -2679,6 +2731,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
2679 */ 2731 */
2680void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) 2732void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
2681{ 2733{
2734 flush_work(&mgr->destroy_connector_work);
2682 mutex_lock(&mgr->payload_lock); 2735 mutex_lock(&mgr->payload_lock);
2683 kfree(mgr->payloads); 2736 kfree(mgr->payloads);
2684 mgr->payloads = NULL; 2737 mgr->payloads = NULL;
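The MST change defers connector destruction to a worker because drm_dp_destroy_port() can be reached while mode_config.mutex is held (for example during EDID retrieval), and destroying a connector needs that mutex. The worker then pops one entry at a time and drops the list lock before each destroy, precisely to avoid an AB-BA cycle between the two locks. The pattern in isolation, with generic names rather than the mst structures:

	#include <linux/kernel.h>
	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	struct deferred_destroy {
		struct mutex lock;
		struct list_head list;
		struct work_struct work;
	};

	struct victim {
		struct list_head node;
	};

	static void destroy_one(struct victim *v);	/* may take heavy locks */

	/* Producer: runs in a context that must not take the heavy lock. */
	static void queue_destroy(struct deferred_destroy *dd, struct victim *v)
	{
		mutex_lock(&dd->lock);
		list_add(&v->node, &dd->list);
		mutex_unlock(&dd->lock);
		schedule_work(&dd->work);
	}

	/* Worker: pop under the list lock, destroy outside it. */
	static void destroy_work(struct work_struct *work)
	{
		struct deferred_destroy *dd =
			container_of(work, struct deferred_destroy, work);
		struct victim *v;

		for (;;) {
			mutex_lock(&dd->lock);
			v = list_first_entry_or_null(&dd->list, struct victim, node);
			if (v)
				list_del(&v->node);
			mutex_unlock(&dd->lock);
			if (!v)
				break;
			destroy_one(v);
		}
	}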
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 3b3c4f537e95..b7bf4ce8c012 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -46,13 +46,11 @@ MODULE_AUTHOR(CORE_AUTHOR);
46MODULE_DESCRIPTION(CORE_DESC); 46MODULE_DESCRIPTION(CORE_DESC);
47MODULE_LICENSE("GPL and additional rights"); 47MODULE_LICENSE("GPL and additional rights");
48MODULE_PARM_DESC(debug, "Enable debug output"); 48MODULE_PARM_DESC(debug, "Enable debug output");
49MODULE_PARM_DESC(atomic, "Enable experimental atomic KMS API");
50MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)"); 49MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
51MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); 50MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
52MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps"); 51MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
53 52
54module_param_named(debug, drm_debug, int, 0600); 53module_param_named(debug, drm_debug, int, 0600);
55module_param_named_unsafe(atomic, drm_atomic, bool, 0600);
56 54
57static DEFINE_SPINLOCK(drm_minor_lock); 55static DEFINE_SPINLOCK(drm_minor_lock);
58static struct idr drm_minors_idr; 56static struct idr drm_minors_idr;
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index e419eedf751d..bd75f303da63 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -110,7 +110,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
110 cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size, 110 cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
111 &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN); 111 &cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
112 if (!cma_obj->vaddr) { 112 if (!cma_obj->vaddr) {
113 dev_err(drm->dev, "failed to allocate buffer with size %d\n", 113 dev_err(drm->dev, "failed to allocate buffer with size %zu\n",
114 size); 114 size);
115 ret = -ENOMEM; 115 ret = -ENOMEM;
116 goto error; 116 goto error;
@@ -388,7 +388,7 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
388 388
389 off = drm_vma_node_start(&obj->vma_node); 389 off = drm_vma_node_start(&obj->vma_node);
390 390
391 seq_printf(m, "%2d (%2d) %08llx %pad %p %d", 391 seq_printf(m, "%2d (%2d) %08llx %pad %p %zu",
392 obj->name, obj->refcount.refcount.counter, 392 obj->name, obj->refcount.refcount.counter,
393 off, &cma_obj->paddr, cma_obj->vaddr, obj->size); 393 off, &cma_obj->paddr, cma_obj->vaddr, obj->size);
394 394
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9bac1b7479af..b1d303fa2327 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -36,9 +36,6 @@
36 36
37#include <linux/pci.h> 37#include <linux/pci.h>
38#include <linux/export.h> 38#include <linux/export.h>
39#ifdef CONFIG_X86
40#include <asm/mtrr.h>
41#endif
42 39
43static int drm_version(struct drm_device *dev, void *data, 40static int drm_version(struct drm_device *dev, void *data,
44 struct drm_file *file_priv); 41 struct drm_file *file_priv);
@@ -197,16 +194,7 @@ static int drm_getmap(struct drm_device *dev, void *data,
197 map->type = r_list->map->type; 194 map->type = r_list->map->type;
198 map->flags = r_list->map->flags; 195 map->flags = r_list->map->flags;
199 map->handle = (void *)(unsigned long) r_list->user_token; 196 map->handle = (void *)(unsigned long) r_list->user_token;
200 197 map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
201#ifdef CONFIG_X86
202 /*
203 * There appears to be exactly one user of the mtrr index: dritest.
204 * It's easy enough to keep it working on non-PAT systems.
205 */
206 map->mtrr = phys_wc_to_mtrr_index(r_list->map->mtrr);
207#else
208 map->mtrr = -1;
209#endif
210 198
211 mutex_unlock(&dev->struct_mutex); 199 mutex_unlock(&dev->struct_mutex);
212 200
@@ -350,9 +338,6 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
350 file_priv->universal_planes = req->value; 338 file_priv->universal_planes = req->value;
351 break; 339 break;
352 case DRM_CLIENT_CAP_ATOMIC: 340 case DRM_CLIENT_CAP_ATOMIC:
353 /* for now, hide behind experimental drm.atomic moduleparam */
354 if (!drm_atomic)
355 return -EINVAL;
356 if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) 341 if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
357 return -EINVAL; 342 return -EINVAL;
358 if (req->value > 1) 343 if (req->value > 1)
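The #ifdef block falls away because arch_phys_wc_index() exists for every arch: on x86 it maps a handle returned by arch_phys_wc_add() back to the underlying MTRR index, and elsewhere (or for PAT-only handles) it returns -1, which is what the old fallback hard-coded. The lifecycle, sketched with an illustrative base address and size:

	#include <linux/io.h>

	static int example_wc_mapping(void)
	{
		/* Ask the arch for write-combining on a framebuffer range. */
		int handle = arch_phys_wc_add(0xd0000000, 16 * 1024 * 1024);

		/* -1 when the handle is invalid or the arch has no MTRRs;
		 * otherwise the MTRR register index, for legacy consumers. */
		int mtrr = arch_phys_wc_index(handle);

		arch_phys_wc_del(handle);
		return mtrr;
	}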
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 162dd29b2451..9f935f55d74c 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -309,7 +309,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
309 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of 309 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
310 * simpler APIs by using the helper functions @drm_gem_prime_export and 310 * simpler APIs by using the helper functions @drm_gem_prime_export and
311 * @drm_gem_prime_import. These functions implement dma-buf support in terms of 311 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
312 * five lower-level driver callbacks: 312 * six lower-level driver callbacks:
313 * 313 *
314 * Export callbacks: 314 * Export callbacks:
315 * 315 *
@@ -321,6 +321,8 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
321 * 321 *
322 * - @gem_prime_vunmap: vunmap a buffer exported by your driver 322 * - @gem_prime_vunmap: vunmap a buffer exported by your driver
323 * 323 *
324 * - @gem_prime_mmap (optional): mmap a buffer exported by your driver
325 *
324 * Import callback: 326 * Import callback:
325 * 327 *
326 * - @gem_prime_import_sg_table (import): produce a GEM object from another 328 * - @gem_prime_import_sg_table (import): produce a GEM object from another
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 487ddf5ffe51..0f6cd33b531f 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -217,7 +217,7 @@ static ssize_t status_store(struct device *device,
217 217
218 mutex_unlock(&dev->mode_config.mutex); 218 mutex_unlock(&dev->mode_config.mutex);
219 219
220 return ret; 220 return ret ? ret : count;
221} 221}
222 222
223static ssize_t status_show(struct device *device, 223static ssize_t status_show(struct device *device,
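A one-character fix with real semantics: a sysfs ->store() callback must return the number of bytes it consumed, and returning 0 on success makes the writing process retry the write forever. The convention, in a minimal store callback:

	#include <linux/device.h>

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		int ret = 0;

		/* ... parse buf, apply the setting, set ret on failure ... */

		/* Negative errno on failure; otherwise report the whole
		 * buffer as consumed so the writer does not retry. */
		return ret ? ret : count;
	}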
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 0a6780367d28..43003c4ad80b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -24,16 +24,22 @@ config DRM_EXYNOS_FIMD
24 help 24 help
25 Choose this option if you want to use Exynos FIMD for DRM. 25 Choose this option if you want to use Exynos FIMD for DRM.
26 26
27config DRM_EXYNOS7_DECON 27config DRM_EXYNOS5433_DECON
28 bool "Exynos DRM DECON" 28 bool "Exynos5433 DRM DECON"
29 depends on DRM_EXYNOS 29 depends on DRM_EXYNOS
30 help
31 Choose this option if you want to use Exynos5433 DECON for DRM.
32
33config DRM_EXYNOS7_DECON
34 bool "Exynos7 DRM DECON"
35 depends on DRM_EXYNOS && !FB_S3C
30 select FB_MODE_HELPERS 36 select FB_MODE_HELPERS
31 help 37 help
32 Choose this option if you want to use Exynos DECON for DRM. 38 Choose this option if you want to use Exynos DECON for DRM.
33 39
34config DRM_EXYNOS_DPI 40config DRM_EXYNOS_DPI
35 bool "EXYNOS DRM parallel output support" 41 bool "EXYNOS DRM parallel output support"
36 depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) 42 depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON)
37 select DRM_PANEL 43 select DRM_PANEL
38 default n 44 default n
39 help 45 help
@@ -41,7 +47,7 @@ config DRM_EXYNOS_DPI
41 47
42config DRM_EXYNOS_DSI 48config DRM_EXYNOS_DSI
43 bool "EXYNOS DRM MIPI-DSI driver support" 49 bool "EXYNOS DRM MIPI-DSI driver support"
44 depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) 50 depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS5433_DECON || DRM_EXYNOS7_DECON)
45 select DRM_MIPI_DSI 51 select DRM_MIPI_DSI
46 select DRM_PANEL 52 select DRM_PANEL
47 default n 53 default n
@@ -50,7 +56,7 @@ config DRM_EXYNOS_DSI
50 56
51config DRM_EXYNOS_DP 57config DRM_EXYNOS_DP
52 bool "EXYNOS DRM DP driver support" 58 bool "EXYNOS DRM DP driver support"
53 depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) 59 depends on DRM_EXYNOS && (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
54 default DRM_EXYNOS 60 default DRM_EXYNOS
55 select DRM_PANEL 61 select DRM_PANEL
56 help 62 help
@@ -97,3 +103,9 @@ config DRM_EXYNOS_GSC
97 depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM 103 depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
98 help 104 help
99 Choose this option if you want to use Exynos GSC for DRM. 105 Choose this option if you want to use Exynos GSC for DRM.
106
107config DRM_EXYNOS_MIC
108 bool "Exynos DRM MIC"
109 depends on (DRM_EXYNOS && DRM_EXYNOS5433_DECON)
110 help
111 Choose this option if you want to use Exynos MIC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index cc90679cfc06..7de0b1084fcd 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -10,6 +10,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \
10 10
11exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o 11exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
12exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o 12exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
13exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o
13exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON) += exynos7_drm_decon.o 14exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON) += exynos7_drm_decon.o
14exynosdrm-$(CONFIG_DRM_EXYNOS_DPI) += exynos_drm_dpi.o 15exynosdrm-$(CONFIG_DRM_EXYNOS_DPI) += exynos_drm_dpi.o
15exynosdrm-$(CONFIG_DRM_EXYNOS_DSI) += exynos_drm_dsi.o 16exynosdrm-$(CONFIG_DRM_EXYNOS_DSI) += exynos_drm_dsi.o
@@ -21,5 +22,6 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
21exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o 22exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
22exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o 23exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
23exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o 24exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
25exynosdrm-$(CONFIG_DRM_EXYNOS_MIC) += exynos_drm_mic.o
24 26
25obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o 27obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
new file mode 100644
index 000000000000..8b1225f245fc
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -0,0 +1,660 @@
1/* drivers/gpu/drm/exynos5433_drm_decon.c
2 *
3 * Copyright (C) 2015 Samsung Electronics Co.Ltd
4 * Authors:
5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Hyungwon Hwang <human.hwang@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
 10 * published by the Free Software Foundation.
11 */
12
13#include <linux/platform_device.h>
14#include <linux/clk.h>
15#include <linux/component.h>
16#include <linux/of_gpio.h>
17#include <linux/pm_runtime.h>
18
19#include <video/exynos5433_decon.h>
20
21#include "exynos_drm_drv.h"
22#include "exynos_drm_crtc.h"
23#include "exynos_drm_plane.h"
24#include "exynos_drm_iommu.h"
25
26#define WINDOWS_NR 3
27#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
28
29struct decon_context {
30 struct device *dev;
31 struct drm_device *drm_dev;
32 struct exynos_drm_crtc *crtc;
33 struct exynos_drm_plane planes[WINDOWS_NR];
34 void __iomem *addr;
35 struct clk *clks[6];
36 unsigned int default_win;
37 unsigned long irq_flags;
38 int pipe;
39 bool suspended;
40
41#define BIT_CLKS_ENABLED 0
42#define BIT_IRQS_ENABLED 1
43 unsigned long enabled;
44 bool i80_if;
45 atomic_t win_updated;
46};
47
48static const char * const decon_clks_name[] = {
49 "aclk_decon",
50 "aclk_smmu_decon0x",
51 "aclk_xiu_decon0x",
52 "pclk_smmu_decon0x",
53 "sclk_decon_vclk",
54 "sclk_decon_eclk",
55};
56
57static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
58{
59 struct decon_context *ctx = crtc->ctx;
60 u32 val;
61
62 if (ctx->suspended)
63 return -EPERM;
64
65 if (test_and_set_bit(0, &ctx->irq_flags)) {
66 val = VIDINTCON0_INTEN;
67 if (ctx->i80_if)
68 val |= VIDINTCON0_FRAMEDONE;
69 else
70 val |= VIDINTCON0_INTFRMEN;
71
72 writel(val, ctx->addr + DECON_VIDINTCON0);
73 }
74
75 return 0;
76}
77
78static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
79{
80 struct decon_context *ctx = crtc->ctx;
81
82 if (ctx->suspended)
83 return;
84
85 if (test_and_clear_bit(0, &ctx->irq_flags))
86 writel(0, ctx->addr + DECON_VIDINTCON0);
87}
88
89static void decon_setup_trigger(struct decon_context *ctx)
90{
91 u32 val = TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F |
92 TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN;
93 writel(val, ctx->addr + DECON_TRIGCON);
94}
95
96static void decon_commit(struct exynos_drm_crtc *crtc)
97{
98 struct decon_context *ctx = crtc->ctx;
99 struct drm_display_mode *mode = &crtc->base.mode;
100 u32 val;
101
102 if (ctx->suspended)
103 return;
104
105 /* enable clock gate */
106 val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
107 writel(val, ctx->addr + DECON_CMU);
108
109 /* lcd on and use command if */
110 val = VIDOUT_LCD_ON;
111 if (ctx->i80_if)
112 val |= VIDOUT_COMMAND_IF;
113 else
114 val |= VIDOUT_RGB_IF;
115 writel(val, ctx->addr + DECON_VIDOUTCON0);
116
117 val = VIDTCON2_LINEVAL(mode->vdisplay - 1) |
118 VIDTCON2_HOZVAL(mode->hdisplay - 1);
119 writel(val, ctx->addr + DECON_VIDTCON2);
120
121 if (!ctx->i80_if) {
122 val = VIDTCON00_VBPD_F(
123 mode->crtc_vtotal - mode->crtc_vsync_end) |
124 VIDTCON00_VFPD_F(
125 mode->crtc_vsync_start - mode->crtc_vdisplay);
126 writel(val, ctx->addr + DECON_VIDTCON00);
127
128 val = VIDTCON01_VSPW_F(
129 mode->crtc_vsync_end - mode->crtc_vsync_start);
130 writel(val, ctx->addr + DECON_VIDTCON01);
131
132 val = VIDTCON10_HBPD_F(
133 mode->crtc_htotal - mode->crtc_hsync_end) |
134 VIDTCON10_HFPD_F(
135 mode->crtc_hsync_start - mode->crtc_hdisplay);
136 writel(val, ctx->addr + DECON_VIDTCON10);
137
138 val = VIDTCON11_HSPW_F(
139 mode->crtc_hsync_end - mode->crtc_hsync_start);
140 writel(val, ctx->addr + DECON_VIDTCON11);
141 }
142
143 decon_setup_trigger(ctx);
144
145 /* enable output and display signal */
146 val = VIDCON0_ENVID | VIDCON0_ENVID_F;
147 writel(val, ctx->addr + DECON_VIDCON0);
148}
149
150#define COORDINATE_X(x) (((x) & 0xfff) << 12)
151#define COORDINATE_Y(x) ((x) & 0xfff)
152#define OFFSIZE(x) (((x) & 0x3fff) << 14)
153#define PAGEWIDTH(x) ((x) & 0x3fff)
154
155static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
156{
157 struct exynos_drm_plane *plane = &ctx->planes[win];
158 unsigned long val;
159
160 val = readl(ctx->addr + DECON_WINCONx(win));
161 val &= ~WINCONx_BPPMODE_MASK;
162
163 switch (plane->pixel_format) {
164 case DRM_FORMAT_XRGB1555:
165 val |= WINCONx_BPPMODE_16BPP_I1555;
166 val |= WINCONx_HAWSWP_F;
167 val |= WINCONx_BURSTLEN_16WORD;
168 break;
169 case DRM_FORMAT_RGB565:
170 val |= WINCONx_BPPMODE_16BPP_565;
171 val |= WINCONx_HAWSWP_F;
172 val |= WINCONx_BURSTLEN_16WORD;
173 break;
174 case DRM_FORMAT_XRGB8888:
175 val |= WINCONx_BPPMODE_24BPP_888;
176 val |= WINCONx_WSWP_F;
177 val |= WINCONx_BURSTLEN_16WORD;
178 break;
179 case DRM_FORMAT_ARGB8888:
180 val |= WINCONx_BPPMODE_32BPP_A8888;
181 val |= WINCONx_WSWP_F | WINCONx_BLD_PIX_F | WINCONx_ALPHA_SEL_F;
182 val |= WINCONx_BURSTLEN_16WORD;
183 break;
184 default:
185 DRM_ERROR("Proper pixel format is not set\n");
186 return;
187 }
188
189 DRM_DEBUG_KMS("bpp = %u\n", plane->bpp);
190
191 /*
192 * In case of exynos, setting dma-burst to 16Word causes permanent
193 * tearing for very small buffers, e.g. cursor buffer. Burst Mode
194 * switching which is based on plane size is not recommended as
195 * plane size varies a lot towards the end of the screen and rapid
 196 * movement causes unstable DMA, which results in IOMMU faults/tearing.
197 */
198
199 if (plane->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
200 val &= ~WINCONx_BURSTLEN_MASK;
201 val |= WINCONx_BURSTLEN_8WORD;
202 }
203
204 writel(val, ctx->addr + DECON_WINCONx(win));
205}
206
207static void decon_shadow_protect_win(struct decon_context *ctx, int win,
208 bool protect)
209{
210 u32 val;
211
212 val = readl(ctx->addr + DECON_SHADOWCON);
213
214 if (protect)
215 val |= SHADOWCON_Wx_PROTECT(win);
216 else
217 val &= ~SHADOWCON_Wx_PROTECT(win);
218
219 writel(val, ctx->addr + DECON_SHADOWCON);
220}
221
222static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
223{
224 struct decon_context *ctx = crtc->ctx;
225 struct exynos_drm_plane *plane;
226 u32 val;
227
228 if (win < 0 || win >= WINDOWS_NR)
229 return;
230
231 plane = &ctx->planes[win];
232
233 if (ctx->suspended)
234 return;
235
236 decon_shadow_protect_win(ctx, win, true);
237
238 val = COORDINATE_X(plane->crtc_x) | COORDINATE_Y(plane->crtc_y);
239 writel(val, ctx->addr + DECON_VIDOSDxA(win));
240
241 val = COORDINATE_X(plane->crtc_x + plane->crtc_width - 1) |
242 COORDINATE_Y(plane->crtc_y + plane->crtc_height - 1);
243 writel(val, ctx->addr + DECON_VIDOSDxB(win));
244
245 val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
246 VIDOSD_Wx_ALPHA_B_F(0x0);
247 writel(val, ctx->addr + DECON_VIDOSDxC(win));
248
249 val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
250 VIDOSD_Wx_ALPHA_B_F(0x0);
251 writel(val, ctx->addr + DECON_VIDOSDxD(win));
252
253 writel(plane->dma_addr[0], ctx->addr + DECON_VIDW0xADD0B0(win));
254
255 val = plane->dma_addr[0] + plane->pitch * plane->crtc_height;
256 writel(val, ctx->addr + DECON_VIDW0xADD1B0(win));
257
258 val = OFFSIZE(plane->pitch - plane->crtc_width * (plane->bpp >> 3))
259 | PAGEWIDTH(plane->crtc_width * (plane->bpp >> 3));
260 writel(val, ctx->addr + DECON_VIDW0xADD2(win));
261
262 decon_win_set_pixfmt(ctx, win);
263
264 /* window enable */
265 val = readl(ctx->addr + DECON_WINCONx(win));
266 val |= WINCONx_ENWIN_F;
267 writel(val, ctx->addr + DECON_WINCONx(win));
268
269 decon_shadow_protect_win(ctx, win, false);
270
271 /* standalone update */
272 val = readl(ctx->addr + DECON_UPDATE);
273 val |= STANDALONE_UPDATE_F;
274 writel(val, ctx->addr + DECON_UPDATE);
275
276 if (ctx->i80_if)
277 atomic_set(&ctx->win_updated, 1);
278}
279
280static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
281{
282 struct decon_context *ctx = crtc->ctx;
283 struct exynos_drm_plane *plane;
284 u32 val;
285
286 if (win < 0 || win >= WINDOWS_NR)
287 return;
288
289 plane = &ctx->planes[win];
290
291 if (ctx->suspended)
292 return;
293
294 decon_shadow_protect_win(ctx, win, true);
295
296 /* window disable */
297 val = readl(ctx->addr + DECON_WINCONx(win));
298 val &= ~WINCONx_ENWIN_F;
299 writel(val, ctx->addr + DECON_WINCONx(win));
300
301 decon_shadow_protect_win(ctx, win, false);
302
303 /* standalone update */
304 val = readl(ctx->addr + DECON_UPDATE);
305 val |= STANDALONE_UPDATE_F;
306 writel(val, ctx->addr + DECON_UPDATE);
307}
308
309static void decon_swreset(struct decon_context *ctx)
310{
311 unsigned int tries;
312
313 writel(0, ctx->addr + DECON_VIDCON0);
314 for (tries = 2000; tries; --tries) {
315 if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_STOP_STATUS)
316 break;
317 udelay(10);
318 }
319
320 WARN(tries == 0, "failed to disable DECON\n");
321
322 writel(VIDCON0_SWRESET, ctx->addr + DECON_VIDCON0);
323 for (tries = 2000; tries; --tries) {
324 if (~readl(ctx->addr + DECON_VIDCON0) & VIDCON0_SWRESET)
325 break;
326 udelay(10);
327 }
328
329 WARN(tries == 0, "failed to software reset DECON\n");
330}
331
332static void decon_enable(struct exynos_drm_crtc *crtc)
333{
334 struct decon_context *ctx = crtc->ctx;
335 int ret;
336 int i;
337
338 if (!ctx->suspended)
339 return;
340
341 ctx->suspended = false;
342
343 pm_runtime_get_sync(ctx->dev);
344
345 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
346 ret = clk_prepare_enable(ctx->clks[i]);
347 if (ret < 0)
348 goto err;
349 }
350
351 set_bit(BIT_CLKS_ENABLED, &ctx->enabled);
352
 353 /* if vblank was enabled before suspend, enable it again. */
354 if (test_and_clear_bit(0, &ctx->irq_flags))
355 decon_enable_vblank(ctx->crtc);
356
357 decon_commit(ctx->crtc);
358
359 return;
360err:
361 while (--i >= 0)
362 clk_disable_unprepare(ctx->clks[i]);
363
364 ctx->suspended = true;
365}
366
367static void decon_disable(struct exynos_drm_crtc *crtc)
368{
369 struct decon_context *ctx = crtc->ctx;
370 int i;
371
372 if (ctx->suspended)
373 return;
374
375 /*
376 * We need to make sure that all windows are disabled before we
377 * suspend that connector. Otherwise we might try to scan from
378 * a destroyed buffer later.
379 */
380 for (i = 0; i < WINDOWS_NR; i++)
381 decon_win_disable(crtc, i);
382
383 decon_swreset(ctx);
384
385 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++)
386 clk_disable_unprepare(ctx->clks[i]);
387
388 clear_bit(BIT_CLKS_ENABLED, &ctx->enabled);
389
390 pm_runtime_put_sync(ctx->dev);
391
392 ctx->suspended = true;
393}
394
 395static void decon_te_irq_handler(struct exynos_drm_crtc *crtc)
396{
397 struct decon_context *ctx = crtc->ctx;
398 u32 val;
399
400 if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled))
401 return;
402
403 if (atomic_add_unless(&ctx->win_updated, -1, 0)) {
404 /* trigger */
405 val = readl(ctx->addr + DECON_TRIGCON);
406 val |= TRIGCON_SWTRIGCMD;
407 writel(val, ctx->addr + DECON_TRIGCON);
408 }
409
410 drm_handle_vblank(ctx->drm_dev, ctx->pipe);
411}
412
413static void decon_clear_channels(struct exynos_drm_crtc *crtc)
414{
415 struct decon_context *ctx = crtc->ctx;
416 int win, i, ret;
417 u32 val;
418
419 DRM_DEBUG_KMS("%s\n", __FILE__);
420
421 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
422 ret = clk_prepare_enable(ctx->clks[i]);
423 if (ret < 0)
424 goto err;
425 }
426
427 for (win = 0; win < WINDOWS_NR; win++) {
428 /* shadow update disable */
429 val = readl(ctx->addr + DECON_SHADOWCON);
430 val |= SHADOWCON_Wx_PROTECT(win);
431 writel(val, ctx->addr + DECON_SHADOWCON);
432
433 /* window disable */
434 val = readl(ctx->addr + DECON_WINCONx(win));
435 val &= ~WINCONx_ENWIN_F;
436 writel(val, ctx->addr + DECON_WINCONx(win));
437
438 /* shadow update enable */
439 val = readl(ctx->addr + DECON_SHADOWCON);
440 val &= ~SHADOWCON_Wx_PROTECT(win);
441 writel(val, ctx->addr + DECON_SHADOWCON);
442
443 /* standalone update */
444 val = readl(ctx->addr + DECON_UPDATE);
445 val |= STANDALONE_UPDATE_F;
446 writel(val, ctx->addr + DECON_UPDATE);
447 }
448 /* TODO: wait for possible vsync */
449 msleep(50);
450
451err:
452 while (--i >= 0)
453 clk_disable_unprepare(ctx->clks[i]);
454}
455
456static struct exynos_drm_crtc_ops decon_crtc_ops = {
457 .enable = decon_enable,
458 .disable = decon_disable,
459 .commit = decon_commit,
460 .enable_vblank = decon_enable_vblank,
461 .disable_vblank = decon_disable_vblank,
463 .win_commit = decon_win_commit,
464 .win_disable = decon_win_disable,
465 .te_handler = decon_te_irq_handler,
466 .clear_channels = decon_clear_channels,
467};
468
469static int decon_bind(struct device *dev, struct device *master, void *data)
470{
471 struct decon_context *ctx = dev_get_drvdata(dev);
472 struct drm_device *drm_dev = data;
473 struct exynos_drm_private *priv = drm_dev->dev_private;
474 struct exynos_drm_plane *exynos_plane;
475 enum drm_plane_type type;
476 unsigned int zpos;
477 int ret;
478
479 ctx->drm_dev = drm_dev;
480 ctx->pipe = priv->pipe++;
481
482 for (zpos = 0; zpos < WINDOWS_NR; zpos++) {
483 type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY :
484 DRM_PLANE_TYPE_OVERLAY;
485 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos],
486 1 << ctx->pipe, type, zpos);
487 if (ret)
488 return ret;
489 }
490
491 exynos_plane = &ctx->planes[ctx->default_win];
492 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
493 ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD,
494 &decon_crtc_ops, ctx);
495 if (IS_ERR(ctx->crtc)) {
496 ret = PTR_ERR(ctx->crtc);
497 goto err;
498 }
499
500 ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
501 if (ret)
502 goto err;
503
504 return ret;
505err:
506 priv->pipe--;
507 return ret;
508}
509
510static void decon_unbind(struct device *dev, struct device *master, void *data)
511{
512 struct decon_context *ctx = dev_get_drvdata(dev);
513
514 decon_disable(ctx->crtc);
515
516 /* detach this sub driver from iommu mapping if supported. */
517 if (is_drm_iommu_supported(ctx->drm_dev))
518 drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
519}
520
521static const struct component_ops decon_component_ops = {
522 .bind = decon_bind,
523 .unbind = decon_unbind,
524};
525
526static irqreturn_t decon_vsync_irq_handler(int irq, void *dev_id)
527{
528 struct decon_context *ctx = dev_id;
529 u32 val;
530
531 if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled))
532 goto out;
533
534 val = readl(ctx->addr + DECON_VIDINTCON1);
535 if (val & VIDINTCON1_INTFRMPEND) {
536 drm_handle_vblank(ctx->drm_dev, ctx->pipe);
537
538 /* clear */
539 writel(VIDINTCON1_INTFRMPEND, ctx->addr + DECON_VIDINTCON1);
540 }
541
542out:
543 return IRQ_HANDLED;
544}
545
546static irqreturn_t decon_lcd_sys_irq_handler(int irq, void *dev_id)
547{
548 struct decon_context *ctx = dev_id;
549 u32 val;
550
551 if (!test_bit(BIT_CLKS_ENABLED, &ctx->enabled))
552 goto out;
553
554 val = readl(ctx->addr + DECON_VIDINTCON1);
555 if (val & VIDINTCON1_INTFRMDONEPEND) {
556 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
557
558 /* clear */
559 writel(VIDINTCON1_INTFRMDONEPEND,
560 ctx->addr + DECON_VIDINTCON1);
561 }
562
563out:
564 return IRQ_HANDLED;
565}
566
567static int exynos5433_decon_probe(struct platform_device *pdev)
568{
569 struct device *dev = &pdev->dev;
570 struct decon_context *ctx;
571 struct resource *res;
572 int ret;
573 int i;
574
575 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
576 if (!ctx)
577 return -ENOMEM;
578
579 ctx->default_win = 0;
580 ctx->suspended = true;
581 ctx->dev = dev;
582 if (of_get_child_by_name(dev->of_node, "i80-if-timings"))
583 ctx->i80_if = true;
584
585 for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
586 struct clk *clk;
587
588 clk = devm_clk_get(ctx->dev, decon_clks_name[i]);
589 if (IS_ERR(clk))
590 return PTR_ERR(clk);
591
592 ctx->clks[i] = clk;
593 }
594
595 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
596 if (!res) {
597 dev_err(dev, "cannot find IO resource\n");
598 return -ENXIO;
599 }
600
601 ctx->addr = devm_ioremap_resource(dev, res);
602 if (IS_ERR(ctx->addr)) {
603 dev_err(dev, "ioremap failed\n");
604 return PTR_ERR(ctx->addr);
605 }
606
607 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
608 ctx->i80_if ? "lcd_sys" : "vsync");
609 if (!res) {
610 dev_err(dev, "cannot find IRQ resource\n");
611 return -ENXIO;
612 }
613
614 ret = devm_request_irq(dev, res->start, ctx->i80_if ?
615 decon_lcd_sys_irq_handler : decon_vsync_irq_handler, 0,
616 "drm_decon", ctx);
617 if (ret < 0) {
618 dev_err(dev, "lcd_sys irq request failed\n");
619 return ret;
620 }
621
622 platform_set_drvdata(pdev, ctx);
623
624 pm_runtime_enable(dev);
625
626 ret = component_add(dev, &decon_component_ops);
627 if (ret)
628 goto err_disable_pm_runtime;
629
630 return 0;
631
632err_disable_pm_runtime:
633 pm_runtime_disable(dev);
634
635 return ret;
636}
637
638static int exynos5433_decon_remove(struct platform_device *pdev)
639{
640 pm_runtime_disable(&pdev->dev);
641
642 component_del(&pdev->dev, &decon_component_ops);
643
644 return 0;
645}
646
647static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
648 { .compatible = "samsung,exynos5433-decon" },
649 {},
650};
651MODULE_DEVICE_TABLE(of, exynos5433_decon_driver_dt_match);
652
653struct platform_driver exynos5433_decon_driver = {
654 .probe = exynos5433_decon_probe,
655 .remove = exynos5433_decon_remove,
656 .driver = {
657 .name = "exynos5433-decon",
658 .of_match_table = exynos5433_decon_driver_dt_match,
659 },
660};
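Note the division of labor in this new driver: exynos5433_decon_probe() only claims resources (clocks, registers, the vsync or lcd_sys interrupt) and then calls component_add(), while everything that needs a struct drm_device, such as plane and CRTC creation, waits for decon_bind(), which the component master invokes once all components are present. A compressed sketch of that probe/bind split; the my_* names are illustrative, not part of the driver:

#include <drm/drmP.h>
#include <linux/component.h>
#include <linux/platform_device.h>

static int my_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *drm_dev = data;	/* handed down by the master */

	/* create planes and the CRTC against drm_dev here */
	return 0;
}

static void my_unbind(struct device *dev, struct device *master, void *data)
{
	/* tear down whatever my_bind() created */
}

static const struct component_ops my_component_ops = {
	.bind	= my_bind,
	.unbind	= my_unbind,
};

static int my_probe(struct platform_device *pdev)
{
	/* acquire clocks, map registers, request the IRQ first ... */
	return component_add(&pdev->dev, &my_component_ops);
}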
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 6714e5b193ea..362532afd1a5 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -89,8 +89,9 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
 	DRM_DEBUG_KMS("vblank wait timed out.\n");
 }
 
-static void decon_clear_channel(struct decon_context *ctx)
+static void decon_clear_channels(struct exynos_drm_crtc *crtc)
 {
+	struct decon_context *ctx = crtc->ctx;
 	unsigned int win, ch_enabled = 0;
 
 	DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -120,27 +121,16 @@ static int decon_ctx_initialize(struct decon_context *ctx,
 				struct drm_device *drm_dev)
 {
 	struct exynos_drm_private *priv = drm_dev->dev_private;
+	int ret;
 
 	ctx->drm_dev = drm_dev;
 	ctx->pipe = priv->pipe++;
 
-	/* attach this sub driver to iommu mapping if supported. */
-	if (is_drm_iommu_supported(ctx->drm_dev)) {
-		int ret;
-
-		/*
-		 * If any channel is already active, iommu will throw
-		 * a PAGE FAULT when enabled. So clear any channel if enabled.
-		 */
-		decon_clear_channel(ctx);
-		ret = drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
-		if (ret) {
-			DRM_ERROR("drm_iommu_attach failed.\n");
-			return ret;
-		}
-	}
+	ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, ctx->dev);
+	if (ret)
+		priv->pipe--;
 
-	return 0;
+	return ret;
 }
 
 static void decon_ctx_remove(struct decon_context *ctx)
@@ -175,7 +165,7 @@ static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
 static void decon_commit(struct exynos_drm_crtc *crtc)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct drm_display_mode *mode = &crtc->base.mode;
+	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
 	u32 val, clkdiv;
 
 	if (ctx->suspended)
@@ -395,7 +385,7 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
 static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 {
 	struct decon_context *ctx = crtc->ctx;
-	struct drm_display_mode *mode = &crtc->base.mode;
+	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
 	struct exynos_drm_plane *plane;
 	int padding;
 	unsigned long val, alpha;
@@ -410,11 +400,8 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 
 	plane = &ctx->planes[win];
 
-	/* If suspended, enable this on resume */
-	if (ctx->suspended) {
-		plane->resume = true;
+	if (ctx->suspended)
 		return;
-	}
 
 	/*
 	 * SHADOWCON/PRTCON register is used for enabling timing.
@@ -506,8 +493,6 @@ static void decon_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 	val = readl(ctx->regs + DECON_UPDATE);
 	val |= DECON_UPDATE_STANDALONE_F;
 	writel(val, ctx->regs + DECON_UPDATE);
-
-	plane->enabled = true;
 }
 
 static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
@@ -521,11 +506,8 @@ static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
 
 	plane = &ctx->planes[win];
 
-	if (ctx->suspended) {
-		/* do not resume this window*/
-		plane->resume = false;
+	if (ctx->suspended)
 		return;
-	}
 
 	/* protect windows */
 	decon_shadow_protect_win(ctx, win, true);
@@ -541,49 +523,6 @@ static void decon_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
 	val = readl(ctx->regs + DECON_UPDATE);
 	val |= DECON_UPDATE_STANDALONE_F;
 	writel(val, ctx->regs + DECON_UPDATE);
-
-	plane->enabled = false;
-}
-
-static void decon_window_suspend(struct decon_context *ctx)
-{
-	struct exynos_drm_plane *plane;
-	int i;
-
-	for (i = 0; i < WINDOWS_NR; i++) {
-		plane = &ctx->planes[i];
-		plane->resume = plane->enabled;
-		if (plane->enabled)
-			decon_win_disable(ctx->crtc, i);
-	}
-}
-
-static void decon_window_resume(struct decon_context *ctx)
-{
-	struct exynos_drm_plane *plane;
-	int i;
-
-	for (i = 0; i < WINDOWS_NR; i++) {
-		plane = &ctx->planes[i];
-		plane->enabled = plane->resume;
-		plane->resume = false;
-	}
-}
-
-static void decon_apply(struct decon_context *ctx)
-{
-	struct exynos_drm_plane *plane;
-	int i;
-
-	for (i = 0; i < WINDOWS_NR; i++) {
-		plane = &ctx->planes[i];
-		if (plane->enabled)
-			decon_win_commit(ctx->crtc, i);
-		else
-			decon_win_disable(ctx->crtc, i);
-	}
-
-	decon_commit(ctx->crtc);
 }
 
 static void decon_init(struct decon_context *ctx)
@@ -603,12 +542,13 @@ static void decon_init(struct decon_context *ctx)
 	writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0));
 }
 
-static int decon_poweron(struct decon_context *ctx)
+static void decon_enable(struct exynos_drm_crtc *crtc)
 {
+	struct decon_context *ctx = crtc->ctx;
 	int ret;
 
 	if (!ctx->suspended)
-		return 0;
+		return;
 
 	ctx->suspended = false;
 
@@ -617,68 +557,51 @@ static int decon_poweron(struct decon_context *ctx)
 	ret = clk_prepare_enable(ctx->pclk);
 	if (ret < 0) {
 		DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret);
-		goto pclk_err;
+		return;
 	}
 
 	ret = clk_prepare_enable(ctx->aclk);
 	if (ret < 0) {
 		DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret);
-		goto aclk_err;
+		return;
 	}
 
 	ret = clk_prepare_enable(ctx->eclk);
 	if (ret < 0) {
 		DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret);
-		goto eclk_err;
+		return;
 	}
 
 	ret = clk_prepare_enable(ctx->vclk);
 	if (ret < 0) {
 		DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret);
-		goto vclk_err;
+		return;
 	}
 
 	decon_init(ctx);
 
 	/* if vblank was enabled status, enable it again. */
-	if (test_and_clear_bit(0, &ctx->irq_flags)) {
-		ret = decon_enable_vblank(ctx->crtc);
-		if (ret) {
-			DRM_ERROR("Failed to re-enable vblank [%d]\n", ret);
-			goto err;
-		}
-	}
-
-	decon_window_resume(ctx);
-
-	decon_apply(ctx);
+	if (test_and_clear_bit(0, &ctx->irq_flags))
+		decon_enable_vblank(ctx->crtc);
 
-	return 0;
-
-err:
-	clk_disable_unprepare(ctx->vclk);
-vclk_err:
-	clk_disable_unprepare(ctx->eclk);
-eclk_err:
-	clk_disable_unprepare(ctx->aclk);
-aclk_err:
-	clk_disable_unprepare(ctx->pclk);
-pclk_err:
-	ctx->suspended = true;
-	return ret;
+	decon_commit(ctx->crtc);
 }
 
-static int decon_poweroff(struct decon_context *ctx)
+static void decon_disable(struct exynos_drm_crtc *crtc)
 {
+	struct decon_context *ctx = crtc->ctx;
+	int i;
+
 	if (ctx->suspended)
-		return 0;
+		return;
 
 	/*
 	 * We need to make sure that all windows are disabled before we
 	 * suspend that connector. Otherwise we might try to scan from
 	 * a destroyed buffer later.
	 */
-	decon_window_suspend(ctx);
+	for (i = 0; i < WINDOWS_NR; i++)
+		decon_win_disable(crtc, i);
 
 	clk_disable_unprepare(ctx->vclk);
 	clk_disable_unprepare(ctx->eclk);
@@ -688,30 +611,11 @@ static int decon_poweroff(struct decon_context *ctx)
 	pm_runtime_put_sync(ctx->dev);
 
 	ctx->suspended = true;
-	return 0;
-}
-
-static void decon_dpms(struct exynos_drm_crtc *crtc, int mode)
-{
-	DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
-
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		decon_poweron(crtc->ctx);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		decon_poweroff(crtc->ctx);
-		break;
-	default:
-		DRM_DEBUG_KMS("unspecified mode %d\n", mode);
-		break;
-	}
 }
 
 static const struct exynos_drm_crtc_ops decon_crtc_ops = {
-	.dpms = decon_dpms,
+	.enable = decon_enable,
+	.disable = decon_disable,
 	.mode_fixup = decon_mode_fixup,
 	.commit = decon_commit,
 	.enable_vblank = decon_enable_vblank,
@@ -719,6 +623,7 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
 	.wait_for_vblank = decon_wait_for_vblank,
 	.win_commit = decon_win_commit,
 	.win_disable = decon_win_disable,
+	.clear_channels = decon_clear_channels,
 };
 
 
@@ -796,7 +701,7 @@ static void decon_unbind(struct device *dev, struct device *master,
 {
 	struct decon_context *ctx = dev_get_drvdata(dev);
 
-	decon_dpms(ctx->crtc, DRM_MODE_DPMS_OFF);
+	decon_disable(ctx->crtc);
 
 	if (ctx->display)
 		exynos_dpi_remove(ctx->display);
@@ -824,11 +729,6 @@ static int decon_probe(struct platform_device *pdev)
 	if (!ctx)
 		return -ENOMEM;
 
-	ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CRTC,
-				       EXYNOS_DISPLAY_TYPE_LCD);
-	if (ret)
-		return ret;
-
 	ctx->dev = dev;
 	ctx->suspended = true;
 
@@ -838,10 +738,8 @@ static int decon_probe(struct platform_device *pdev)
 	of_node_put(i80_if_timings);
 
 	ctx->regs = of_iomap(dev->of_node, 0);
-	if (!ctx->regs) {
-		ret = -ENOMEM;
-		goto err_del_component;
-	}
+	if (!ctx->regs)
+		return -ENOMEM;
 
 	ctx->pclk = devm_clk_get(dev, "pclk_decon0");
 	if (IS_ERR(ctx->pclk)) {
@@ -911,8 +809,6 @@ err_disable_pm_runtime:
 err_iounmap:
 	iounmap(ctx->regs);
 
-err_del_component:
-	exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CRTC);
 	return ret;
 }
 
@@ -925,7 +821,6 @@ static int decon_remove(struct platform_device *pdev)
 	iounmap(ctx->regs);
 
 	component_del(&pdev->dev, &decon_component_ops);
-	exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
 
 	return 0;
 }
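The hunk above collapses the old "clear any scanning channel, then attach the IOMMU" sequence into a single drm_iommu_attach_device_if_possible() call. Its body is not part of this diff, but given the new clear_channels op added to decon_crtc_ops, the helper plausibly looks like the following sketch (the real implementation lives in exynos_drm_iommu.c and may differ in detail):

/* Assumed shape of the helper used by decon_ctx_initialize() above. */
int drm_iommu_attach_device_if_possible(struct exynos_drm_crtc *exynos_crtc,
			struct drm_device *drm_dev, struct device *subdrv_dev)
{
	int ret = 0;

	if (!is_drm_iommu_supported(drm_dev))
		return 0;

	/* a channel still scanning out would fault once translation is on */
	if (exynos_crtc->ops->clear_channels)
		exynos_crtc->ops->clear_channels(exynos_crtc);

	ret = drm_iommu_attach_device(drm_dev, subdrv_dev);

	return ret;
}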
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 30feb7d06624..172b8002a2c8 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -28,8 +28,8 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_panel.h>
-#include <drm/bridge/ptn3460.h>
 
 #include "exynos_dp_core.h"
 
@@ -953,10 +953,13 @@ static void exynos_dp_connector_destroy(struct drm_connector *connector)
 }
 
 static struct drm_connector_funcs exynos_dp_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = exynos_dp_detect,
 	.destroy = exynos_dp_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static int exynos_dp_get_modes(struct drm_connector *connector)
@@ -1329,7 +1332,6 @@ static int exynos_dp_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct device_node *panel_node, *bridge_node, *endpoint;
 	struct exynos_dp_device *dp;
-	int ret;
 
 	dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
 				GFP_KERNEL);
@@ -1340,11 +1342,6 @@ static int exynos_dp_probe(struct platform_device *pdev)
 	dp->display.ops = &exynos_dp_display_ops;
 	platform_set_drvdata(pdev, dp);
 
-	ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
-					dp->display.type);
-	if (ret)
-		return ret;
-
 	panel_node = of_parse_phandle(dev->of_node, "panel", 0);
 	if (panel_node) {
 		dp->panel = of_drm_find_panel(panel_node);
@@ -1365,18 +1362,12 @@ static int exynos_dp_probe(struct platform_device *pdev)
 		return -EPROBE_DEFER;
 	}
 
-	ret = component_add(&pdev->dev, &exynos_dp_ops);
-	if (ret)
-		exynos_drm_component_del(&pdev->dev,
-				EXYNOS_DEVICE_TYPE_CONNECTOR);
-
-	return ret;
+	return component_add(&pdev->dev, &exynos_dp_ops);
 }
 
 static int exynos_dp_remove(struct platform_device *pdev)
 {
 	component_del(&pdev->dev, &exynos_dp_ops);
-	exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
 
 	return 0;
 }
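The connector change above is the standard atomic-helper boilerplate: legacy DPMS requests are routed through drm_atomic_helper_connector_dpms(), and the reset/duplicate/destroy trio lets the core manage a drm_connector_state for the connector. The same three lines reappear verbatim in the dpi and dsi connectors later in this patch. Reduced to its essentials, an atomic-ready connector funcs table looks like this sketch (my_detect and my_destroy stand in for the driver's own callbacks):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

static struct drm_connector_funcs my_connector_funcs = {
	.dpms			= drm_atomic_helper_connector_dpms,
	.fill_modes		= drm_helper_probe_single_connector_modes,
	.detect			= my_detect,
	.destroy		= my_destroy,
	.reset			= drm_atomic_helper_connector_reset,
	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
};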
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9006b947e03c..644b4b76e071 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -14,57 +14,47 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_encoder.h"
 #include "exynos_drm_plane.h"
 
-static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void exynos_drm_crtc_enable(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
-
-	if (exynos_crtc->dpms == mode) {
-		DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+	if (exynos_crtc->enabled)
 		return;
-	}
-
-	if (mode > DRM_MODE_DPMS_ON) {
-		/* wait for the completion of page flip. */
-		if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
-				(exynos_crtc->event == NULL), HZ/20))
-			exynos_crtc->event = NULL;
-		drm_crtc_vblank_off(crtc);
-	}
-
-	if (exynos_crtc->ops->dpms)
-		exynos_crtc->ops->dpms(exynos_crtc, mode);
 
-	exynos_crtc->dpms = mode;
+	if (exynos_crtc->ops->enable)
+		exynos_crtc->ops->enable(exynos_crtc);
 
-	if (mode == DRM_MODE_DPMS_ON)
-		drm_crtc_vblank_on(crtc);
-}
+	exynos_crtc->enabled = true;
 
-static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
-{
-	/* drm framework doesn't check NULL. */
+	drm_crtc_vblank_on(crtc);
 }
 
-static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
+static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-	struct exynos_drm_plane *exynos_plane = to_exynos_plane(crtc->primary);
 
-	exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	if (!exynos_crtc->enabled)
+		return;
 
-	if (exynos_crtc->ops->win_commit)
-		exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);
+	/* wait for the completion of page flip. */
+	if (!wait_event_timeout(exynos_crtc->pending_flip_queue,
+				(exynos_crtc->event == NULL), HZ/20))
+		exynos_crtc->event = NULL;
 
-	if (exynos_crtc->ops->commit)
-		exynos_crtc->ops->commit(exynos_crtc);
+	drm_crtc_vblank_off(crtc);
+
+	if (exynos_crtc->ops->disable)
+		exynos_crtc->ops->disable(exynos_crtc);
+
+	exynos_crtc->enabled = false;
 }
 
 static bool
@@ -81,145 +71,38 @@ exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
 	return true;
 }
 
-static int
-exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
-			struct drm_display_mode *adjusted_mode, int x, int y,
-			struct drm_framebuffer *old_fb)
+static void
+exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
-	struct drm_framebuffer *fb = crtc->primary->fb;
-	unsigned int crtc_w;
-	unsigned int crtc_h;
-	int ret;
-
-	/*
-	 * copy the mode data adjusted by mode_fixup() into crtc->mode
-	 * so that hardware can be seet to proper mode.
-	 */
-	memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
-
-	ret = exynos_check_plane(crtc->primary, fb);
-	if (ret < 0)
-		return ret;
-
-	crtc_w = fb->width - x;
-	crtc_h = fb->height - y;
-	exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
-			      crtc_w, crtc_h, x, y, crtc_w, crtc_h);
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	return 0;
+	if (exynos_crtc->ops->commit)
+		exynos_crtc->ops->commit(exynos_crtc);
 }
 
-static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-					  struct drm_framebuffer *old_fb)
+static void exynos_crtc_atomic_begin(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-	struct drm_framebuffer *fb = crtc->primary->fb;
-	unsigned int crtc_w;
-	unsigned int crtc_h;
 
-	/* when framebuffer changing is requested, crtc's dpms should be on */
-	if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
-		DRM_ERROR("failed framebuffer changing request.\n");
-		return -EPERM;
+	if (crtc->state->event) {
+		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+		exynos_crtc->event = crtc->state->event;
 	}
-
-	crtc_w = fb->width - x;
-	crtc_h = fb->height - y;
-
-	return exynos_update_plane(crtc->primary, crtc, fb, 0, 0,
-				   crtc_w, crtc_h, x, y, crtc_w, crtc_h);
 }
 
-static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
+static void exynos_crtc_atomic_flush(struct drm_crtc *crtc)
 {
-	struct drm_plane *plane;
-	int ret;
-
-	exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-
-	drm_for_each_legacy_plane(plane, &crtc->dev->mode_config.plane_list) {
-		if (plane->crtc != crtc)
-			continue;
-
-		ret = plane->funcs->disable_plane(plane);
-		if (ret)
-			DRM_ERROR("Failed to disable plane %d\n", ret);
-	}
 }
 
 static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
-	.dpms = exynos_drm_crtc_dpms,
-	.prepare = exynos_drm_crtc_prepare,
-	.commit = exynos_drm_crtc_commit,
-	.mode_fixup = exynos_drm_crtc_mode_fixup,
-	.mode_set = exynos_drm_crtc_mode_set,
-	.mode_set_base = exynos_drm_crtc_mode_set_base,
+	.enable = exynos_drm_crtc_enable,
 	.disable = exynos_drm_crtc_disable,
+	.mode_fixup = exynos_drm_crtc_mode_fixup,
+	.mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
+	.atomic_begin = exynos_crtc_atomic_begin,
+	.atomic_flush = exynos_crtc_atomic_flush,
 };
 
-static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
-				     struct drm_framebuffer *fb,
-				     struct drm_pending_vblank_event *event,
-				     uint32_t page_flip_flags)
-{
-	struct drm_device *dev = crtc->dev;
-	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-	struct drm_framebuffer *old_fb = crtc->primary->fb;
-	unsigned int crtc_w, crtc_h;
-	int ret;
-
-	/* when the page flip is requested, crtc's dpms should be on */
-	if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
-		DRM_ERROR("failed page flip request.\n");
-		return -EINVAL;
-	}
-
-	if (!event)
-		return -EINVAL;
-
-	spin_lock_irq(&dev->event_lock);
-	if (exynos_crtc->event) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	ret = drm_vblank_get(dev, exynos_crtc->pipe);
-	if (ret) {
-		DRM_DEBUG("failed to acquire vblank counter\n");
-		goto out;
-	}
-
-	exynos_crtc->event = event;
-	spin_unlock_irq(&dev->event_lock);
-
-	/*
-	 * the pipe from user always is 0 so we can set pipe number
-	 * of current owner to event.
-	 */
-	event->pipe = exynos_crtc->pipe;
-
-	crtc->primary->fb = fb;
-	crtc_w = fb->width - crtc->x;
-	crtc_h = fb->height - crtc->y;
-	ret = exynos_update_plane(crtc->primary, crtc, fb, 0, 0,
-				  crtc_w, crtc_h, crtc->x, crtc->y,
-				  crtc_w, crtc_h);
-	if (ret) {
-		crtc->primary->fb = old_fb;
-		spin_lock_irq(&dev->event_lock);
-		exynos_crtc->event = NULL;
-		drm_vblank_put(dev, exynos_crtc->pipe);
-		spin_unlock_irq(&dev->event_lock);
-		return ret;
-	}
-
-	return 0;
-
-out:
-	spin_unlock_irq(&dev->event_lock);
-	return ret;
-}
-
 static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
@@ -232,9 +115,12 @@ static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
 }
 
 static struct drm_crtc_funcs exynos_crtc_funcs = {
-	.set_config = drm_crtc_helper_set_config,
-	.page_flip = exynos_drm_crtc_page_flip,
+	.set_config = drm_atomic_helper_set_config,
+	.page_flip = drm_atomic_helper_page_flip,
 	.destroy = exynos_drm_crtc_destroy,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
 };
 
 struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
@@ -255,7 +141,6 @@ struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
 
 	init_waitqueue_head(&exynos_crtc->pending_flip_queue);
 
-	exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
 	exynos_crtc->pipe = pipe;
 	exynos_crtc->type = type;
 	exynos_crtc->ops = ops;
@@ -286,7 +171,7 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
 	struct exynos_drm_crtc *exynos_crtc =
 		to_exynos_crtc(private->crtc[pipe]);
 
-	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+	if (!exynos_crtc->enabled)
 		return -EPERM;
 
 	if (exynos_crtc->ops->enable_vblank)
@@ -301,7 +186,7 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
 	struct exynos_drm_crtc *exynos_crtc =
 		to_exynos_crtc(private->crtc[pipe]);
 
-	if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+	if (!exynos_crtc->enabled)
 		return;
 
 	if (exynos_crtc->ops->disable_vblank)
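exynos_crtc_atomic_begin() above grabs a vblank reference and parks crtc->state->event in exynos_crtc->event; completing the flip is left to the driver's vblank interrupt path, which this diff does not show. A handler consistent with that contract would look roughly like the sketch below (in the tree this role is played by exynos_drm_crtc_finish_pageflip(); locking is simplified here):

static void sketch_finish_pageflip(struct drm_device *dev,
				   struct exynos_drm_crtc *exynos_crtc)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (exynos_crtc->event) {
		drm_send_vblank_event(dev, exynos_crtc->pipe,
				      exynos_crtc->event);
		/* pairs with the drm_crtc_vblank_get() in atomic_begin */
		drm_vblank_put(dev, exynos_crtc->pipe);
		exynos_crtc->event = NULL;
		wake_up(&exynos_crtc->pending_flip_queue);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}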
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 37678cf4425a..7cb6595c1894 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -13,6 +13,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_panel.h>
+#include <drm/drm_atomic_helper.h>
 
 #include <linux/regulator/consumer.h>
 
@@ -59,10 +60,13 @@ static void exynos_dpi_connector_destroy(struct drm_connector *connector)
 }
 
 static struct drm_connector_funcs exynos_dpi_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
+	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = exynos_dpi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = exynos_dpi_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
 static int exynos_dpi_get_modes(struct drm_connector *connector)
@@ -309,33 +313,19 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
 	ctx->dev = dev;
 	ctx->dpms_mode = DRM_MODE_DPMS_OFF;
 
-	ret = exynos_drm_component_add(dev,
-					EXYNOS_DEVICE_TYPE_CONNECTOR,
-					ctx->display.type);
-	if (ret)
-		return ERR_PTR(ret);
-
 	ret = exynos_dpi_parse_dt(ctx);
 	if (ret < 0) {
 		devm_kfree(dev, ctx);
-		goto err_del_component;
+		return NULL;
 	}
 
 	if (ctx->panel_node) {
 		ctx->panel = of_drm_find_panel(ctx->panel_node);
-		if (!ctx->panel) {
-			exynos_drm_component_del(dev,
-						EXYNOS_DEVICE_TYPE_CONNECTOR);
+		if (!ctx->panel)
 			return ERR_PTR(-EPROBE_DEFER);
-		}
 	}
 
 	return &ctx->display;
-
-err_del_component:
-	exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
-
-	return NULL;
 }
 
 int exynos_dpi_remove(struct exynos_drm_display *display)
@@ -347,7 +337,5 @@ int exynos_dpi_remove(struct exynos_drm_display *display)
 	if (ctx->panel)
 		drm_panel_detach(ctx->panel);
 
-	exynos_drm_component_del(ctx->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
-
 	return 0;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 8ac465208eae..63a68c60a353 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -38,19 +38,6 @@
 #define DRIVER_MAJOR	1
 #define DRIVER_MINOR	0
 
-static struct platform_device *exynos_drm_pdev;
-
-static DEFINE_MUTEX(drm_component_lock);
-static LIST_HEAD(drm_component_list);
-
-struct component_dev {
-	struct list_head list;
-	struct device *crtc_dev;
-	struct device *conn_dev;
-	enum exynos_drm_output_type out_type;
-	unsigned int dev_type_flag;
-};
-
 static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 {
 	struct exynos_drm_private *private;
@@ -98,6 +85,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto err_cleanup_vblank;
 
+	drm_mode_config_reset(dev);
+
 	/*
 	 * enable drm irq mode.
 	 * - with irq_enabled = true, we can use the vblank feature.
@@ -348,190 +337,29 @@ static const struct dev_pm_ops exynos_drm_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_sys_suspend, exynos_drm_sys_resume)
 };
 
-int exynos_drm_component_add(struct device *dev,
-			     enum exynos_drm_device_type dev_type,
-			     enum exynos_drm_output_type out_type)
-{
-	struct component_dev *cdev;
-
-	if (dev_type != EXYNOS_DEVICE_TYPE_CRTC &&
-	    dev_type != EXYNOS_DEVICE_TYPE_CONNECTOR) {
-		DRM_ERROR("invalid device type.\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&drm_component_lock);
-
-	/*
-	 * Make sure to check if there is a component which has two device
-	 * objects, for connector and for encoder/connector.
-	 * It should make sure that crtc and encoder/connector drivers are
-	 * ready before exynos drm core binds them.
-	 */
-	list_for_each_entry(cdev, &drm_component_list, list) {
-		if (cdev->out_type == out_type) {
-			/*
-			 * If crtc and encoder/connector device objects are
-			 * added already just return.
-			 */
-			if (cdev->dev_type_flag == (EXYNOS_DEVICE_TYPE_CRTC |
-						EXYNOS_DEVICE_TYPE_CONNECTOR)) {
-				mutex_unlock(&drm_component_lock);
-				return 0;
-			}
-
-			if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) {
-				cdev->crtc_dev = dev;
-				cdev->dev_type_flag |= dev_type;
-			}
-
-			if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) {
-				cdev->conn_dev = dev;
-				cdev->dev_type_flag |= dev_type;
-			}
-
-			mutex_unlock(&drm_component_lock);
-			return 0;
-		}
-	}
-
-	mutex_unlock(&drm_component_lock);
-
-	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
-	if (!cdev)
-		return -ENOMEM;
-
-	if (dev_type == EXYNOS_DEVICE_TYPE_CRTC)
-		cdev->crtc_dev = dev;
-	if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR)
-		cdev->conn_dev = dev;
-
-	cdev->out_type = out_type;
-	cdev->dev_type_flag = dev_type;
-
-	mutex_lock(&drm_component_lock);
-	list_add_tail(&cdev->list, &drm_component_list);
-	mutex_unlock(&drm_component_lock);
-
-	return 0;
-}
-
-void exynos_drm_component_del(struct device *dev,
-			      enum exynos_drm_device_type dev_type)
-{
-	struct component_dev *cdev, *next;
-
-	mutex_lock(&drm_component_lock);
-
-	list_for_each_entry_safe(cdev, next, &drm_component_list, list) {
-		if (dev_type == EXYNOS_DEVICE_TYPE_CRTC) {
-			if (cdev->crtc_dev == dev) {
-				cdev->crtc_dev = NULL;
-				cdev->dev_type_flag &= ~dev_type;
-			}
-		}
-
-		if (dev_type == EXYNOS_DEVICE_TYPE_CONNECTOR) {
-			if (cdev->conn_dev == dev) {
-				cdev->conn_dev = NULL;
-				cdev->dev_type_flag &= ~dev_type;
-			}
-		}
-
-		/*
-		 * Release cdev object only in case that both of crtc and
-		 * encoder/connector device objects are NULL.
-		 */
-		if (!cdev->crtc_dev && !cdev->conn_dev) {
-			list_del(&cdev->list);
-			kfree(cdev);
-		}
-	}
-
-	mutex_unlock(&drm_component_lock);
-}
-
-static int compare_dev(struct device *dev, void *data)
-{
-	return dev == (struct device *)data;
-}
-
-static struct component_match *exynos_drm_match_add(struct device *dev)
-{
-	struct component_match *match = NULL;
-	struct component_dev *cdev;
-	unsigned int attach_cnt = 0;
-
-	mutex_lock(&drm_component_lock);
-
-	/* Do not retry to probe if there is no any kms driver regitered. */
-	if (list_empty(&drm_component_list)) {
-		mutex_unlock(&drm_component_lock);
-		return ERR_PTR(-ENODEV);
-	}
-
-	list_for_each_entry(cdev, &drm_component_list, list) {
-		/*
-		 * Add components to master only in case that crtc and
-		 * encoder/connector device objects exist.
-		 */
-		if (!cdev->crtc_dev || !cdev->conn_dev)
-			continue;
-
-		attach_cnt++;
-
-		mutex_unlock(&drm_component_lock);
-
-		/*
-		 * fimd and dpi modules have same device object so add
-		 * only crtc device object in this case.
-		 */
-		if (cdev->crtc_dev == cdev->conn_dev) {
-			component_match_add(dev, &match, compare_dev,
-					    cdev->crtc_dev);
-			goto out_lock;
-		}
-
-		/*
-		 * Do not chage below call order.
-		 * crtc device first should be added to master because
-		 * connector/encoder need pipe number of crtc when they
-		 * are created.
-		 */
-		component_match_add(dev, &match, compare_dev, cdev->crtc_dev);
-		component_match_add(dev, &match, compare_dev, cdev->conn_dev);
-
-out_lock:
-		mutex_lock(&drm_component_lock);
-	}
-
-	mutex_unlock(&drm_component_lock);
-
-	return attach_cnt ? match : ERR_PTR(-EPROBE_DEFER);
-}
-
-static int exynos_drm_bind(struct device *dev)
-{
-	return drm_platform_init(&exynos_drm_driver, to_platform_device(dev));
-}
-
-static void exynos_drm_unbind(struct device *dev)
-{
-	drm_put_dev(dev_get_drvdata(dev));
-}
-
-static const struct component_master_ops exynos_drm_ops = {
-	.bind = exynos_drm_bind,
-	.unbind = exynos_drm_unbind,
-};
+/* forward declaration */
+static struct platform_driver exynos_drm_platform_driver;
 
+/*
+ * Connector drivers should not be placed before associated crtc drivers,
+ * because connector requires pipe number of its crtc during initialization.
+ */
 static struct platform_driver *const exynos_drm_kms_drivers[] = {
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+	&vidi_driver,
+#endif
 #ifdef CONFIG_DRM_EXYNOS_FIMD
 	&fimd_driver,
 #endif
+#ifdef CONFIG_DRM_EXYNOS5433_DECON
+	&exynos5433_decon_driver,
+#endif
 #ifdef CONFIG_DRM_EXYNOS7_DECON
 	&decon_driver,
 #endif
+#ifdef CONFIG_DRM_EXYNOS_MIC
+	&mic_driver,
+#endif
 #ifdef CONFIG_DRM_EXYNOS_DP
 	&dp_driver,
 #endif
@@ -560,6 +388,59 @@ static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
 #ifdef CONFIG_DRM_EXYNOS_IPP
 	&ipp_driver,
 #endif
+	&exynos_drm_platform_driver,
+};
+
+static struct platform_driver *const exynos_drm_drv_with_simple_dev[] = {
+#ifdef CONFIG_DRM_EXYNOS_VIDI
+	&vidi_driver,
+#endif
+#ifdef CONFIG_DRM_EXYNOS_IPP
+	&ipp_driver,
+#endif
+	&exynos_drm_platform_driver,
+};
+#define PDEV_COUNT ARRAY_SIZE(exynos_drm_drv_with_simple_dev)
+
+static int compare_dev(struct device *dev, void *data)
+{
+	return dev == (struct device *)data;
+}
+
+static struct component_match *exynos_drm_match_add(struct device *dev)
+{
+	struct component_match *match = NULL;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(exynos_drm_kms_drivers); ++i) {
+		struct device_driver *drv = &exynos_drm_kms_drivers[i]->driver;
+		struct device *p = NULL, *d;
+
+		while ((d = bus_find_device(&platform_bus_type, p, drv,
+					    (void *)platform_bus_type.match))) {
+			put_device(p);
+			component_match_add(dev, &match, compare_dev, d);
+			p = d;
+		}
+		put_device(p);
+	}
+
+	return match ?: ERR_PTR(-ENODEV);
+}
+
+static int exynos_drm_bind(struct device *dev)
+{
+	return drm_platform_init(&exynos_drm_driver, to_platform_device(dev));
+}
+
+static void exynos_drm_unbind(struct device *dev)
+{
+	drm_put_dev(dev_get_drvdata(dev));
+}
+
+static const struct component_master_ops exynos_drm_ops = {
+	.bind = exynos_drm_bind,
+	.unbind = exynos_drm_unbind,
 };
 
 static int exynos_drm_platform_probe(struct platform_device *pdev)
@@ -570,9 +451,8 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
 	exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
 
 	match = exynos_drm_match_add(&pdev->dev);
-	if (IS_ERR(match)) {
+	if (IS_ERR(match))
 		return PTR_ERR(match);
-	}
 
 	return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
 					       match);
@@ -584,13 +464,6 @@ static int exynos_drm_platform_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static const char * const strings[] = {
-	"samsung,exynos3",
-	"samsung,exynos4",
-	"samsung,exynos5",
-	"samsung,exynos7",
-};
-
 static struct platform_driver exynos_drm_platform_driver = {
 	.probe	= exynos_drm_platform_probe,
 	.remove	= exynos_drm_platform_remove,
@@ -600,101 +473,125 @@ static struct platform_driver exynos_drm_platform_driver = {
 	},
 };
 
-static int exynos_drm_init(void)
+static struct platform_device *exynos_drm_pdevs[PDEV_COUNT];
+
+static void exynos_drm_unregister_devices(void)
 {
-	bool is_exynos = false;
-	int ret, i, j;
+	int i = PDEV_COUNT;
 
-	/*
-	 * Register device object only in case of Exynos SoC.
-	 *
-	 * Below codes resolves temporarily infinite loop issue incurred
-	 * by Exynos drm driver when using multi-platform kernel.
-	 * So these codes will be replaced with more generic way later.
-	 */
-	for (i = 0; i < ARRAY_SIZE(strings); i++) {
-		if (of_machine_is_compatible(strings[i])) {
-			is_exynos = true;
-			break;
-		}
+	while (--i >= 0) {
+		platform_device_unregister(exynos_drm_pdevs[i]);
+		exynos_drm_pdevs[i] = NULL;
 	}
+}
 
-	if (!is_exynos)
-		return -ENODEV;
+static int exynos_drm_register_devices(void)
+{
+	int i;
 
-	exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
-								NULL, 0);
-	if (IS_ERR(exynos_drm_pdev))
-		return PTR_ERR(exynos_drm_pdev);
+	for (i = 0; i < PDEV_COUNT; ++i) {
+		struct platform_driver *d = exynos_drm_drv_with_simple_dev[i];
+		struct platform_device *pdev =
+			platform_device_register_simple(d->driver.name, -1,
+							NULL, 0);
 
-	ret = exynos_drm_probe_vidi();
-	if (ret < 0)
-		goto err_unregister_pd;
+		if (!IS_ERR(pdev)) {
+			exynos_drm_pdevs[i] = pdev;
+			continue;
+		}
+		while (--i >= 0) {
+			platform_device_unregister(exynos_drm_pdevs[i]);
+			exynos_drm_pdevs[i] = NULL;
+		}
 
-	for (i = 0; i < ARRAY_SIZE(exynos_drm_kms_drivers); ++i) {
-		ret = platform_driver_register(exynos_drm_kms_drivers[i]);
-		if (ret < 0)
-			goto err_unregister_kms_drivers;
+		return PTR_ERR(pdev);
 	}
 
-	for (j = 0; j < ARRAY_SIZE(exynos_drm_non_kms_drivers); ++j) {
-		ret = platform_driver_register(exynos_drm_non_kms_drivers[j]);
-		if (ret < 0)
-			goto err_unregister_non_kms_drivers;
-	}
+	return 0;
+}
 
-#ifdef CONFIG_DRM_EXYNOS_IPP
-	ret = exynos_platform_device_ipp_register();
-	if (ret < 0)
-		goto err_unregister_non_kms_drivers;
-#endif
+static void exynos_drm_unregister_drivers(struct platform_driver * const *drv,
					  int count)
+{
+	while (--count >= 0)
+		platform_driver_unregister(drv[count]);
+}
 
-	ret = platform_driver_register(&exynos_drm_platform_driver);
-	if (ret)
-		goto err_unregister_resources;
+static int exynos_drm_register_drivers(struct platform_driver * const *drv,
+				       int count)
+{
+	int i, ret;
 
-	return 0;
+	for (i = 0; i < count; ++i) {
+		ret = platform_driver_register(drv[i]);
+		if (!ret)
+			continue;
 
-err_unregister_resources:
-#ifdef CONFIG_DRM_EXYNOS_IPP
-	exynos_platform_device_ipp_unregister();
-#endif
+		while (--i >= 0)
+			platform_driver_unregister(drv[i]);
 
-err_unregister_non_kms_drivers:
-	while (--j >= 0)
-		platform_driver_unregister(exynos_drm_non_kms_drivers[j]);
+		return ret;
+	}
 
-err_unregister_kms_drivers:
-	while (--i >= 0)
-		platform_driver_unregister(exynos_drm_kms_drivers[i]);
+	return 0;
+}
 
-	exynos_drm_remove_vidi();
+static inline int exynos_drm_register_kms_drivers(void)
+{
+	return exynos_drm_register_drivers(exynos_drm_kms_drivers,
+					   ARRAY_SIZE(exynos_drm_kms_drivers));
+}
 
-err_unregister_pd:
-	platform_device_unregister(exynos_drm_pdev);
+static inline int exynos_drm_register_non_kms_drivers(void)
+{
+	return exynos_drm_register_drivers(exynos_drm_non_kms_drivers,
+					   ARRAY_SIZE(exynos_drm_non_kms_drivers));
+}
 
-	return ret;
+static inline void exynos_drm_unregister_kms_drivers(void)
+{
+	exynos_drm_unregister_drivers(exynos_drm_kms_drivers,
+				      ARRAY_SIZE(exynos_drm_kms_drivers));
 }
 
-static void exynos_drm_exit(void)
+static inline void exynos_drm_unregister_non_kms_drivers(void)
 {
-	int i;
+	exynos_drm_unregister_drivers(exynos_drm_non_kms_drivers,
+				      ARRAY_SIZE(exynos_drm_non_kms_drivers));
+}
 
-#ifdef CONFIG_DRM_EXYNOS_IPP
-	exynos_platform_device_ipp_unregister();
-#endif
+static int exynos_drm_init(void)
+{
+	int ret;
 
-	for (i = ARRAY_SIZE(exynos_drm_non_kms_drivers) - 1; i >= 0; --i)
-		platform_driver_unregister(exynos_drm_non_kms_drivers[i]);
+	ret = exynos_drm_register_devices();
+	if (ret)
+		return ret;
 
-	for (i = ARRAY_SIZE(exynos_drm_kms_drivers) - 1; i >= 0; --i)
-		platform_driver_unregister(exynos_drm_kms_drivers[i]);
+	ret = exynos_drm_register_kms_drivers();
+	if (ret)
+		goto err_unregister_pdevs;
 
-	platform_driver_unregister(&exynos_drm_platform_driver);
+	ret = exynos_drm_register_non_kms_drivers();
+	if (ret)
+		goto err_unregister_kms_drivers;
+
+	return 0;
+
+err_unregister_kms_drivers:
+	exynos_drm_unregister_kms_drivers();
 
-	exynos_drm_remove_vidi();
+err_unregister_pdevs:
+	exynos_drm_unregister_devices();
 
-	platform_device_unregister(exynos_drm_pdev);
+	return ret;
+}
+
+static void exynos_drm_exit(void)
+{
+	exynos_drm_unregister_non_kms_drivers();
+	exynos_drm_unregister_kms_drivers();
+	exynos_drm_unregister_devices();
 }
 
 module_init(exynos_drm_init);
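The rewritten exynos_drm_match_add() replaces the hand-rolled component_dev bookkeeping with a walk over the platform bus: for every KMS platform_driver in the table, it finds each device already bound to that driver and adds it to the component match list. The trick is reusing platform_bus_type.match, the bus's own driver-matching callback, as the bus_find_device() predicate, with the driver pointer passed as the data argument. Isolated, the iteration idiom reads like this sketch (fn is a hypothetical per-device callback):

#include <linux/device.h>
#include <linux/platform_device.h>

/* Visit every platform device matched to "drv". bus_find_device() takes
 * a reference on each device it returns, so the previous device must be
 * put before advancing. */
static void sketch_for_each_matched_dev(struct device_driver *drv,
					void (*fn)(struct device *))
{
	struct device *p = NULL, *d;

	while ((d = bus_find_device(&platform_bus_type, p, drv,
				    (void *)platform_bus_type.match))) {
		put_device(p);
		fn(d);
		p = d;
	}
	put_device(p);
}

Note also the "match ?: ERR_PTR(-ENODEV)" return: if no bound KMS device exists at all, probe now fails with -ENODEV rather than deferring.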
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 29e3fb78c615..dd00f160c1e5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -25,13 +25,6 @@
 #define to_exynos_crtc(x)	container_of(x, struct exynos_drm_crtc, base)
 #define to_exynos_plane(x)	container_of(x, struct exynos_drm_plane, base)
 
-/* This enumerates device type. */
-enum exynos_drm_device_type {
-	EXYNOS_DEVICE_TYPE_NONE,
-	EXYNOS_DEVICE_TYPE_CRTC,
-	EXYNOS_DEVICE_TYPE_CONNECTOR,
-};
-
 /* this enumerates display type. */
 enum exynos_drm_output_type {
 	EXYNOS_DISPLAY_TYPE_NONE,
@@ -71,8 +64,6 @@ enum exynos_drm_output_type {
  * @dma_addr: array of bus(accessed by dma) address to the memory region
  *	allocated for a overlay.
  * @zpos: order of overlay layer(z position).
- * @enabled: enabled or not.
- * @resume: to resume or not.
  *
  * this structure is common to exynos SoC and its contents would be copied
  * to hardware specific overlay info.
@@ -101,9 +92,6 @@ struct exynos_drm_plane {
 	uint32_t pixel_format;
 	dma_addr_t dma_addr[MAX_FB_BUFFER];
 	unsigned int zpos;
-
-	bool enabled:1;
-	bool resume:1;
 };
 
 /*
@@ -157,7 +145,8 @@ struct exynos_drm_display {
 /*
  * Exynos drm crtc ops
  *
- * @dpms: control device power.
+ * @enable: enable the device
+ * @disable: disable the device
  * @mode_fixup: fix mode data before applying it
  * @commit: set current hw specific display mode to hw.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
@@ -175,7 +164,8 @@ struct exynos_drm_display {
  */
 struct exynos_drm_crtc;
 struct exynos_drm_crtc_ops {
-	void (*dpms)(struct exynos_drm_crtc *crtc, int mode);
+	void (*enable)(struct exynos_drm_crtc *crtc);
+	void (*disable)(struct exynos_drm_crtc *crtc);
 	bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
 			   const struct drm_display_mode *mode,
 			   struct drm_display_mode *adjusted_mode);
@@ -187,6 +177,7 @@ struct exynos_drm_crtc_ops {
 	void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
 	void (*te_handler)(struct exynos_drm_crtc *crtc);
 	void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
+	void (*clear_channels)(struct exynos_drm_crtc *crtc);
 };
 
 /*
@@ -201,7 +192,7 @@ struct exynos_drm_crtc_ops {
  * drm framework doesn't support multiple irq yet.
  * we can refer to the crtc to current hardware interrupt occurred through
  * this pipe value.
- * @dpms: store the crtc dpms value
+ * @enabled: if the crtc is enabled or not
  * @event: vblank event that is currently queued for flip
  * @ops: pointer to callbacks for exynos drm specific functionality
 * @ctx: A pointer to the crtc's implementation specific context
@@ -210,7 +201,7 @@ struct exynos_drm_crtc {
 	struct drm_crtc			base;
 	enum exynos_drm_output_type	type;
 	unsigned int			pipe;
-	unsigned int			dpms;
+	bool				enabled;
 	wait_queue_head_t		pending_flip_queue;
 	struct drm_pending_vblank_event	*event;
 	const struct exynos_drm_crtc_ops	*ops;
@@ -293,15 +284,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev);
 int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
 void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
 
-#ifdef CONFIG_DRM_EXYNOS_IPP
-int exynos_platform_device_ipp_register(void);
-void exynos_platform_device_ipp_unregister(void);
-#else
-static inline int exynos_platform_device_ipp_register(void) { return 0; }
-static inline void exynos_platform_device_ipp_unregister(void) {}
-#endif
-
-
 #ifdef CONFIG_DRM_EXYNOS_DPI
 struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
 int exynos_dpi_remove(struct exynos_drm_display *display);
@@ -314,26 +296,12 @@ static inline int exynos_dpi_remove(struct exynos_drm_display *display)
 }
 #endif
 
-#ifdef CONFIG_DRM_EXYNOS_VIDI
-int exynos_drm_probe_vidi(void);
-void exynos_drm_remove_vidi(void);
-#else
-static inline int exynos_drm_probe_vidi(void) { return 0; }
-static inline void exynos_drm_remove_vidi(void) {}
-#endif
-
 /* This function creates a encoder and a connector, and initializes them. */
 int exynos_drm_create_enc_conn(struct drm_device *dev,
 			       struct exynos_drm_display *display);
 
-int exynos_drm_component_add(struct device *dev,
-			     enum exynos_drm_device_type dev_type,
-			     enum exynos_drm_output_type out_type);
-
-void exynos_drm_component_del(struct device *dev,
-			      enum exynos_drm_device_type dev_type);
-
 extern struct platform_driver fimd_driver;
+extern struct platform_driver exynos5433_decon_driver;
 extern struct platform_driver decon_driver;
 extern struct platform_driver dp_driver;
 extern struct platform_driver dsi_driver;
@@ -346,4 +314,5 @@ extern struct platform_driver fimc_driver;
 extern struct platform_driver rotator_driver;
 extern struct platform_driver gsc_driver;
 extern struct platform_driver ipp_driver;
+extern struct platform_driver mic_driver;
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 04927153bf38..0e58b36cb8c2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -14,12 +14,14 @@
14#include <drm/drm_crtc_helper.h> 14#include <drm/drm_crtc_helper.h>
15#include <drm/drm_mipi_dsi.h> 15#include <drm/drm_mipi_dsi.h>
16#include <drm/drm_panel.h> 16#include <drm/drm_panel.h>
17#include <drm/drm_atomic_helper.h>
17 18
18#include <linux/clk.h> 19#include <linux/clk.h>
19#include <linux/gpio/consumer.h> 20#include <linux/gpio/consumer.h>
20#include <linux/irq.h> 21#include <linux/irq.h>
21#include <linux/of_device.h> 22#include <linux/of_device.h>
22#include <linux/of_gpio.h> 23#include <linux/of_gpio.h>
24#include <linux/of_graph.h>
23#include <linux/phy/phy.h> 25#include <linux/phy/phy.h>
24#include <linux/regulator/consumer.h> 26#include <linux/regulator/consumer.h>
25#include <linux/component.h> 27#include <linux/component.h>
@@ -33,38 +35,6 @@
33/* returns true iff both arguments logically differs */ 35/* returns true iff both arguments logically differs */
34#define NEQV(a, b) (!(a) ^ !(b)) 36#define NEQV(a, b) (!(a) ^ !(b))
35 37
36#define DSIM_STATUS_REG 0x0 /* Status register */
37#define DSIM_SWRST_REG 0x4 /* Software reset register */
38#define DSIM_CLKCTRL_REG 0x8 /* Clock control register */
39#define DSIM_TIMEOUT_REG 0xc /* Time out register */
40#define DSIM_CONFIG_REG 0x10 /* Configuration register */
41#define DSIM_ESCMODE_REG 0x14 /* Escape mode register */
42
43/* Main display image resolution register */
44#define DSIM_MDRESOL_REG 0x18
45#define DSIM_MVPORCH_REG 0x1c /* Main display Vporch register */
46#define DSIM_MHPORCH_REG 0x20 /* Main display Hporch register */
47#define DSIM_MSYNC_REG 0x24 /* Main display sync area register */
48
49/* Sub display image resolution register */
50#define DSIM_SDRESOL_REG 0x28
51#define DSIM_INTSRC_REG 0x2c /* Interrupt source register */
52#define DSIM_INTMSK_REG 0x30 /* Interrupt mask register */
53#define DSIM_PKTHDR_REG 0x34 /* Packet Header FIFO register */
54#define DSIM_PAYLOAD_REG 0x38 /* Payload FIFO register */
55#define DSIM_RXFIFO_REG 0x3c /* Read FIFO register */
56#define DSIM_FIFOTHLD_REG 0x40 /* FIFO threshold level register */
57#define DSIM_FIFOCTRL_REG 0x44 /* FIFO status and control register */
58
59/* FIFO memory AC characteristic register */
60#define DSIM_PLLCTRL_REG 0x4c /* PLL control register */
61#define DSIM_PHYACCHR_REG 0x54 /* D-PHY AC characteristic register */
62#define DSIM_PHYACCHR1_REG 0x58 /* D-PHY AC characteristic register1 */
63#define DSIM_PHYCTRL_REG 0x5c
64#define DSIM_PHYTIMING_REG 0x64
65#define DSIM_PHYTIMING1_REG 0x68
66#define DSIM_PHYTIMING2_REG 0x6c
67
68/* DSIM_STATUS */ 38/* DSIM_STATUS */
69#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0) 39#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
70#define DSIM_STOP_STATE_CLK (1 << 8) 40#define DSIM_STOP_STATE_CLK (1 << 8)
@@ -128,8 +98,8 @@
128 98
129/* DSIM_MDRESOL */ 99/* DSIM_MDRESOL */
130#define DSIM_MAIN_STAND_BY (1 << 31) 100#define DSIM_MAIN_STAND_BY (1 << 31)
131#define DSIM_MAIN_VRESOL(x) (((x) & 0x7ff) << 16) 101#define DSIM_MAIN_VRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 16)
132#define DSIM_MAIN_HRESOL(x) (((x) & 0X7ff) << 0) 102#define DSIM_MAIN_HRESOL(x, num_bits) (((x) & ((1 << (num_bits)) - 1)) << 0)
133 103
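
Note on the reworked macros: the mask is now derived from the variant's
resolution field width instead of being hard-coded to 0x7ff. A standalone
check in plain C (illustrative resolutions only) shows the two widths this
patch actually uses:

#include <stdio.h>

#define DSIM_MAIN_VRESOL(x, num_bits)	(((x) & ((1 << (num_bits)) - 1)) << 16)
#define DSIM_MAIN_HRESOL(x, num_bits)	(((x) & ((1 << (num_bits)) - 1)) << 0)

int main(void)
{
	/* 11 bits (older SoCs): mask 0x7ff, same as the old fixed macro */
	printf("0x%08x\n", DSIM_MAIN_VRESOL(1080, 11) | DSIM_MAIN_HRESOL(1920, 11));
	/* 12 bits (Exynos5433): mask 0xfff, doubling the addressable size */
	printf("0x%08x\n", DSIM_MAIN_VRESOL(2560, 12) | DSIM_MAIN_HRESOL(1440, 12));
	return 0;	/* prints 0x04380780 and 0x0a0005a0 */
}
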
134/* DSIM_MVPORCH */ 104/* DSIM_MVPORCH */
135#define DSIM_CMD_ALLOW(x) ((x) << 28) 105#define DSIM_CMD_ALLOW(x) ((x) << 28)
@@ -163,6 +133,7 @@
163#define DSIM_INT_PLL_STABLE (1 << 31) 133#define DSIM_INT_PLL_STABLE (1 << 31)
164#define DSIM_INT_SW_RST_RELEASE (1 << 30) 134#define DSIM_INT_SW_RST_RELEASE (1 << 30)
165#define DSIM_INT_SFR_FIFO_EMPTY (1 << 29) 135#define DSIM_INT_SFR_FIFO_EMPTY (1 << 29)
136#define DSIM_INT_SFR_HDR_FIFO_EMPTY (1 << 28)
166#define DSIM_INT_BTA (1 << 25) 137#define DSIM_INT_BTA (1 << 25)
167#define DSIM_INT_FRAME_DONE (1 << 24) 138#define DSIM_INT_FRAME_DONE (1 << 24)
168#define DSIM_INT_RX_TIMEOUT (1 << 21) 139#define DSIM_INT_RX_TIMEOUT (1 << 21)
@@ -211,6 +182,8 @@
211 182
212/* DSIM_PHYCTRL */ 183/* DSIM_PHYCTRL */
213#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0) 184#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
185#define DSIM_PHYCTRL_B_DPHYCTL_VREG_LP (1 << 30)
186#define DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP (1 << 14)
214 187
215/* DSIM_PHYTIMING */ 188/* DSIM_PHYTIMING */
216#define DSIM_PHYTIMING_LPX(x) ((x) << 8) 189#define DSIM_PHYTIMING_LPX(x) ((x) << 8)
@@ -234,6 +207,18 @@
234#define DSI_XFER_TIMEOUT_MS 100 207#define DSI_XFER_TIMEOUT_MS 100
235#define DSI_RX_FIFO_EMPTY 0x30800002 208#define DSI_RX_FIFO_EMPTY 0x30800002
236 209
210#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
211
212#define REG_ADDR(dsi, reg_idx) ((dsi)->reg_base + \
213 dsi->driver_data->reg_ofs[(reg_idx)])
214#define DSI_WRITE(dsi, reg_idx, val) writel((val), \
215 REG_ADDR((dsi), (reg_idx)))
216#define DSI_READ(dsi, reg_idx) readl(REG_ADDR((dsi), (reg_idx)))
217
218static char *clk_names[5] = { "bus_clk", "sclk_mipi",
219 "phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0",
220 "sclk_rgb_vclk_to_dsim0" };
221
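
Note: the DSI_READ()/DSI_WRITE() wrappers are what let one driver serve two
register layouts. Every access names a logical register index, and the
per-variant reg_ofs table turns that into a real offset. The pattern in
isolation (sketch with illustrative names, not the driver's exact code):

#include <linux/io.h>
#include <linux/types.h>

enum sketch_reg_idx { SKETCH_STATUS_REG, SKETCH_SWRST_REG, SKETCH_NUM_REGS };

/* one table per SoC variant, indexed by the logical register id */
static const unsigned int sketch_ofs_a[SKETCH_NUM_REGS] = { 0x00, 0x04 };
static const unsigned int sketch_ofs_b[SKETCH_NUM_REGS] = { 0x04, 0x0c };

struct sketch_dsi {
	void __iomem *reg_base;
	const unsigned int *reg_ofs;	/* points at one of the tables above */
};

static inline void sketch_write(struct sketch_dsi *d, int idx, u32 val)
{
	writel(val, d->reg_base + d->reg_ofs[idx]);
}

static inline u32 sketch_read(struct sketch_dsi *d, int idx)
{
	return readl(d->reg_base + d->reg_ofs[idx]);
}
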
237enum exynos_dsi_transfer_type { 222enum exynos_dsi_transfer_type {
238 EXYNOS_DSI_TX, 223 EXYNOS_DSI_TX,
239 EXYNOS_DSI_RX, 224 EXYNOS_DSI_RX,
@@ -259,12 +244,18 @@ struct exynos_dsi_transfer {
259#define DSIM_STATE_ENABLED BIT(0) 244#define DSIM_STATE_ENABLED BIT(0)
260#define DSIM_STATE_INITIALIZED BIT(1) 245#define DSIM_STATE_INITIALIZED BIT(1)
261#define DSIM_STATE_CMD_LPM BIT(2) 246#define DSIM_STATE_CMD_LPM BIT(2)
247#define DSIM_STATE_VIDOUT_AVAILABLE BIT(3)
262 248
263struct exynos_dsi_driver_data { 249struct exynos_dsi_driver_data {
250 unsigned int *reg_ofs;
264 unsigned int plltmr_reg; 251 unsigned int plltmr_reg;
265
266 unsigned int has_freqband:1; 252 unsigned int has_freqband:1;
267 unsigned int has_clklane_stop:1; 253 unsigned int has_clklane_stop:1;
254 unsigned int num_clks;
255 unsigned int max_freq;
256 unsigned int wait_for_reset;
257 unsigned int num_bits_resol;
258 unsigned int *reg_values;
268}; 259};
269 260
270struct exynos_dsi { 261struct exynos_dsi {
@@ -277,8 +268,7 @@ struct exynos_dsi {
277 268
278 void __iomem *reg_base; 269 void __iomem *reg_base;
279 struct phy *phy; 270 struct phy *phy;
280 struct clk *pll_clk; 271 struct clk **clks;
281 struct clk *bus_clk;
282 struct regulator_bulk_data supplies[2]; 272 struct regulator_bulk_data supplies[2];
283 int irq; 273 int irq;
284 int te_gpio; 274 int te_gpio;
@@ -299,6 +289,7 @@ struct exynos_dsi {
299 struct list_head transfer_list; 289 struct list_head transfer_list;
300 290
301 struct exynos_dsi_driver_data *driver_data; 291 struct exynos_dsi_driver_data *driver_data;
292 struct device_node *bridge_node;
302}; 293};
303 294
304#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host) 295#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
@@ -309,25 +300,186 @@ static inline struct exynos_dsi *display_to_dsi(struct exynos_drm_display *d)
309 return container_of(d, struct exynos_dsi, display); 300 return container_of(d, struct exynos_dsi, display);
310} 301}
311 302
303enum reg_idx {
304 DSIM_STATUS_REG, /* Status register */
305 DSIM_SWRST_REG, /* Software reset register */
306 DSIM_CLKCTRL_REG, /* Clock control register */
307 DSIM_TIMEOUT_REG, /* Time out register */
308 DSIM_CONFIG_REG, /* Configuration register */
309 DSIM_ESCMODE_REG, /* Escape mode register */
310 DSIM_MDRESOL_REG,
311 DSIM_MVPORCH_REG, /* Main display Vporch register */
312 DSIM_MHPORCH_REG, /* Main display Hporch register */
313 DSIM_MSYNC_REG, /* Main display sync area register */
314 DSIM_INTSRC_REG, /* Interrupt source register */
315 DSIM_INTMSK_REG, /* Interrupt mask register */
316 DSIM_PKTHDR_REG, /* Packet Header FIFO register */
317 DSIM_PAYLOAD_REG, /* Payload FIFO register */
318 DSIM_RXFIFO_REG, /* Read FIFO register */
319 DSIM_FIFOCTRL_REG, /* FIFO status and control register */
320 DSIM_PLLCTRL_REG, /* PLL control register */
321 DSIM_PHYCTRL_REG,
322 DSIM_PHYTIMING_REG,
323 DSIM_PHYTIMING1_REG,
324 DSIM_PHYTIMING2_REG,
325 NUM_REGS
326};
327static unsigned int exynos_reg_ofs[] = {
328 [DSIM_STATUS_REG] = 0x00,
329 [DSIM_SWRST_REG] = 0x04,
330 [DSIM_CLKCTRL_REG] = 0x08,
331 [DSIM_TIMEOUT_REG] = 0x0c,
332 [DSIM_CONFIG_REG] = 0x10,
333 [DSIM_ESCMODE_REG] = 0x14,
334 [DSIM_MDRESOL_REG] = 0x18,
335 [DSIM_MVPORCH_REG] = 0x1c,
336 [DSIM_MHPORCH_REG] = 0x20,
337 [DSIM_MSYNC_REG] = 0x24,
338 [DSIM_INTSRC_REG] = 0x2c,
339 [DSIM_INTMSK_REG] = 0x30,
340 [DSIM_PKTHDR_REG] = 0x34,
341 [DSIM_PAYLOAD_REG] = 0x38,
342 [DSIM_RXFIFO_REG] = 0x3c,
343 [DSIM_FIFOCTRL_REG] = 0x44,
344 [DSIM_PLLCTRL_REG] = 0x4c,
345 [DSIM_PHYCTRL_REG] = 0x5c,
346 [DSIM_PHYTIMING_REG] = 0x64,
347 [DSIM_PHYTIMING1_REG] = 0x68,
348 [DSIM_PHYTIMING2_REG] = 0x6c,
349};
350
351static unsigned int exynos5433_reg_ofs[] = {
352 [DSIM_STATUS_REG] = 0x04,
353 [DSIM_SWRST_REG] = 0x0C,
354 [DSIM_CLKCTRL_REG] = 0x10,
355 [DSIM_TIMEOUT_REG] = 0x14,
356 [DSIM_CONFIG_REG] = 0x18,
357 [DSIM_ESCMODE_REG] = 0x1C,
358 [DSIM_MDRESOL_REG] = 0x20,
359 [DSIM_MVPORCH_REG] = 0x24,
360 [DSIM_MHPORCH_REG] = 0x28,
361 [DSIM_MSYNC_REG] = 0x2C,
362 [DSIM_INTSRC_REG] = 0x34,
363 [DSIM_INTMSK_REG] = 0x38,
364 [DSIM_PKTHDR_REG] = 0x3C,
365 [DSIM_PAYLOAD_REG] = 0x40,
366 [DSIM_RXFIFO_REG] = 0x44,
367 [DSIM_FIFOCTRL_REG] = 0x4C,
368 [DSIM_PLLCTRL_REG] = 0x94,
369 [DSIM_PHYCTRL_REG] = 0xA4,
370 [DSIM_PHYTIMING_REG] = 0xB4,
371 [DSIM_PHYTIMING1_REG] = 0xB8,
372 [DSIM_PHYTIMING2_REG] = 0xBC,
373};
374
375enum reg_value_idx {
376 RESET_TYPE,
377 PLL_TIMER,
378 STOP_STATE_CNT,
379 PHYCTRL_ULPS_EXIT,
380 PHYCTRL_VREG_LP,
381 PHYCTRL_SLEW_UP,
382 PHYTIMING_LPX,
383 PHYTIMING_HS_EXIT,
384 PHYTIMING_CLK_PREPARE,
385 PHYTIMING_CLK_ZERO,
386 PHYTIMING_CLK_POST,
387 PHYTIMING_CLK_TRAIL,
388 PHYTIMING_HS_PREPARE,
389 PHYTIMING_HS_ZERO,
390 PHYTIMING_HS_TRAIL
391};
392
393static unsigned int reg_values[] = {
394 [RESET_TYPE] = DSIM_SWRST,
395 [PLL_TIMER] = 500,
396 [STOP_STATE_CNT] = 0xf,
397 [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x0af),
398 [PHYCTRL_VREG_LP] = 0,
399 [PHYCTRL_SLEW_UP] = 0,
400 [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x06),
401 [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0b),
402 [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x07),
403 [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x27),
404 [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
405 [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x08),
406 [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09),
407 [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0d),
408 [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0b),
409};
410
411static unsigned int exynos5433_reg_values[] = {
412 [RESET_TYPE] = DSIM_FUNCRST,
413 [PLL_TIMER] = 22200,
414 [STOP_STATE_CNT] = 0xa,
415 [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x190),
416 [PHYCTRL_VREG_LP] = DSIM_PHYCTRL_B_DPHYCTL_VREG_LP,
417 [PHYCTRL_SLEW_UP] = DSIM_PHYCTRL_B_DPHYCTL_SLEW_UP,
418 [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07),
419 [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c),
420 [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x09),
421 [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2d),
422 [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0e),
423 [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09),
424 [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x0b),
425 [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x10),
426 [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
427};
428
312static struct exynos_dsi_driver_data exynos3_dsi_driver_data = { 429static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
430 .reg_ofs = exynos_reg_ofs,
313 .plltmr_reg = 0x50, 431 .plltmr_reg = 0x50,
314 .has_freqband = 1, 432 .has_freqband = 1,
315 .has_clklane_stop = 1, 433 .has_clklane_stop = 1,
434 .num_clks = 2,
435 .max_freq = 1000,
436 .wait_for_reset = 1,
437 .num_bits_resol = 11,
438 .reg_values = reg_values,
316}; 439};
317 440
318static struct exynos_dsi_driver_data exynos4_dsi_driver_data = { 441static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
442 .reg_ofs = exynos_reg_ofs,
319 .plltmr_reg = 0x50, 443 .plltmr_reg = 0x50,
320 .has_freqband = 1, 444 .has_freqband = 1,
321 .has_clklane_stop = 1, 445 .has_clklane_stop = 1,
446 .num_clks = 2,
447 .max_freq = 1000,
448 .wait_for_reset = 1,
449 .num_bits_resol = 11,
450 .reg_values = reg_values,
322}; 451};
323 452
324static struct exynos_dsi_driver_data exynos4415_dsi_driver_data = { 453static struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
454 .reg_ofs = exynos_reg_ofs,
325 .plltmr_reg = 0x58, 455 .plltmr_reg = 0x58,
326 .has_clklane_stop = 1, 456 .has_clklane_stop = 1,
457 .num_clks = 2,
458 .max_freq = 1000,
459 .wait_for_reset = 1,
460 .num_bits_resol = 11,
461 .reg_values = reg_values,
327}; 462};
328 463
329static struct exynos_dsi_driver_data exynos5_dsi_driver_data = { 464static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
465 .reg_ofs = exynos_reg_ofs,
330 .plltmr_reg = 0x58, 466 .plltmr_reg = 0x58,
467 .num_clks = 2,
468 .max_freq = 1000,
469 .wait_for_reset = 1,
470 .num_bits_resol = 11,
471 .reg_values = reg_values,
472};
473
474static struct exynos_dsi_driver_data exynos5433_dsi_driver_data = {
475 .reg_ofs = exynos5433_reg_ofs,
476 .plltmr_reg = 0xa0,
477 .has_clklane_stop = 1,
478 .num_clks = 5,
479 .max_freq = 1500,
480 .wait_for_reset = 0,
481 .num_bits_resol = 12,
482 .reg_values = exynos5433_reg_values,
331}; 483};
332 484
333static struct of_device_id exynos_dsi_of_match[] = { 485static struct of_device_id exynos_dsi_of_match[] = {
@@ -339,6 +491,8 @@ static struct of_device_id exynos_dsi_of_match[] = {
339 .data = &exynos4415_dsi_driver_data }, 491 .data = &exynos4415_dsi_driver_data },
340 { .compatible = "samsung,exynos5410-mipi-dsi", 492 { .compatible = "samsung,exynos5410-mipi-dsi",
341 .data = &exynos5_dsi_driver_data }, 493 .data = &exynos5_dsi_driver_data },
494 { .compatible = "samsung,exynos5433-mipi-dsi",
495 .data = &exynos5433_dsi_driver_data },
342 { } 496 { }
343}; 497};
344 498
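
Note: at probe time the driver_data pointer is recovered from whichever
compatible string matched. A sketch of that lookup, mirroring the
drm_fimd_get_driver_data() helper visible further down in this diff (the DSI
equivalent is assumed here):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static inline struct exynos_dsi_driver_data *
exynos_dsi_get_driver_data(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(exynos_dsi_of_match, &pdev->dev);

	return (struct exynos_dsi_driver_data *)of_id->data;
}
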
@@ -361,8 +515,10 @@ static void exynos_dsi_wait_for_reset(struct exynos_dsi *dsi)
361 515
362static void exynos_dsi_reset(struct exynos_dsi *dsi) 516static void exynos_dsi_reset(struct exynos_dsi *dsi)
363{ 517{
518 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
519
364 reinit_completion(&dsi->completed); 520 reinit_completion(&dsi->completed);
365 writel(DSIM_SWRST, dsi->reg_base + DSIM_SWRST_REG); 521 DSI_WRITE(dsi, DSIM_SWRST_REG, driver_data->reg_values[RESET_TYPE]);
366} 522}
367 523
368#ifndef MHZ 524#ifndef MHZ
@@ -372,6 +528,7 @@ static void exynos_dsi_reset(struct exynos_dsi *dsi)
372static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi, 528static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
373 unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s) 529 unsigned long fin, unsigned long fout, u8 *p, u16 *m, u8 *s)
374{ 530{
531 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
375 unsigned long best_freq = 0; 532 unsigned long best_freq = 0;
376 u32 min_delta = 0xffffffff; 533 u32 min_delta = 0xffffffff;
377 u8 p_min, p_max; 534 u8 p_min, p_max;
@@ -395,7 +552,8 @@ static unsigned long exynos_dsi_pll_find_pms(struct exynos_dsi *dsi,
395 552
396 tmp = (u64)_m * fin; 553 tmp = (u64)_m * fin;
397 do_div(tmp, _p); 554 do_div(tmp, _p);
398 if (tmp < 500 * MHZ || tmp > 1000 * MHZ) 555 if (tmp < 500 * MHZ ||
556 tmp > driver_data->max_freq * MHZ)
399 continue; 557 continue;
400 558
401 tmp = (u64)_m * fin; 559 tmp = (u64)_m * fin;
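
Note: max_freq is the VCO ceiling in MHz. The loop accepts a (p, m) pair only
if m * fin / p lands between 500 MHz and max_freq, and the final rate is that
value divided by 2^s. A worked example with assumed numbers (fin itself comes
from the samsung,pll-clock-frequency DT property):

#include <stdio.h>

int main(void)
{
	unsigned long long fin = 24000000ULL;	/* assumed 24 MHz reference */
	unsigned int p = 3, m = 125, s = 1;
	unsigned long long vco = m * fin / p;	/* 1000 MHz: legal on every
						   variant, since max_freq is
						   1000 or 1500 */
	unsigned long long fout = vco >> s;	/* 500 MHz HS clock */

	printf("vco=%llu fout=%llu\n", vco, fout);
	return 0;
}
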
@@ -431,16 +589,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
431 u16 m; 589 u16 m;
432 u32 reg; 590 u32 reg;
433 591
434 clk_set_rate(dsi->pll_clk, dsi->pll_clk_rate); 592 fin = dsi->pll_clk_rate;
435
436 fin = clk_get_rate(dsi->pll_clk);
437 if (!fin) {
438 dev_err(dsi->dev, "failed to get PLL clock frequency\n");
439 return 0;
440 }
441
442 dev_dbg(dsi->dev, "PLL input frequency: %lu\n", fin);
443
444 fout = exynos_dsi_pll_find_pms(dsi, fin, freq, &p, &m, &s); 593 fout = exynos_dsi_pll_find_pms(dsi, fin, freq, &p, &m, &s);
445 if (!fout) { 594 if (!fout) {
446 dev_err(dsi->dev, 595 dev_err(dsi->dev,
@@ -449,7 +598,8 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
449 } 598 }
450 dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s); 599 dev_dbg(dsi->dev, "PLL freq %lu, (p %d, m %d, s %d)\n", fout, p, m, s);
451 600
452 writel(500, dsi->reg_base + driver_data->plltmr_reg); 601 writel(driver_data->reg_values[PLL_TIMER],
602 dsi->reg_base + driver_data->plltmr_reg);
453 603
454 reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s); 604 reg = DSIM_PLL_EN | DSIM_PLL_P(p) | DSIM_PLL_M(m) | DSIM_PLL_S(s);
455 605
@@ -471,7 +621,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
471 reg |= DSIM_FREQ_BAND(band); 621 reg |= DSIM_FREQ_BAND(band);
472 } 622 }
473 623
474 writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG); 624 DSI_WRITE(dsi, DSIM_PLLCTRL_REG, reg);
475 625
476 timeout = 1000; 626 timeout = 1000;
477 do { 627 do {
@@ -479,7 +629,7 @@ static unsigned long exynos_dsi_set_pll(struct exynos_dsi *dsi,
479 dev_err(dsi->dev, "PLL failed to stabilize\n"); 629 dev_err(dsi->dev, "PLL failed to stabilize\n");
480 return 0; 630 return 0;
481 } 631 }
482 reg = readl(dsi->reg_base + DSIM_STATUS_REG); 632 reg = DSI_READ(dsi, DSIM_STATUS_REG);
483 } while ((reg & DSIM_PLL_STABLE) == 0); 633 } while ((reg & DSIM_PLL_STABLE) == 0);
484 634
485 return fout; 635 return fout;
@@ -509,7 +659,7 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
509 dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n", 659 dev_dbg(dsi->dev, "hs_clk = %lu, byte_clk = %lu, esc_clk = %lu\n",
510 hs_clk, byte_clk, esc_clk); 660 hs_clk, byte_clk, esc_clk);
511 661
512 reg = readl(dsi->reg_base + DSIM_CLKCTRL_REG); 662 reg = DSI_READ(dsi, DSIM_CLKCTRL_REG);
513 reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK 663 reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
514 | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS 664 | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
515 | DSIM_BYTE_CLK_SRC_MASK); 665 | DSIM_BYTE_CLK_SRC_MASK);
@@ -519,7 +669,7 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
519 | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1) 669 | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
520 | DSIM_BYTE_CLK_SRC(0) 670 | DSIM_BYTE_CLK_SRC(0)
521 | DSIM_TX_REQUEST_HSCLK; 671 | DSIM_TX_REQUEST_HSCLK;
522 writel(reg, dsi->reg_base + DSIM_CLKCTRL_REG); 672 DSI_WRITE(dsi, DSIM_CLKCTRL_REG, reg);
523 673
524 return 0; 674 return 0;
525} 675}
@@ -527,22 +677,24 @@ static int exynos_dsi_enable_clock(struct exynos_dsi *dsi)
527static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi) 677static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
528{ 678{
529 struct exynos_dsi_driver_data *driver_data = dsi->driver_data; 679 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
680 unsigned int *reg_values = driver_data->reg_values;
530 u32 reg; 681 u32 reg;
531 682
532 if (driver_data->has_freqband) 683 if (driver_data->has_freqband)
533 return; 684 return;
534 685
535 /* B D-PHY: D-PHY Master & Slave Analog Block control */ 686 /* B D-PHY: D-PHY Master & Slave Analog Block control */
536 reg = DSIM_PHYCTRL_ULPS_EXIT(0x0af); 687 reg = reg_values[PHYCTRL_ULPS_EXIT] | reg_values[PHYCTRL_VREG_LP] |
537 writel(reg, dsi->reg_base + DSIM_PHYCTRL_REG); 688 reg_values[PHYCTRL_SLEW_UP];
689 DSI_WRITE(dsi, DSIM_PHYCTRL_REG, reg);
538 690
539 /* 691 /*
540 * T LPX: Transmitted length of any Low-Power state period 692 * T LPX: Transmitted length of any Low-Power state period
541 * T HS-EXIT: Time that the transmitter drives LP-11 following a HS 693 * T HS-EXIT: Time that the transmitter drives LP-11 following a HS
542 * burst 694 * burst
543 */ 695 */
544 reg = DSIM_PHYTIMING_LPX(0x06) | DSIM_PHYTIMING_HS_EXIT(0x0b); 696 reg = reg_values[PHYTIMING_LPX] | reg_values[PHYTIMING_HS_EXIT];
545 writel(reg, dsi->reg_base + DSIM_PHYTIMING_REG); 697 DSI_WRITE(dsi, DSIM_PHYTIMING_REG, reg);
546 698
547 /* 699 /*
548 * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00 700 * T CLK-PREPARE: Time that the transmitter drives the Clock Lane LP-00
@@ -557,11 +709,12 @@ static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
557 * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after 709 * T CLK-TRAIL: Time that the transmitter drives the HS-0 state after
558 * the last payload clock bit of a HS transmission burst 710 * the last payload clock bit of a HS transmission burst
559 */ 711 */
560 reg = DSIM_PHYTIMING1_CLK_PREPARE(0x07) | 712 reg = reg_values[PHYTIMING_CLK_PREPARE] |
561 DSIM_PHYTIMING1_CLK_ZERO(0x27) | 713 reg_values[PHYTIMING_CLK_ZERO] |
562 DSIM_PHYTIMING1_CLK_POST(0x0d) | 714 reg_values[PHYTIMING_CLK_POST] |
563 DSIM_PHYTIMING1_CLK_TRAIL(0x08); 715 reg_values[PHYTIMING_CLK_TRAIL];
564 writel(reg, dsi->reg_base + DSIM_PHYTIMING1_REG); 716
717 DSI_WRITE(dsi, DSIM_PHYTIMING1_REG, reg);
565 718
566 /* 719 /*
567 * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00 720 * T HS-PREPARE: Time that the transmitter drives the Data Lane LP-00
@@ -572,23 +725,31 @@ static void exynos_dsi_set_phy_ctrl(struct exynos_dsi *dsi)
572 * T HS-TRAIL: Time that the transmitter drives the flipped differential 725 * T HS-TRAIL: Time that the transmitter drives the flipped differential
573 * state after last payload data bit of a HS transmission burst 726 * state after last payload data bit of a HS transmission burst
574 */ 727 */
575 reg = DSIM_PHYTIMING2_HS_PREPARE(0x09) | DSIM_PHYTIMING2_HS_ZERO(0x0d) | 728 reg = reg_values[PHYTIMING_HS_PREPARE] | reg_values[PHYTIMING_HS_ZERO] |
576 DSIM_PHYTIMING2_HS_TRAIL(0x0b); 729 reg_values[PHYTIMING_HS_TRAIL];
577 writel(reg, dsi->reg_base + DSIM_PHYTIMING2_REG); 730 DSI_WRITE(dsi, DSIM_PHYTIMING2_REG, reg);
578} 731}
579 732
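
Note: exynos_dsi_set_phy_ctrl() now only ORs pre-shifted table entries
together, so the per-SoC differences live entirely in the reg_values arrays.
Composing DSIM_PHYTIMING_REG by hand shows what an entry encodes (the HS-EXIT
shift of 0 is assumed from the driver's macro set):

#include <stdio.h>

#define DSIM_PHYTIMING_LPX(x)		((x) << 8)
#define DSIM_PHYTIMING_HS_EXIT(x)	((x) << 0)	/* assumed shift */

int main(void)
{
	/* default table: LPX 0x06, HS-EXIT 0x0b, register value 0x060b */
	printf("0x%04x\n", DSIM_PHYTIMING_LPX(0x06) | DSIM_PHYTIMING_HS_EXIT(0x0b));
	/* exynos5433 table: LPX 0x07, HS-EXIT 0x0c, register value 0x070c */
	printf("0x%04x\n", DSIM_PHYTIMING_LPX(0x07) | DSIM_PHYTIMING_HS_EXIT(0x0c));
	return 0;
}
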
580static void exynos_dsi_disable_clock(struct exynos_dsi *dsi) 733static void exynos_dsi_disable_clock(struct exynos_dsi *dsi)
581{ 734{
582 u32 reg; 735 u32 reg;
583 736
584 reg = readl(dsi->reg_base + DSIM_CLKCTRL_REG); 737 reg = DSI_READ(dsi, DSIM_CLKCTRL_REG);
585 reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK 738 reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
586 | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN); 739 | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
587 writel(reg, dsi->reg_base + DSIM_CLKCTRL_REG); 740 DSI_WRITE(dsi, DSIM_CLKCTRL_REG, reg);
588 741
589 reg = readl(dsi->reg_base + DSIM_PLLCTRL_REG); 742 reg = DSI_READ(dsi, DSIM_PLLCTRL_REG);
590 reg &= ~DSIM_PLL_EN; 743 reg &= ~DSIM_PLL_EN;
591 writel(reg, dsi->reg_base + DSIM_PLLCTRL_REG); 744 DSI_WRITE(dsi, DSIM_PLLCTRL_REG, reg);
745}
746
747static void exynos_dsi_enable_lane(struct exynos_dsi *dsi, u32 lane)
748{
749 u32 reg = DSI_READ(dsi, DSIM_CONFIG_REG);
750 reg |= (DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1) | DSIM_LANE_EN_CLK |
751 DSIM_LANE_EN(lane));
752 DSI_WRITE(dsi, DSIM_CONFIG_REG, reg);
592} 753}
593 754
594static int exynos_dsi_init_link(struct exynos_dsi *dsi) 755static int exynos_dsi_init_link(struct exynos_dsi *dsi)
@@ -599,15 +760,14 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
599 u32 lanes_mask; 760 u32 lanes_mask;
600 761
601 /* Initialize FIFO pointers */ 762 /* Initialize FIFO pointers */
602 reg = readl(dsi->reg_base + DSIM_FIFOCTRL_REG); 763 reg = DSI_READ(dsi, DSIM_FIFOCTRL_REG);
603 reg &= ~0x1f; 764 reg &= ~0x1f;
604 writel(reg, dsi->reg_base + DSIM_FIFOCTRL_REG); 765 DSI_WRITE(dsi, DSIM_FIFOCTRL_REG, reg);
605 766
606 usleep_range(9000, 11000); 767 usleep_range(9000, 11000);
607 768
608 reg |= 0x1f; 769 reg |= 0x1f;
609 writel(reg, dsi->reg_base + DSIM_FIFOCTRL_REG); 770 DSI_WRITE(dsi, DSIM_FIFOCTRL_REG, reg);
610
611 usleep_range(9000, 11000); 771 usleep_range(9000, 11000);
612 772
613 /* DSI configuration */ 773 /* DSI configuration */
@@ -664,17 +824,6 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
664 return -EINVAL; 824 return -EINVAL;
665 } 825 }
666 826
667 reg |= DSIM_NUM_OF_DATA_LANE(dsi->lanes - 1);
668
669 writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
670
671 reg |= DSIM_LANE_EN_CLK;
672 writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
673
674 lanes_mask = BIT(dsi->lanes) - 1;
675 reg |= DSIM_LANE_EN(lanes_mask);
676 writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
677
678 /* 827 /*
679 * Use non-continuous clock mode if the peripheral requests it and 828 * Use non-continuous clock mode if the peripheral requests it and
680 * the host controller supports it 829 * the host controller supports it
@@ -686,8 +835,11 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
686 if (driver_data->has_clklane_stop && 835 if (driver_data->has_clklane_stop &&
687 dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { 836 dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
688 reg |= DSIM_CLKLANE_STOP; 837 reg |= DSIM_CLKLANE_STOP;
689 writel(reg, dsi->reg_base + DSIM_CONFIG_REG);
690 } 838 }
839 DSI_WRITE(dsi, DSIM_CONFIG_REG, reg);
840
841 lanes_mask = BIT(dsi->lanes) - 1;
842 exynos_dsi_enable_lane(dsi, lanes_mask);
691 843
692 /* Check that the clock and data lanes are in stop state */ 844 /* Check that the clock and data lanes are in stop state */
693 timeout = 100; 845 timeout = 100;
@@ -697,19 +849,19 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
697 return -EFAULT; 849 return -EFAULT;
698 } 850 }
699 851
700 reg = readl(dsi->reg_base + DSIM_STATUS_REG); 852 reg = DSI_READ(dsi, DSIM_STATUS_REG);
701 if ((reg & DSIM_STOP_STATE_DAT(lanes_mask)) 853 if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
702 != DSIM_STOP_STATE_DAT(lanes_mask)) 854 != DSIM_STOP_STATE_DAT(lanes_mask))
703 continue; 855 continue;
704 } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK))); 856 } while (!(reg & (DSIM_STOP_STATE_CLK | DSIM_TX_READY_HS_CLK)));
705 857
706 reg = readl(dsi->reg_base + DSIM_ESCMODE_REG); 858 reg = DSI_READ(dsi, DSIM_ESCMODE_REG);
707 reg &= ~DSIM_STOP_STATE_CNT_MASK; 859 reg &= ~DSIM_STOP_STATE_CNT_MASK;
708 reg |= DSIM_STOP_STATE_CNT(0xf); 860 reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
709 writel(reg, dsi->reg_base + DSIM_ESCMODE_REG); 861 DSI_WRITE(dsi, DSIM_ESCMODE_REG, reg);
710 862
711 reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff); 863 reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
712 writel(reg, dsi->reg_base + DSIM_TIMEOUT_REG); 864 DSI_WRITE(dsi, DSIM_TIMEOUT_REG, reg);
713 865
714 return 0; 866 return 0;
715} 867}
@@ -717,25 +869,27 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
717static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi) 869static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi)
718{ 870{
719 struct videomode *vm = &dsi->vm; 871 struct videomode *vm = &dsi->vm;
872 unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
720 u32 reg; 873 u32 reg;
721 874
722 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { 875 if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
723 reg = DSIM_CMD_ALLOW(0xf) 876 reg = DSIM_CMD_ALLOW(0xf)
724 | DSIM_STABLE_VFP(vm->vfront_porch) 877 | DSIM_STABLE_VFP(vm->vfront_porch)
725 | DSIM_MAIN_VBP(vm->vback_porch); 878 | DSIM_MAIN_VBP(vm->vback_porch);
726 writel(reg, dsi->reg_base + DSIM_MVPORCH_REG); 879 DSI_WRITE(dsi, DSIM_MVPORCH_REG, reg);
727 880
728 reg = DSIM_MAIN_HFP(vm->hfront_porch) 881 reg = DSIM_MAIN_HFP(vm->hfront_porch)
729 | DSIM_MAIN_HBP(vm->hback_porch); 882 | DSIM_MAIN_HBP(vm->hback_porch);
730 writel(reg, dsi->reg_base + DSIM_MHPORCH_REG); 883 DSI_WRITE(dsi, DSIM_MHPORCH_REG, reg);
731 884
732 reg = DSIM_MAIN_VSA(vm->vsync_len) 885 reg = DSIM_MAIN_VSA(vm->vsync_len)
733 | DSIM_MAIN_HSA(vm->hsync_len); 886 | DSIM_MAIN_HSA(vm->hsync_len);
734 writel(reg, dsi->reg_base + DSIM_MSYNC_REG); 887 DSI_WRITE(dsi, DSIM_MSYNC_REG, reg);
735 } 888 }
889 reg = DSIM_MAIN_HRESOL(vm->hactive, num_bits_resol) |
890 DSIM_MAIN_VRESOL(vm->vactive, num_bits_resol);
736 891
737 reg = DSIM_MAIN_HRESOL(vm->hactive) | DSIM_MAIN_VRESOL(vm->vactive); 892 DSI_WRITE(dsi, DSIM_MDRESOL_REG, reg);
738 writel(reg, dsi->reg_base + DSIM_MDRESOL_REG);
739 893
740 dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive); 894 dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive);
741} 895}
@@ -744,12 +898,12 @@ static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable)
744{ 898{
745 u32 reg; 899 u32 reg;
746 900
747 reg = readl(dsi->reg_base + DSIM_MDRESOL_REG); 901 reg = DSI_READ(dsi, DSIM_MDRESOL_REG);
748 if (enable) 902 if (enable)
749 reg |= DSIM_MAIN_STAND_BY; 903 reg |= DSIM_MAIN_STAND_BY;
750 else 904 else
751 reg &= ~DSIM_MAIN_STAND_BY; 905 reg &= ~DSIM_MAIN_STAND_BY;
752 writel(reg, dsi->reg_base + DSIM_MDRESOL_REG); 906 DSI_WRITE(dsi, DSIM_MDRESOL_REG, reg);
753} 907}
754 908
755static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi) 909static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
@@ -757,7 +911,7 @@ static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
757 int timeout = 2000; 911 int timeout = 2000;
758 912
759 do { 913 do {
760 u32 reg = readl(dsi->reg_base + DSIM_FIFOCTRL_REG); 914 u32 reg = DSI_READ(dsi, DSIM_FIFOCTRL_REG);
761 915
762 if (!(reg & DSIM_SFR_HEADER_FULL)) 916 if (!(reg & DSIM_SFR_HEADER_FULL))
763 return 0; 917 return 0;
@@ -771,22 +925,21 @@ static int exynos_dsi_wait_for_hdr_fifo(struct exynos_dsi *dsi)
771 925
772static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm) 926static void exynos_dsi_set_cmd_lpm(struct exynos_dsi *dsi, bool lpm)
773{ 927{
774 u32 v = readl(dsi->reg_base + DSIM_ESCMODE_REG); 928 u32 v = DSI_READ(dsi, DSIM_ESCMODE_REG);
775 929
776 if (lpm) 930 if (lpm)
777 v |= DSIM_CMD_LPDT_LP; 931 v |= DSIM_CMD_LPDT_LP;
778 else 932 else
779 v &= ~DSIM_CMD_LPDT_LP; 933 v &= ~DSIM_CMD_LPDT_LP;
780 934
781 writel(v, dsi->reg_base + DSIM_ESCMODE_REG); 935 DSI_WRITE(dsi, DSIM_ESCMODE_REG, v);
782} 936}
783 937
784static void exynos_dsi_force_bta(struct exynos_dsi *dsi) 938static void exynos_dsi_force_bta(struct exynos_dsi *dsi)
785{ 939{
786 u32 v = readl(dsi->reg_base + DSIM_ESCMODE_REG); 940 u32 v = DSI_READ(dsi, DSIM_ESCMODE_REG);
787
788 v |= DSIM_FORCE_BTA; 941 v |= DSIM_FORCE_BTA;
789 writel(v, dsi->reg_base + DSIM_ESCMODE_REG); 942 DSI_WRITE(dsi, DSIM_ESCMODE_REG, v);
790} 943}
791 944
792static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, 945static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
@@ -810,7 +963,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
810 while (length >= 4) { 963 while (length >= 4) {
811 reg = (payload[3] << 24) | (payload[2] << 16) 964 reg = (payload[3] << 24) | (payload[2] << 16)
812 | (payload[1] << 8) | payload[0]; 965 | (payload[1] << 8) | payload[0];
813 writel(reg, dsi->reg_base + DSIM_PAYLOAD_REG); 966 DSI_WRITE(dsi, DSIM_PAYLOAD_REG, reg);
814 payload += 4; 967 payload += 4;
815 length -= 4; 968 length -= 4;
816 } 969 }
@@ -825,7 +978,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
825 /* Fall through */ 978 /* Fall through */
826 case 1: 979 case 1:
827 reg |= payload[0]; 980 reg |= payload[0];
828 writel(reg, dsi->reg_base + DSIM_PAYLOAD_REG); 981 DSI_WRITE(dsi, DSIM_PAYLOAD_REG, reg);
829 break; 982 break;
830 case 0: 983 case 0:
831 /* Do nothing */ 984 /* Do nothing */
@@ -848,7 +1001,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi,
848 dsi->state ^= DSIM_STATE_CMD_LPM; 1001 dsi->state ^= DSIM_STATE_CMD_LPM;
849 } 1002 }
850 1003
851 writel(reg, dsi->reg_base + DSIM_PKTHDR_REG); 1004 DSI_WRITE(dsi, DSIM_PKTHDR_REG, reg);
852 1005
853 if (xfer->flags & MIPI_DSI_MSG_REQ_ACK) 1006 if (xfer->flags & MIPI_DSI_MSG_REQ_ACK)
854 exynos_dsi_force_bta(dsi); 1007 exynos_dsi_force_bta(dsi);
@@ -864,7 +1017,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
864 u32 reg; 1017 u32 reg;
865 1018
866 if (first) { 1019 if (first) {
867 reg = readl(dsi->reg_base + DSIM_RXFIFO_REG); 1020 reg = DSI_READ(dsi, DSIM_RXFIFO_REG);
868 1021
869 switch (reg & 0x3f) { 1022 switch (reg & 0x3f) {
870 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE: 1023 case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
@@ -903,7 +1056,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
903 1056
904 /* Receive payload */ 1057 /* Receive payload */
905 while (length >= 4) { 1058 while (length >= 4) {
906 reg = readl(dsi->reg_base + DSIM_RXFIFO_REG); 1059 reg = DSI_READ(dsi, DSIM_RXFIFO_REG);
907 payload[0] = (reg >> 0) & 0xff; 1060 payload[0] = (reg >> 0) & 0xff;
908 payload[1] = (reg >> 8) & 0xff; 1061 payload[1] = (reg >> 8) & 0xff;
909 payload[2] = (reg >> 16) & 0xff; 1062 payload[2] = (reg >> 16) & 0xff;
@@ -913,7 +1066,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
913 } 1066 }
914 1067
915 if (length) { 1068 if (length) {
916 reg = readl(dsi->reg_base + DSIM_RXFIFO_REG); 1069 reg = DSI_READ(dsi, DSIM_RXFIFO_REG);
917 switch (length) { 1070 switch (length) {
918 case 3: 1071 case 3:
919 payload[2] = (reg >> 16) & 0xff; 1072 payload[2] = (reg >> 16) & 0xff;
@@ -932,7 +1085,7 @@ static void exynos_dsi_read_from_fifo(struct exynos_dsi *dsi,
932clear_fifo: 1085clear_fifo:
933 length = DSI_RX_FIFO_SIZE / 4; 1086 length = DSI_RX_FIFO_SIZE / 4;
934 do { 1087 do {
935 reg = readl(dsi->reg_base + DSIM_RXFIFO_REG); 1088 reg = DSI_READ(dsi, DSIM_RXFIFO_REG);
936 if (reg == DSI_RX_FIFO_EMPTY) 1089 if (reg == DSI_RX_FIFO_EMPTY)
937 break; 1090 break;
938 } while (--length); 1091 } while (--length);
@@ -1088,23 +1241,26 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
1088 struct exynos_dsi *dsi = dev_id; 1241 struct exynos_dsi *dsi = dev_id;
1089 u32 status; 1242 u32 status;
1090 1243
1091 status = readl(dsi->reg_base + DSIM_INTSRC_REG); 1244 status = DSI_READ(dsi, DSIM_INTSRC_REG);
1092 if (!status) { 1245 if (!status) {
1093 static unsigned long int j; 1246 static unsigned long int j;
1094 if (printk_timed_ratelimit(&j, 500)) 1247 if (printk_timed_ratelimit(&j, 500))
1095 dev_warn(dsi->dev, "spurious interrupt\n"); 1248 dev_warn(dsi->dev, "spurious interrupt\n");
1096 return IRQ_HANDLED; 1249 return IRQ_HANDLED;
1097 } 1250 }
1098 writel(status, dsi->reg_base + DSIM_INTSRC_REG); 1251 DSI_WRITE(dsi, DSIM_INTSRC_REG, status);
1099 1252
1100 if (status & DSIM_INT_SW_RST_RELEASE) { 1253 if (status & DSIM_INT_SW_RST_RELEASE) {
1101 u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY); 1254 u32 mask = ~(DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
1102 writel(mask, dsi->reg_base + DSIM_INTMSK_REG); 1255 DSIM_INT_SFR_HDR_FIFO_EMPTY | DSIM_INT_FRAME_DONE |
1256 DSIM_INT_RX_ECC_ERR | DSIM_INT_SW_RST_RELEASE);
1257 DSI_WRITE(dsi, DSIM_INTMSK_REG, mask);
1103 complete(&dsi->completed); 1258 complete(&dsi->completed);
1104 return IRQ_HANDLED; 1259 return IRQ_HANDLED;
1105 } 1260 }
1106 1261
1107 if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY))) 1262 if (!(status & (DSIM_INT_RX_DONE | DSIM_INT_SFR_FIFO_EMPTY |
1263 DSIM_INT_FRAME_DONE | DSIM_INT_PLL_STABLE)))
1108 return IRQ_HANDLED; 1264 return IRQ_HANDLED;
1109 1265
1110 if (exynos_dsi_transfer_finish(dsi)) 1266 if (exynos_dsi_transfer_finish(dsi))
@@ -1118,7 +1274,7 @@ static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
1118 struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id; 1274 struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
1119 struct drm_encoder *encoder = dsi->display.encoder; 1275 struct drm_encoder *encoder = dsi->display.encoder;
1120 1276
1121 if (dsi->state & DSIM_STATE_ENABLED) 1277 if (dsi->state & DSIM_STATE_VIDOUT_AVAILABLE)
1122 exynos_drm_crtc_te_handler(encoder->crtc); 1278 exynos_drm_crtc_te_handler(encoder->crtc);
1123 1279
1124 return IRQ_HANDLED; 1280 return IRQ_HANDLED;
@@ -1142,10 +1298,17 @@ static void exynos_dsi_disable_irq(struct exynos_dsi *dsi)
1142 1298
1143static int exynos_dsi_init(struct exynos_dsi *dsi) 1299static int exynos_dsi_init(struct exynos_dsi *dsi)
1144{ 1300{
1301 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
1302
1145 exynos_dsi_reset(dsi); 1303 exynos_dsi_reset(dsi);
1146 exynos_dsi_enable_irq(dsi); 1304 exynos_dsi_enable_irq(dsi);
1305
1306 if (driver_data->reg_values[RESET_TYPE] == DSIM_FUNCRST)
1307 exynos_dsi_enable_lane(dsi, BIT(dsi->lanes) - 1);
1308
1147 exynos_dsi_enable_clock(dsi); 1309 exynos_dsi_enable_clock(dsi);
1148 exynos_dsi_wait_for_reset(dsi); 1310 if (driver_data->wait_for_reset)
1311 exynos_dsi_wait_for_reset(dsi);
1149 exynos_dsi_set_phy_ctrl(dsi); 1312 exynos_dsi_set_phy_ctrl(dsi);
1150 exynos_dsi_init_link(dsi); 1313 exynos_dsi_init_link(dsi);
1151 1314
@@ -1164,15 +1327,15 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
1164 goto out; 1327 goto out;
1165 } 1328 }
1166 1329
1167 ret = gpio_request_one(dsi->te_gpio, GPIOF_IN, "te_gpio"); 1330 ret = gpio_request(dsi->te_gpio, "te_gpio");
1168 if (ret) { 1331 if (ret) {
1169 dev_err(dsi->dev, "gpio request failed with %d\n", ret); 1332 dev_err(dsi->dev, "gpio request failed with %d\n", ret);
1170 goto out; 1333 goto out;
1171 } 1334 }
1172 1335
1173 te_gpio_irq = gpio_to_irq(dsi->te_gpio); 1336 te_gpio_irq = gpio_to_irq(dsi->te_gpio);
1174
1175 irq_set_status_flags(te_gpio_irq, IRQ_NOAUTOEN); 1337 irq_set_status_flags(te_gpio_irq, IRQ_NOAUTOEN);
1338
1176 ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL, 1339 ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
1177 IRQF_TRIGGER_RISING, "TE", dsi); 1340 IRQF_TRIGGER_RISING, "TE", dsi);
1178 if (ret) { 1341 if (ret) {
@@ -1251,6 +1414,9 @@ static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
1251 struct exynos_dsi_transfer xfer; 1414 struct exynos_dsi_transfer xfer;
1252 int ret; 1415 int ret;
1253 1416
1417 if (!(dsi->state & DSIM_STATE_ENABLED))
1418 return -EINVAL;
1419
1254 if (!(dsi->state & DSIM_STATE_INITIALIZED)) { 1420 if (!(dsi->state & DSIM_STATE_INITIALIZED)) {
1255 ret = exynos_dsi_init(dsi); 1421 ret = exynos_dsi_init(dsi);
1256 if (ret) 1422 if (ret)
@@ -1294,7 +1460,8 @@ static const struct mipi_dsi_host_ops exynos_dsi_ops = {
1294 1460
1295static int exynos_dsi_poweron(struct exynos_dsi *dsi) 1461static int exynos_dsi_poweron(struct exynos_dsi *dsi)
1296{ 1462{
1297 int ret; 1463 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
1464 int ret, i;
1298 1465
1299 ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies); 1466 ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1300 if (ret < 0) { 1467 if (ret < 0) {
@@ -1302,31 +1469,23 @@ static int exynos_dsi_poweron(struct exynos_dsi *dsi)
1302 return ret; 1469 return ret;
1303 } 1470 }
1304 1471
1305 ret = clk_prepare_enable(dsi->bus_clk); 1472 for (i = 0; i < driver_data->num_clks; i++) {
1306 if (ret < 0) { 1473 ret = clk_prepare_enable(dsi->clks[i]);
1307 dev_err(dsi->dev, "cannot enable bus clock %d\n", ret); 1474 if (ret < 0)
1308 goto err_bus_clk; 1475 goto err_clk;
1309 }
1310
1311 ret = clk_prepare_enable(dsi->pll_clk);
1312 if (ret < 0) {
1313 dev_err(dsi->dev, "cannot enable pll clock %d\n", ret);
1314 goto err_pll_clk;
1315 } 1476 }
1316 1477
1317 ret = phy_power_on(dsi->phy); 1478 ret = phy_power_on(dsi->phy);
1318 if (ret < 0) { 1479 if (ret < 0) {
1319 dev_err(dsi->dev, "cannot enable phy %d\n", ret); 1480 dev_err(dsi->dev, "cannot enable phy %d\n", ret);
1320 goto err_phy; 1481 goto err_clk;
1321 } 1482 }
1322 1483
1323 return 0; 1484 return 0;
1324 1485
1325err_phy: 1486err_clk:
1326 clk_disable_unprepare(dsi->pll_clk); 1487 while (--i > -1)
1327err_pll_clk: 1488 clk_disable_unprepare(dsi->clks[i]);
1328 clk_disable_unprepare(dsi->bus_clk);
1329err_bus_clk:
1330 regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); 1489 regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1331 1490
1332 return ret; 1491 return ret;
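
Note: the reworked error path collapses two named labels into one unwind
loop; when clk_prepare_enable() fails at index i, everything already enabled
is shut down in reverse (the patch's --i > -1 is the same test as --i >= 0).
The idiom in isolation (sketch, not the driver's exact code):

#include <linux/clk.h>

static int sketch_enable_clks(struct clk **clks, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = clk_prepare_enable(clks[i]);
		if (ret < 0)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)	/* walk back from the failed index */
		clk_disable_unprepare(clks[i]);
	return ret;
}
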
@@ -1334,7 +1493,8 @@ err_bus_clk:
1334 1493
1335static void exynos_dsi_poweroff(struct exynos_dsi *dsi) 1494static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
1336{ 1495{
1337 int ret; 1496 struct exynos_dsi_driver_data *driver_data = dsi->driver_data;
1497 int ret, i;
1338 1498
1339 usleep_range(10000, 20000); 1499 usleep_range(10000, 20000);
1340 1500
@@ -1350,8 +1510,8 @@ static void exynos_dsi_poweroff(struct exynos_dsi *dsi)
1350 1510
1351 phy_power_off(dsi->phy); 1511 phy_power_off(dsi->phy);
1352 1512
1353 clk_disable_unprepare(dsi->pll_clk); 1513 for (i = driver_data->num_clks - 1; i > -1; i--)
1354 clk_disable_unprepare(dsi->bus_clk); 1514 clk_disable_unprepare(dsi->clks[i]);
1355 1515
1356 ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies); 1516 ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
1357 if (ret < 0) 1517 if (ret < 0)
@@ -1369,8 +1529,11 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
1369 if (ret < 0) 1529 if (ret < 0)
1370 return ret; 1530 return ret;
1371 1531
1532 dsi->state |= DSIM_STATE_ENABLED;
1533
1372 ret = drm_panel_prepare(dsi->panel); 1534 ret = drm_panel_prepare(dsi->panel);
1373 if (ret < 0) { 1535 if (ret < 0) {
1536 dsi->state &= ~DSIM_STATE_ENABLED;
1374 exynos_dsi_poweroff(dsi); 1537 exynos_dsi_poweroff(dsi);
1375 return ret; 1538 return ret;
1376 } 1539 }
@@ -1378,8 +1541,6 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
1378 exynos_dsi_set_display_mode(dsi); 1541 exynos_dsi_set_display_mode(dsi);
1379 exynos_dsi_set_display_enable(dsi, true); 1542 exynos_dsi_set_display_enable(dsi, true);
1380 1543
1381 dsi->state |= DSIM_STATE_ENABLED;
1382
1383 ret = drm_panel_enable(dsi->panel); 1544 ret = drm_panel_enable(dsi->panel);
1384 if (ret < 0) { 1545 if (ret < 0) {
1385 dsi->state &= ~DSIM_STATE_ENABLED; 1546 dsi->state &= ~DSIM_STATE_ENABLED;
@@ -1389,6 +1550,8 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
1389 return ret; 1550 return ret;
1390 } 1551 }
1391 1552
1553 dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
1554
1392 return 0; 1555 return 0;
1393} 1556}
1394 1557
@@ -1397,12 +1560,15 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
1397 if (!(dsi->state & DSIM_STATE_ENABLED)) 1560 if (!(dsi->state & DSIM_STATE_ENABLED))
1398 return; 1561 return;
1399 1562
1563 dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
1564
1400 drm_panel_disable(dsi->panel); 1565 drm_panel_disable(dsi->panel);
1401 exynos_dsi_set_display_enable(dsi, false); 1566 exynos_dsi_set_display_enable(dsi, false);
1402 drm_panel_unprepare(dsi->panel); 1567 drm_panel_unprepare(dsi->panel);
1403 exynos_dsi_poweroff(dsi);
1404 1568
1405 dsi->state &= ~DSIM_STATE_ENABLED; 1569 dsi->state &= ~DSIM_STATE_ENABLED;
1570
1571 exynos_dsi_poweroff(dsi);
1406} 1572}
1407 1573
1408static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode) 1574static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
@@ -1457,10 +1623,13 @@ static void exynos_dsi_connector_destroy(struct drm_connector *connector)
1457} 1623}
1458 1624
1459static struct drm_connector_funcs exynos_dsi_connector_funcs = { 1625static struct drm_connector_funcs exynos_dsi_connector_funcs = {
1460 .dpms = drm_helper_connector_dpms, 1626 .dpms = drm_atomic_helper_connector_dpms,
1461 .detect = exynos_dsi_detect, 1627 .detect = exynos_dsi_detect,
1462 .fill_modes = drm_helper_probe_single_connector_modes, 1628 .fill_modes = drm_helper_probe_single_connector_modes,
1463 .destroy = exynos_dsi_connector_destroy, 1629 .destroy = exynos_dsi_connector_destroy,
1630 .reset = drm_atomic_helper_connector_reset,
1631 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1632 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1464}; 1633};
1465 1634
1466static int exynos_dsi_get_modes(struct drm_connector *connector) 1635static int exynos_dsi_get_modes(struct drm_connector *connector)
@@ -1627,7 +1796,22 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi)
1627 1796
1628 ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency", 1797 ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency",
1629 &dsi->esc_clk_rate); 1798 &dsi->esc_clk_rate);
1799 if (ret < 0)
1800 goto end;
1801
1802 of_node_put(ep);
1803
1804 ep = of_graph_get_next_endpoint(node, NULL);
1805 if (!ep) {
1806 ret = -ENXIO;
1807 goto end;
1808 }
1630 1809
1810 dsi->bridge_node = of_graph_get_remote_port_parent(ep);
1811 if (!dsi->bridge_node) {
1812 ret = -ENXIO;
1813 goto end;
1814 }
1631end: 1815end:
1632 of_node_put(ep); 1816 of_node_put(ep);
1633 1817
@@ -1640,6 +1824,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
1640 struct exynos_drm_display *display = dev_get_drvdata(dev); 1824 struct exynos_drm_display *display = dev_get_drvdata(dev);
1641 struct exynos_dsi *dsi = display_to_dsi(display); 1825 struct exynos_dsi *dsi = display_to_dsi(display);
1642 struct drm_device *drm_dev = data; 1826 struct drm_device *drm_dev = data;
1827 struct drm_bridge *bridge;
1643 int ret; 1828 int ret;
1644 1829
1645 ret = exynos_drm_create_enc_conn(drm_dev, display); 1830 ret = exynos_drm_create_enc_conn(drm_dev, display);
@@ -1649,6 +1834,12 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
1649 return ret; 1834 return ret;
1650 } 1835 }
1651 1836
1837 bridge = of_drm_find_bridge(dsi->bridge_node);
1838 if (bridge) {
1839 display->encoder->bridge = bridge;
1840 drm_bridge_attach(drm_dev, bridge);
1841 }
1842
1652 return mipi_dsi_host_register(&dsi->dsi_host); 1843 return mipi_dsi_host_register(&dsi->dsi_host);
1653} 1844}
1654 1845
@@ -1673,7 +1864,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1673 struct device *dev = &pdev->dev; 1864 struct device *dev = &pdev->dev;
1674 struct resource *res; 1865 struct resource *res;
1675 struct exynos_dsi *dsi; 1866 struct exynos_dsi *dsi;
1676 int ret; 1867 int ret, i;
1677 1868
1678 dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); 1869 dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
1679 if (!dsi) 1870 if (!dsi)
@@ -1682,11 +1873,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1682 dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD; 1873 dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD;
1683 dsi->display.ops = &exynos_dsi_display_ops; 1874 dsi->display.ops = &exynos_dsi_display_ops;
1684 1875
1685 ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
1686 dsi->display.type);
1687 if (ret)
1688 return ret;
1689
1690 /* Marked invalid until a TE gpio is parsed from DT */ 1876 /* Marked invalid until a TE gpio is parsed from DT */
1691 dsi->te_gpio = -ENOENT; 1877 dsi->te_gpio = -ENOENT;
1692 1878
@@ -1702,7 +1888,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1702 1888
1703 ret = exynos_dsi_parse_dt(dsi); 1889 ret = exynos_dsi_parse_dt(dsi);
1704 if (ret) 1890 if (ret)
1705 goto err_del_component; 1891 return ret;
1706 1892
1707 dsi->supplies[0].supply = "vddcore"; 1893 dsi->supplies[0].supply = "vddcore";
1708 dsi->supplies[1].supply = "vddio"; 1894 dsi->supplies[1].supply = "vddio";
@@ -1713,40 +1899,44 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1713 return -EPROBE_DEFER; 1899 return -EPROBE_DEFER;
1714 } 1900 }
1715 1901
1716 dsi->pll_clk = devm_clk_get(dev, "pll_clk"); 1902 dsi->clks = devm_kzalloc(dev,
1717 if (IS_ERR(dsi->pll_clk)) { 1903 sizeof(*dsi->clks) * dsi->driver_data->num_clks,
1718 dev_info(dev, "failed to get dsi pll input clock\n"); 1904 GFP_KERNEL);
1719 ret = PTR_ERR(dsi->pll_clk); 1905 if (!dsi->clks)
1720 goto err_del_component; 1906 return -ENOMEM;
1721 } 1907
1908 for (i = 0; i < dsi->driver_data->num_clks; i++) {
1909 dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
1910 if (IS_ERR(dsi->clks[i])) {
1911 if (strcmp(clk_names[i], "sclk_mipi") == 0) {
1912 clk_names[i] = OLD_SCLK_MIPI_CLK_NAME;
1913 i--;
1914 continue;
1915 }
1722 1916
1723 dsi->bus_clk = devm_clk_get(dev, "bus_clk"); 1917 dev_info(dev, "failed to get the clock: %s\n",
1724 if (IS_ERR(dsi->bus_clk)) { 1918 clk_names[i]);
1725 dev_info(dev, "failed to get dsi bus clock\n"); 1919 return PTR_ERR(dsi->clks[i]);
1726 ret = PTR_ERR(dsi->bus_clk); 1920 }
1727 goto err_del_component;
1728 } 1921 }
1729 1922
1730 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1923 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1731 dsi->reg_base = devm_ioremap_resource(dev, res); 1924 dsi->reg_base = devm_ioremap_resource(dev, res);
1732 if (IS_ERR(dsi->reg_base)) { 1925 if (IS_ERR(dsi->reg_base)) {
1733 dev_err(dev, "failed to remap io region\n"); 1926 dev_err(dev, "failed to remap io region\n");
1734 ret = PTR_ERR(dsi->reg_base); 1927 return PTR_ERR(dsi->reg_base);
1735 goto err_del_component;
1736 } 1928 }
1737 1929
1738 dsi->phy = devm_phy_get(dev, "dsim"); 1930 dsi->phy = devm_phy_get(dev, "dsim");
1739 if (IS_ERR(dsi->phy)) { 1931 if (IS_ERR(dsi->phy)) {
1740 dev_info(dev, "failed to get dsim phy\n"); 1932 dev_info(dev, "failed to get dsim phy\n");
1741 ret = PTR_ERR(dsi->phy); 1933 return PTR_ERR(dsi->phy);
1742 goto err_del_component;
1743 } 1934 }
1744 1935
1745 dsi->irq = platform_get_irq(pdev, 0); 1936 dsi->irq = platform_get_irq(pdev, 0);
1746 if (dsi->irq < 0) { 1937 if (dsi->irq < 0) {
1747 dev_err(dev, "failed to request dsi irq resource\n"); 1938 dev_err(dev, "failed to request dsi irq resource\n");
1748 ret = dsi->irq; 1939 return dsi->irq;
1749 goto err_del_component;
1750 } 1940 }
1751 1941
1752 irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN); 1942 irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
@@ -1755,26 +1945,17 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1755 dev_name(dev), dsi); 1945 dev_name(dev), dsi);
1756 if (ret) { 1946 if (ret) {
1757 dev_err(dev, "failed to request dsi irq\n"); 1947 dev_err(dev, "failed to request dsi irq\n");
1758 goto err_del_component; 1948 return ret;
1759 } 1949 }
1760 1950
1761 platform_set_drvdata(pdev, &dsi->display); 1951 platform_set_drvdata(pdev, &dsi->display);
1762 1952
1763 ret = component_add(dev, &exynos_dsi_component_ops); 1953 return component_add(dev, &exynos_dsi_component_ops);
1764 if (ret)
1765 goto err_del_component;
1766
1767 return ret;
1768
1769err_del_component:
1770 exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
1771 return ret;
1772} 1954}
1773 1955
1774static int exynos_dsi_remove(struct platform_device *pdev) 1956static int exynos_dsi_remove(struct platform_device *pdev)
1775{ 1957{
1776 component_del(&pdev->dev, &exynos_dsi_component_ops); 1958 component_del(&pdev->dev, &exynos_dsi_component_ops);
1777 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
1778 1959
1779 return 0; 1960 return 0;
1780} 1961}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 57de0bdc5a3b..7b89fd520e45 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -32,17 +32,6 @@ struct exynos_drm_encoder {
32 struct exynos_drm_display *display; 32 struct exynos_drm_display *display;
33}; 33};
34 34
35static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
36{
37 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
38 struct exynos_drm_display *display = exynos_encoder->display;
39
40 DRM_DEBUG_KMS("encoder dpms: %d\n", mode);
41
42 if (display->ops->dpms)
43 display->ops->dpms(display, mode);
44}
45
46static bool 35static bool
47exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder, 36exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
48 const struct drm_display_mode *mode, 37 const struct drm_display_mode *mode,
@@ -76,12 +65,7 @@ static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
76 display->ops->mode_set(display, adjusted_mode); 65 display->ops->mode_set(display, adjusted_mode);
77} 66}
78 67
79static void exynos_drm_encoder_prepare(struct drm_encoder *encoder) 68static void exynos_drm_encoder_enable(struct drm_encoder *encoder)
80{
81 /* drm framework doesn't check NULL. */
82}
83
84static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
85{ 69{
86 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder); 70 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
87 struct exynos_drm_display *display = exynos_encoder->display; 71 struct exynos_drm_display *display = exynos_encoder->display;
@@ -95,24 +79,17 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
95 79
96static void exynos_drm_encoder_disable(struct drm_encoder *encoder) 80static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
97{ 81{
98 struct drm_plane *plane; 82 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
99 struct drm_device *dev = encoder->dev; 83 struct exynos_drm_display *display = exynos_encoder->display;
100
101 exynos_drm_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
102 84
103 /* all planes connected to this encoder should be also disabled. */ 85 if (display->ops->dpms)
104 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { 86 display->ops->dpms(display, DRM_MODE_DPMS_OFF);
105 if (plane->crtc && (plane->crtc == encoder->crtc))
106 plane->funcs->disable_plane(plane);
107 }
108} 87}
109 88
110static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { 89static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
111 .dpms = exynos_drm_encoder_dpms,
112 .mode_fixup = exynos_drm_encoder_mode_fixup, 90 .mode_fixup = exynos_drm_encoder_mode_fixup,
113 .mode_set = exynos_drm_encoder_mode_set, 91 .mode_set = exynos_drm_encoder_mode_set,
114 .prepare = exynos_drm_encoder_prepare, 92 .enable = exynos_drm_encoder_enable,
115 .commit = exynos_drm_encoder_commit,
116 .disable = exynos_drm_encoder_disable, 93 .disable = exynos_drm_encoder_disable,
117}; 94};
118 95
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 142eb4e3f59e..2b6320e6eae2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -16,6 +16,8 @@
16#include <drm/drm_crtc.h> 16#include <drm/drm_crtc.h>
17#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc_helper.h>
18#include <drm/drm_fb_helper.h> 18#include <drm/drm_fb_helper.h>
19#include <drm/drm_atomic.h>
20#include <drm/drm_atomic_helper.h>
19#include <uapi/drm/exynos_drm.h> 21#include <uapi/drm/exynos_drm.h>
20 22
21#include "exynos_drm_drv.h" 23#include "exynos_drm_drv.h"
@@ -265,9 +267,46 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
265 exynos_drm_fbdev_init(dev); 267 exynos_drm_fbdev_init(dev);
266} 268}
267 269
270static int exynos_atomic_commit(struct drm_device *dev,
271 struct drm_atomic_state *state,
272 bool async)
273{
274 int ret;
275
276 ret = drm_atomic_helper_prepare_planes(dev, state);
277 if (ret)
278 return ret;
279
280 /* This is the point of no return */
281
282 drm_atomic_helper_swap_state(dev, state);
283
284 drm_atomic_helper_commit_modeset_disables(dev, state);
285
286 drm_atomic_helper_commit_modeset_enables(dev, state);
287
288 /*
289 * Exynos can't update planes while CRTCs and encoders are disabled:
290 * its update routines, especially for FIMD, require the clocks
291 * to be enabled. So the modeset operations must be handled
292 * *before* the commit_planes() step; this way the relevant
293 * clocks are always enabled when the planes are updated.
294 */
295
296 drm_atomic_helper_commit_planes(dev, state);
297
298 drm_atomic_helper_cleanup_planes(dev, state);
299
300 drm_atomic_state_free(state);
301
302 return 0;
303}
304
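
Note: this hook is not called directly; the DRM core invokes it after the
check phase. An illustrative sketch of the chain (assumed wiring, not core
code):

static int sketch_check_then_commit(struct drm_device *dev,
				    struct drm_atomic_state *state)
{
	/* ->atomic_check, as wired up in the funcs table below */
	int ret = drm_atomic_helper_check(dev, state);

	if (ret)
		return ret;

	/* ->atomic_commit, i.e. the function above (synchronous path) */
	return exynos_atomic_commit(dev, state, false);
}
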
268static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { 305static const struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
269 .fb_create = exynos_user_fb_create, 306 .fb_create = exynos_user_fb_create,
270 .output_poll_changed = exynos_drm_output_poll_changed, 307 .output_poll_changed = exynos_drm_output_poll_changed,
308 .atomic_check = drm_atomic_helper_check,
309 .atomic_commit = exynos_atomic_commit,
271}; 310};
272 311
273void exynos_drm_mode_config_init(struct drm_device *dev) 312void exynos_drm_mode_config_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e71e331f0188..e0b085b4bdfa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -275,9 +275,6 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
275 275
276 } 276 }
277 277
278 /* disable all the possible outputs/crtcs before entering KMS mode */
279 drm_helper_disable_unused_functions(dev);
280
281 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); 278 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
282 if (ret < 0) { 279 if (ret < 0) {
283 DRM_ERROR("failed to set up hw configuration.\n"); 280 DRM_ERROR("failed to set up hw configuration.\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index a0edab833148..794e56c8798e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -196,6 +196,62 @@ static inline struct fimd_driver_data *drm_fimd_get_driver_data(
196 return (struct fimd_driver_data *)of_id->data; 196 return (struct fimd_driver_data *)of_id->data;
197} 197}
198 198
199static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
200{
201 struct fimd_context *ctx = crtc->ctx;
202 u32 val;
203
204 if (ctx->suspended)
205 return -EPERM;
206
207 if (!test_and_set_bit(0, &ctx->irq_flags)) {
208 val = readl(ctx->regs + VIDINTCON0);
209
210 val |= VIDINTCON0_INT_ENABLE;
211
212 if (ctx->i80_if) {
213 val |= VIDINTCON0_INT_I80IFDONE;
214 val |= VIDINTCON0_INT_SYSMAINCON;
215 val &= ~VIDINTCON0_INT_SYSSUBCON;
216 } else {
217 val |= VIDINTCON0_INT_FRAME;
218
219 val &= ~VIDINTCON0_FRAMESEL0_MASK;
220 val |= VIDINTCON0_FRAMESEL0_VSYNC;
221 val &= ~VIDINTCON0_FRAMESEL1_MASK;
222 val |= VIDINTCON0_FRAMESEL1_NONE;
223 }
224
225 writel(val, ctx->regs + VIDINTCON0);
226 }
227
228 return 0;
229}
230
231static void fimd_disable_vblank(struct exynos_drm_crtc *crtc)
232{
233 struct fimd_context *ctx = crtc->ctx;
234 u32 val;
235
236 if (ctx->suspended)
237 return;
238
239 if (test_and_clear_bit(0, &ctx->irq_flags)) {
240 val = readl(ctx->regs + VIDINTCON0);
241
242 val &= ~VIDINTCON0_INT_ENABLE;
243
244 if (ctx->i80_if) {
245 val &= ~VIDINTCON0_INT_I80IFDONE;
246 val &= ~VIDINTCON0_INT_SYSMAINCON;
247 val &= ~VIDINTCON0_INT_SYSSUBCON;
248 } else
249 val &= ~VIDINTCON0_INT_FRAME;
250
251 writel(val, ctx->regs + VIDINTCON0);
252 }
253}
254
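
Note: the two vblank helpers are moved up unchanged so that
fimd_clear_channels() below can reuse them. Bit 0 of irq_flags makes each one
a one-shot, so only genuine on/off transitions reach VIDINTCON0. The guard in
isolation (illustrative):

#include <linux/bitops.h>

/* true only on the 0 -> 1 transition: program VIDINTCON0 once */
static bool sketch_vblank_needs_hw_enable(unsigned long *irq_flags)
{
	return !test_and_set_bit(0, irq_flags);
}

/* true only on the 1 -> 0 transition: clear VIDINTCON0 once */
static bool sketch_vblank_needs_hw_disable(unsigned long *irq_flags)
{
	return test_and_clear_bit(0, irq_flags);
}
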
199static void fimd_wait_for_vblank(struct exynos_drm_crtc *crtc) 255static void fimd_wait_for_vblank(struct exynos_drm_crtc *crtc)
200{ 256{
201 struct fimd_context *ctx = crtc->ctx; 257 struct fimd_context *ctx = crtc->ctx;
@@ -242,12 +298,19 @@ static void fimd_enable_shadow_channel_path(struct fimd_context *ctx,
242 writel(val, ctx->regs + SHADOWCON); 298 writel(val, ctx->regs + SHADOWCON);
243} 299}
244 300
245static void fimd_clear_channel(struct fimd_context *ctx) 301static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
246{ 302{
303 struct fimd_context *ctx = crtc->ctx;
247 unsigned int win, ch_enabled = 0; 304 unsigned int win, ch_enabled = 0;
248 305
249 DRM_DEBUG_KMS("%s\n", __FILE__); 306 DRM_DEBUG_KMS("%s\n", __FILE__);
250 307
308 /* Hardware is in an unknown state, so ensure it gets enabled properly */
309 pm_runtime_get_sync(ctx->dev);
310
311 clk_prepare_enable(ctx->bus_clk);
312 clk_prepare_enable(ctx->lcd_clk);
313
251 /* Check if any channel is enabled. */ 314 /* Check if any channel is enabled. */
252 for (win = 0; win < WINDOWS_NR; win++) { 315 for (win = 0; win < WINDOWS_NR; win++) {
253 u32 val = readl(ctx->regs + WINCON(win)); 316 u32 val = readl(ctx->regs + WINCON(win));
@@ -265,36 +328,24 @@ static void fimd_clear_channel(struct fimd_context *ctx)
265 328
266 /* Wait for vsync, as disable channel takes effect at next vsync */ 329 /* Wait for vsync, as disable channel takes effect at next vsync */
267 if (ch_enabled) { 330 if (ch_enabled) {
268 unsigned int state = ctx->suspended;
331 int pipe = ctx->pipe;
269 332
270 ctx->suspended = 0;
271 fimd_wait_for_vblank(ctx->crtc);
272 ctx->suspended = state;
333 /* ensure that vblank interrupt won't be reported to core */
334 ctx->suspended = false;
335 ctx->pipe = -1;
273 }
274}
275 336
276static int fimd_iommu_attach_devices(struct fimd_context *ctx,
277 struct drm_device *drm_dev)
278{
337 fimd_enable_vblank(ctx->crtc);
338 fimd_wait_for_vblank(ctx->crtc);
339 fimd_disable_vblank(ctx->crtc);
279
280 /* attach this sub driver to iommu mapping if supported. */
281 if (is_drm_iommu_supported(ctx->drm_dev)) {
282 int ret;
283
284 /*
285 * If any channel is already active, iommu will throw
286 * a PAGE FAULT when enabled. So clear any channel if enabled.
287 */
288 fimd_clear_channel(ctx);
289 ret = drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
290 if (ret) {
291 DRM_ERROR("drm_iommu_attach failed.\n");
292 return ret;
293 }
294 340
341 ctx->suspended = true;
342 ctx->pipe = pipe;
295 } 343 }
296 344
297 return 0;
345 clk_disable_unprepare(ctx->lcd_clk);
346 clk_disable_unprepare(ctx->bus_clk);
347
348 pm_runtime_put(ctx->dev);
298} 349}
299 350
300static void fimd_iommu_detach_devices(struct fimd_context *ctx) 351static void fimd_iommu_detach_devices(struct fimd_context *ctx)
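(Aside: the fimd_clear_channels() change above relies on FIMD's shadow registers: a WINCON write only takes effect at the next vsync, so the driver temporarily marks itself active, takes one vblank round-trip, and restores its suspended state. Below is a minimal, self-contained C sketch of that ordering; WINDOWS_NR, disable_window() and wait_one_vblank() are illustrative stand-ins, not kernel API.)

#include <stdbool.h>
#include <stdio.h>

#define WINDOWS_NR 5

static bool win_enabled[WINDOWS_NR];

static void disable_window(int win)
{
	/* models a WINCON write; the hardware latches it at the next vsync */
	win_enabled[win] = false;
	printf("WINCON%d cleared\n", win);
}

static void wait_one_vblank(void)
{
	printf("vsync: shadow registers latched\n");
}

static void clear_channels(void)
{
	bool ch_enabled = false;
	int win;

	for (win = 0; win < WINDOWS_NR; win++) {
		if (win_enabled[win]) {
			disable_window(win);
			ch_enabled = true;
		}
	}

	/* one vblank is enough: every pending disable latches together */
	if (ch_enabled)
		wait_one_vblank();
}

int main(void)
{
	win_enabled[0] = win_enabled[2] = true;
	clear_channels();
	return 0;
}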
@@ -337,7 +388,7 @@ static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc,
337static void fimd_commit(struct exynos_drm_crtc *crtc) 388static void fimd_commit(struct exynos_drm_crtc *crtc)
338{ 389{
339 struct fimd_context *ctx = crtc->ctx; 390 struct fimd_context *ctx = crtc->ctx;
340 struct drm_display_mode *mode = &crtc->base.mode;
391 struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
341 struct fimd_driver_data *driver_data = ctx->driver_data; 392 struct fimd_driver_data *driver_data = ctx->driver_data;
342 void *timing_base = ctx->regs + driver_data->timing_base; 393 void *timing_base = ctx->regs + driver_data->timing_base;
343 u32 val, clkdiv; 394 u32 val, clkdiv;
@@ -434,61 +485,6 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
434 writel(val, ctx->regs + VIDCON0); 485 writel(val, ctx->regs + VIDCON0);
435} 486}
436 487
437static int fimd_enable_vblank(struct exynos_drm_crtc *crtc)
438{
439 struct fimd_context *ctx = crtc->ctx;
440 u32 val;
441
442 if (ctx->suspended)
443 return -EPERM;
444
445 if (!test_and_set_bit(0, &ctx->irq_flags)) {
446 val = readl(ctx->regs + VIDINTCON0);
447
448 val |= VIDINTCON0_INT_ENABLE;
449
450 if (ctx->i80_if) {
451 val |= VIDINTCON0_INT_I80IFDONE;
452 val |= VIDINTCON0_INT_SYSMAINCON;
453 val &= ~VIDINTCON0_INT_SYSSUBCON;
454 } else {
455 val |= VIDINTCON0_INT_FRAME;
456
457 val &= ~VIDINTCON0_FRAMESEL0_MASK;
458 val |= VIDINTCON0_FRAMESEL0_VSYNC;
459 val &= ~VIDINTCON0_FRAMESEL1_MASK;
460 val |= VIDINTCON0_FRAMESEL1_NONE;
461 }
462
463 writel(val, ctx->regs + VIDINTCON0);
464 }
465
466 return 0;
467}
468
469static void fimd_disable_vblank(struct exynos_drm_crtc *crtc)
470{
471 struct fimd_context *ctx = crtc->ctx;
472 u32 val;
473
474 if (ctx->suspended)
475 return;
476
477 if (test_and_clear_bit(0, &ctx->irq_flags)) {
478 val = readl(ctx->regs + VIDINTCON0);
479
480 val &= ~VIDINTCON0_INT_ENABLE;
481
482 if (ctx->i80_if) {
483 val &= ~VIDINTCON0_INT_I80IFDONE;
484 val &= ~VIDINTCON0_INT_SYSMAINCON;
485 val &= ~VIDINTCON0_INT_SYSSUBCON;
486 } else
487 val &= ~VIDINTCON0_INT_FRAME;
488
489 writel(val, ctx->regs + VIDINTCON0);
490 }
491}
492 488
493static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win) 489static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win)
494{ 490{
@@ -634,11 +630,8 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
634 630
635 plane = &ctx->planes[win]; 631 plane = &ctx->planes[win];
636 632
637 /* If suspended, enable this on resume */
633 if (ctx->suspended)
638 if (ctx->suspended) {
639 plane->resume = true;
640 return; 634 return;
641 }
642 635
643 /* 636 /*
644 * SHADOWCON/PRTCON register is used for enabling timing. 637 * SHADOWCON/PRTCON register is used for enabling timing.
@@ -728,8 +721,6 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
728 /* Enable DMA channel and unprotect windows */ 721 /* Enable DMA channel and unprotect windows */
729 fimd_shadow_protect_win(ctx, win, false); 722 fimd_shadow_protect_win(ctx, win, false);
730 723
731 plane->enabled = true;
732
733 if (ctx->i80_if) 724 if (ctx->i80_if)
734 atomic_set(&ctx->win_updated, 1); 725 atomic_set(&ctx->win_updated, 1);
735} 726}
@@ -744,11 +735,8 @@ static void fimd_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
744 735
745 plane = &ctx->planes[win]; 736 plane = &ctx->planes[win];
746 737
747 if (ctx->suspended) {
738 if (ctx->suspended)
748 /* do not resume this window*/
749 plane->resume = false;
750 return; 739 return;
751 }
752 740
753 /* protect windows */ 741 /* protect windows */
754 fimd_shadow_protect_win(ctx, win, true); 742 fimd_shadow_protect_win(ctx, win, true);
@@ -760,57 +748,15 @@ static void fimd_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
760 748
761 /* unprotect windows */ 749 /* unprotect windows */
762 fimd_shadow_protect_win(ctx, win, false); 750 fimd_shadow_protect_win(ctx, win, false);
763
764 plane->enabled = false;
765}
766
767static void fimd_window_suspend(struct fimd_context *ctx)
768{
769 struct exynos_drm_plane *plane;
770 int i;
771
772 for (i = 0; i < WINDOWS_NR; i++) {
773 plane = &ctx->planes[i];
774 plane->resume = plane->enabled;
775 if (plane->enabled)
776 fimd_win_disable(ctx->crtc, i);
777 }
778}
779
780static void fimd_window_resume(struct fimd_context *ctx)
781{
782 struct exynos_drm_plane *plane;
783 int i;
784
785 for (i = 0; i < WINDOWS_NR; i++) {
786 plane = &ctx->planes[i];
787 plane->enabled = plane->resume;
788 plane->resume = false;
789 }
790} 751}
791 752
792static void fimd_apply(struct fimd_context *ctx)
753static void fimd_enable(struct exynos_drm_crtc *crtc)
793{
794 struct exynos_drm_plane *plane;
795 int i;
796
797 for (i = 0; i < WINDOWS_NR; i++) {
798 plane = &ctx->planes[i];
799 if (plane->enabled)
800 fimd_win_commit(ctx->crtc, i);
801 else
802 fimd_win_disable(ctx->crtc, i);
803 }
804
805 fimd_commit(ctx->crtc);
806}
807
808static int fimd_poweron(struct fimd_context *ctx)
809{ 754{
755 struct fimd_context *ctx = crtc->ctx;
810 int ret; 756 int ret;
811 757
812 if (!ctx->suspended) 758 if (!ctx->suspended)
813 return 0;
759 return;
814 760
815 ctx->suspended = false; 761 ctx->suspended = false;
816 762
@@ -819,50 +765,43 @@ static int fimd_poweron(struct fimd_context *ctx)
819 ret = clk_prepare_enable(ctx->bus_clk); 765 ret = clk_prepare_enable(ctx->bus_clk);
820 if (ret < 0) { 766 if (ret < 0) {
821 DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret); 767 DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
822 goto bus_clk_err;
768 return;
823 } 769 }
824 770
825 ret = clk_prepare_enable(ctx->lcd_clk); 771 ret = clk_prepare_enable(ctx->lcd_clk);
826 if (ret < 0) { 772 if (ret < 0) {
827 DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret); 773 DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
828 goto lcd_clk_err;
774 return;
829 } 775 }
830 776
831 /* if vblank was enabled status, enable it again. */ 777 /* if vblank was enabled status, enable it again. */
832 if (test_and_clear_bit(0, &ctx->irq_flags)) {
833 ret = fimd_enable_vblank(ctx->crtc);
778 if (test_and_clear_bit(0, &ctx->irq_flags))
779 fimd_enable_vblank(ctx->crtc);
834 if (ret) {
835 DRM_ERROR("Failed to re-enable vblank [%d]\n", ret);
836 goto enable_vblank_err;
837 }
838 }
839
840 fimd_window_resume(ctx);
841
842 fimd_apply(ctx);
843 780
844 return 0;
781 fimd_commit(ctx->crtc);
845
846enable_vblank_err:
847 clk_disable_unprepare(ctx->lcd_clk);
848lcd_clk_err:
849 clk_disable_unprepare(ctx->bus_clk);
850bus_clk_err:
851 ctx->suspended = true;
852 return ret;
853} 782}
854 783
855static int fimd_poweroff(struct fimd_context *ctx)
784static void fimd_disable(struct exynos_drm_crtc *crtc)
856{ 785{
786 struct fimd_context *ctx = crtc->ctx;
787 int i;
788
857 if (ctx->suspended) 789 if (ctx->suspended)
858 return 0;
790 return;
859 791
860 /* 792 /*
861 * We need to make sure that all windows are disabled before we 793 * We need to make sure that all windows are disabled before we
862 * suspend that connector. Otherwise we might try to scan from 794 * suspend that connector. Otherwise we might try to scan from
863 * a destroyed buffer later. 795 * a destroyed buffer later.
864 */ 796 */
865 fimd_window_suspend(ctx);
797 for (i = 0; i < WINDOWS_NR; i++)
798 fimd_win_disable(crtc, i);
799
800 fimd_enable_vblank(crtc);
801 fimd_wait_for_vblank(crtc);
802 fimd_disable_vblank(crtc);
803
804 writel(0, ctx->regs + VIDCON0);
866 805
867 clk_disable_unprepare(ctx->lcd_clk); 806 clk_disable_unprepare(ctx->lcd_clk);
868 clk_disable_unprepare(ctx->bus_clk); 807 clk_disable_unprepare(ctx->bus_clk);
@@ -870,26 +809,6 @@ static int fimd_poweroff(struct fimd_context *ctx)
870 pm_runtime_put_sync(ctx->dev); 809 pm_runtime_put_sync(ctx->dev);
871 810
872 ctx->suspended = true; 811 ctx->suspended = true;
873 return 0;
874}
875
876static void fimd_dpms(struct exynos_drm_crtc *crtc, int mode)
877{
878 DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
879
880 switch (mode) {
881 case DRM_MODE_DPMS_ON:
882 fimd_poweron(crtc->ctx);
883 break;
884 case DRM_MODE_DPMS_STANDBY:
885 case DRM_MODE_DPMS_SUSPEND:
886 case DRM_MODE_DPMS_OFF:
887 fimd_poweroff(crtc->ctx);
888 break;
889 default:
890 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
891 break;
892 }
893} 812}
894 813
895static void fimd_trigger(struct device *dev) 814static void fimd_trigger(struct device *dev)
@@ -964,7 +883,8 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
964} 883}
965 884
966static const struct exynos_drm_crtc_ops fimd_crtc_ops = { 885static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
967 .dpms = fimd_dpms,
886 .enable = fimd_enable,
887 .disable = fimd_disable,
968 .mode_fixup = fimd_mode_fixup, 888 .mode_fixup = fimd_mode_fixup,
969 .commit = fimd_commit, 889 .commit = fimd_commit,
970 .enable_vblank = fimd_enable_vblank, 890 .enable_vblank = fimd_enable_vblank,
@@ -974,6 +894,7 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
974 .win_disable = fimd_win_disable, 894 .win_disable = fimd_win_disable,
975 .te_handler = fimd_te_handler, 895 .te_handler = fimd_te_handler,
976 .clock_enable = fimd_dp_clock_enable, 896 .clock_enable = fimd_dp_clock_enable,
897 .clear_channels = fimd_clear_channels,
977}; 898};
978 899
979static irqreturn_t fimd_irq_handler(int irq, void *dev_id) 900static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
@@ -1043,7 +964,11 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
1043 if (ctx->display) 964 if (ctx->display)
1044 exynos_drm_create_enc_conn(drm_dev, ctx->display); 965 exynos_drm_create_enc_conn(drm_dev, ctx->display);
1045 966
1046 return fimd_iommu_attach_devices(ctx, drm_dev);
967 ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
968 if (ret)
969 priv->pipe--;
970
971 return ret;
1047} 972}
1048 973
1049static void fimd_unbind(struct device *dev, struct device *master, 974static void fimd_unbind(struct device *dev, struct device *master,
@@ -1051,7 +976,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
1051{ 976{
1052 struct fimd_context *ctx = dev_get_drvdata(dev); 977 struct fimd_context *ctx = dev_get_drvdata(dev);
1053 978
1054 fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF);
979 fimd_disable(ctx->crtc);
1055 980
1056 fimd_iommu_detach_devices(ctx); 981 fimd_iommu_detach_devices(ctx);
1057 982
@@ -1079,11 +1004,6 @@ static int fimd_probe(struct platform_device *pdev)
1079 if (!ctx) 1004 if (!ctx)
1080 return -ENOMEM; 1005 return -ENOMEM;
1081 1006
1082 ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CRTC,
1083 EXYNOS_DISPLAY_TYPE_LCD);
1084 if (ret)
1085 return ret;
1086
1087 ctx->dev = dev; 1007 ctx->dev = dev;
1088 ctx->suspended = true; 1008 ctx->suspended = true;
1089 ctx->driver_data = drm_fimd_get_driver_data(pdev); 1009 ctx->driver_data = drm_fimd_get_driver_data(pdev);
@@ -1134,38 +1054,33 @@ static int fimd_probe(struct platform_device *pdev)
1134 ctx->bus_clk = devm_clk_get(dev, "fimd"); 1054 ctx->bus_clk = devm_clk_get(dev, "fimd");
1135 if (IS_ERR(ctx->bus_clk)) { 1055 if (IS_ERR(ctx->bus_clk)) {
1136 dev_err(dev, "failed to get bus clock\n"); 1056 dev_err(dev, "failed to get bus clock\n");
1137 ret = PTR_ERR(ctx->bus_clk);
1057 return PTR_ERR(ctx->bus_clk);
1138 goto err_del_component;
1139 } 1058 }
1140 1059
1141 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd"); 1060 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
1142 if (IS_ERR(ctx->lcd_clk)) { 1061 if (IS_ERR(ctx->lcd_clk)) {
1143 dev_err(dev, "failed to get lcd clock\n"); 1062 dev_err(dev, "failed to get lcd clock\n");
1144 ret = PTR_ERR(ctx->lcd_clk);
1063 return PTR_ERR(ctx->lcd_clk);
1145 goto err_del_component;
1146 } 1064 }
1147 1065
1148 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1066 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1149 1067
1150 ctx->regs = devm_ioremap_resource(dev, res); 1068 ctx->regs = devm_ioremap_resource(dev, res);
1151 if (IS_ERR(ctx->regs)) {
1152 ret = PTR_ERR(ctx->regs);
1153 goto err_del_component;
1154 }
1069 if (IS_ERR(ctx->regs))
1070 return PTR_ERR(ctx->regs);
1155 1071
1156 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, 1072 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
1157 ctx->i80_if ? "lcd_sys" : "vsync"); 1073 ctx->i80_if ? "lcd_sys" : "vsync");
1158 if (!res) { 1074 if (!res) {
1159 dev_err(dev, "irq request failed.\n"); 1075 dev_err(dev, "irq request failed.\n");
1160 ret = -ENXIO;
1076 return -ENXIO;
1161 goto err_del_component;
1162 } 1077 }
1163 1078
1164 ret = devm_request_irq(dev, res->start, fimd_irq_handler, 1079 ret = devm_request_irq(dev, res->start, fimd_irq_handler,
1165 0, "drm_fimd", ctx); 1080 0, "drm_fimd", ctx);
1166 if (ret) { 1081 if (ret) {
1167 dev_err(dev, "irq request failed.\n"); 1082 dev_err(dev, "irq request failed.\n");
1168 goto err_del_component;
1083 return ret;
1169 } 1084 }
1170 1085
1171 init_waitqueue_head(&ctx->wait_vsync_queue); 1086 init_waitqueue_head(&ctx->wait_vsync_queue);
@@ -1175,8 +1090,7 @@ static int fimd_probe(struct platform_device *pdev)
1175 1090
1176 ctx->display = exynos_dpi_probe(dev); 1091 ctx->display = exynos_dpi_probe(dev);
1177 if (IS_ERR(ctx->display)) { 1092 if (IS_ERR(ctx->display)) {
1178 ret = PTR_ERR(ctx->display);
1093 return PTR_ERR(ctx->display);
1179 goto err_del_component;
1180 } 1094 }
1181 1095
1182 pm_runtime_enable(dev); 1096 pm_runtime_enable(dev);
@@ -1190,8 +1104,6 @@ static int fimd_probe(struct platform_device *pdev)
1190err_disable_pm_runtime: 1104err_disable_pm_runtime:
1191 pm_runtime_disable(dev); 1105 pm_runtime_disable(dev);
1192 1106
1193err_del_component:
1194 exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CRTC);
1195 return ret; 1107 return ret;
1196} 1108}
1197 1109
@@ -1200,7 +1112,6 @@ static int fimd_remove(struct platform_device *pdev)
1200 pm_runtime_disable(&pdev->dev); 1112 pm_runtime_disable(&pdev->dev);
1201 1113
1202 component_del(&pdev->dev, &fimd_component_ops); 1114 component_del(&pdev->dev, &fimd_component_ops);
1203 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
1204 1115
1205 return 0; 1116 return 0;
1206} 1117}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 308173cb4f0a..6f42e2248288 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -61,7 +61,6 @@ struct exynos_drm_gem_buf {
61 * or at framebuffer creation. 61 * or at framebuffer creation.
62 * @size: size requested from user, in bytes and this size is aligned 62 * @size: size requested from user, in bytes and this size is aligned
63 * in page unit. 63 * in page unit.
64 * @vma: a pointer to vm_area.
65 * @flags: indicate memory type to allocated buffer and cache attruibute. 64 * @flags: indicate memory type to allocated buffer and cache attruibute.
66 * 65 *
67 * P.S. this object would be transferred to user as kms_bo.handle so 66 * P.S. this object would be transferred to user as kms_bo.handle so
@@ -71,7 +70,6 @@ struct exynos_drm_gem_obj {
71 struct drm_gem_object base; 70 struct drm_gem_object base;
72 struct exynos_drm_gem_buf *buffer; 71 struct exynos_drm_gem_buf *buffer;
73 unsigned long size; 72 unsigned long size;
74 struct vm_area_struct *vma;
75 unsigned int flags; 73 unsigned int flags;
76}; 74};
77 75
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index b32b291f88ff..d4ec7465e9cc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -100,6 +100,9 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
100 100
101 dma_set_max_seg_size(subdrv_dev, 0xffffffffu); 101 dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
102 102
103 if (subdrv_dev->archdata.mapping)
104 arm_iommu_detach_device(subdrv_dev);
105
103 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); 106 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
104 if (ret < 0) { 107 if (ret < 0) {
105 DRM_DEBUG_KMS("failed iommu attach.\n"); 108 DRM_DEBUG_KMS("failed iommu attach.\n");
@@ -114,8 +117,8 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
114 * If iommu attach succeeded, the sub driver would have dma_ops 117 * If iommu attach succeeded, the sub driver would have dma_ops
115 * for iommu and also all sub drivers have same dma_ops. 118 * for iommu and also all sub drivers have same dma_ops.
116 */ 119 */
117 if (!dev->archdata.dma_ops)
118 dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
120 if (get_dma_ops(dev) == get_dma_ops(NULL))
121 set_dma_ops(dev, get_dma_ops(subdrv_dev));
119 122
120 return 0; 123 return 0;
121} 124}
@@ -141,3 +144,17 @@ void drm_iommu_detach_device(struct drm_device *drm_dev,
141 iommu_detach_device(mapping->domain, subdrv_dev); 144 iommu_detach_device(mapping->domain, subdrv_dev);
142 drm_release_iommu_mapping(drm_dev); 145 drm_release_iommu_mapping(drm_dev);
143} 146}
147
148int drm_iommu_attach_device_if_possible(struct exynos_drm_crtc *exynos_crtc,
149 struct drm_device *drm_dev, struct device *subdrv_dev)
150{
151 int ret = 0;
152
153 if (is_drm_iommu_supported(drm_dev)) {
154 if (exynos_crtc->ops->clear_channels)
155 exynos_crtc->ops->clear_channels(exynos_crtc);
156 return drm_iommu_attach_device(drm_dev, subdrv_dev);
157 }
158
159 return ret;
160}
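(Aside: drm_iommu_attach_device_if_possible() above encodes the rule that scanout DMA must be quiesced before translation is enabled, since a channel still fetching from physical addresses would fault the moment the IOMMU mapping is installed. A compact user-space model of that ordering follows; attach_if_possible(), iommu_attach() and struct crtc_ops here are simplified stand-ins for the kernel code, not its real API.)

#include <stdbool.h>
#include <stdio.h>

struct crtc_ops {
	void (*clear_channels)(void);
};

static void fimd_clear(void)
{
	printf("scanout DMA stopped\n");
}

static int iommu_attach(void)
{
	printf("IOMMU mapping installed\n");
	return 0;
}

static int attach_if_possible(const struct crtc_ops *ops, bool iommu_supported)
{
	if (!iommu_supported)
		return 0;	/* no translation; DMA keeps using physical addresses */

	/*
	 * A channel still scanning out physical addresses would fault as
	 * soon as translation is enabled, so quiesce it first.
	 */
	if (ops->clear_channels)
		ops->clear_channels();

	return iommu_attach();
}

int main(void)
{
	const struct crtc_ops ops = { .clear_channels = fimd_clear };

	return attach_if_possible(&ops, true);
}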
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 35d25889b476..8341c7a475b4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -38,6 +38,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
38#endif 38#endif
39} 39}
40 40
41int drm_iommu_attach_device_if_possible(
42 struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
43 struct device *subdrv_dev);
44
41#else 45#else
42 46
43static inline int drm_create_iommu_mapping(struct drm_device *drm_dev) 47static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
@@ -65,5 +69,12 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
65 return false; 69 return false;
66} 70}
67 71
72static inline int drm_iommu_attach_device_if_possible(
73 struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
74 struct device *subdrv_dev)
75{
76 return 0;
77}
78
68#endif 79#endif
69#endif 80#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index b7f1cbc46cc2..67e5451e066f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -45,9 +45,6 @@
45#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev)) 45#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
46#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M) 46#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
47 47
48/* platform device pointer for ipp device. */
49static struct platform_device *exynos_drm_ipp_pdev;
50
51/* 48/*
52 * A structure of event. 49 * A structure of event.
53 * 50 *
@@ -102,30 +99,6 @@ static LIST_HEAD(exynos_drm_ippdrv_list);
102static DEFINE_MUTEX(exynos_drm_ippdrv_lock); 99static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
103static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list); 100static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
104 101
105int exynos_platform_device_ipp_register(void)
106{
107 struct platform_device *pdev;
108
109 if (exynos_drm_ipp_pdev)
110 return -EEXIST;
111
112 pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
113 if (IS_ERR(pdev))
114 return PTR_ERR(pdev);
115
116 exynos_drm_ipp_pdev = pdev;
117
118 return 0;
119}
120
121void exynos_platform_device_ipp_unregister(void)
122{
123 if (exynos_drm_ipp_pdev) {
124 platform_device_unregister(exynos_drm_ipp_pdev);
125 exynos_drm_ipp_pdev = NULL;
126 }
127}
128
129int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv) 102int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
130{ 103{
131 mutex_lock(&exynos_drm_ippdrv_lock); 104 mutex_lock(&exynos_drm_ippdrv_lock);
@@ -482,12 +455,11 @@ static int ipp_validate_mem_node(struct drm_device *drm_dev,
482{ 455{
483 struct drm_exynos_ipp_config *ipp_cfg; 456 struct drm_exynos_ipp_config *ipp_cfg;
484 unsigned int num_plane; 457 unsigned int num_plane;
485 unsigned long min_size, size;
486 unsigned int bpp;
458 unsigned long size, buf_size = 0, plane_size, img_size = 0;
459 unsigned int bpp, width, height;
487 int i; 460 int i;
488 461
489 /* The property id should already be varified */
462 ipp_cfg = &c_node->property.config[m_node->ops_id];
490 ipp_cfg = &c_node->property.config[m_node->prop_id];
491 num_plane = drm_format_num_planes(ipp_cfg->fmt); 463 num_plane = drm_format_num_planes(ipp_cfg->fmt);
492 464
493 /** 465 /**
@@ -498,20 +470,45 @@ static int ipp_validate_mem_node(struct drm_device *drm_dev,
498 * but it seems more than enough 470 * but it seems more than enough
499 */ 471 */
500 for (i = 0; i < num_plane; ++i) { 472 for (i = 0; i < num_plane; ++i) {
501 if (!m_node->buf_info.handles[i]) {
502 DRM_ERROR("invalid handle for plane %d\n", i);
503 return -EINVAL;
504 }
473 width = ipp_cfg->sz.hsize;
474 height = ipp_cfg->sz.vsize;
505 bpp = drm_format_plane_cpp(ipp_cfg->fmt, i); 475 bpp = drm_format_plane_cpp(ipp_cfg->fmt, i);
506 min_size = (ipp_cfg->sz.hsize * ipp_cfg->sz.vsize * bpp) >> 3;
507 size = exynos_drm_gem_get_size(drm_dev,
508 m_node->buf_info.handles[i],
509 c_node->filp);
510 if (min_size > size) {
511 DRM_ERROR("invalid size for plane %d\n", i);
512 return -EINVAL;
513 }
476
477 /*
478 * The result of drm_format_plane_cpp() for chroma planes must
479 * be used with drm_format_xxxx_chroma_subsampling() for
480 * correct result.
481 */
482 if (i > 0) {
483 width /= drm_format_horz_chroma_subsampling(
484 ipp_cfg->fmt);
485 height /= drm_format_vert_chroma_subsampling(
486 ipp_cfg->fmt);
487 }
488 plane_size = width * height * bpp;
489 img_size += plane_size;
490
491 if (m_node->buf_info.handles[i]) {
492 size = exynos_drm_gem_get_size(drm_dev,
493 m_node->buf_info.handles[i],
494 c_node->filp);
495 if (plane_size > size) {
496 DRM_ERROR(
497 "buffer %d is smaller than required\n",
498 i);
499 return -EINVAL;
500 }
501
502 buf_size += size;
503 }
504 }
505
506 if (buf_size < img_size) {
507 DRM_ERROR("size of buffers(%lu) is smaller than image(%lu)\n",
508 buf_size, img_size);
509 return -EINVAL;
514 } 510 }
511
515 return 0; 512 return 0;
516} 513}
517 514
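(Aside: the ipp_validate_mem_node() comment above notes that drm_format_plane_cpp() must be combined with the chroma subsampling helpers to size chroma planes correctly. The standalone arithmetic sketch below works this through for NV12, assuming 2x2 subsampling with 1 byte per luma sample and 2 bytes per chroma sample pair; plane_cpp(), horz_sub() and vert_sub() are local stand-ins for the drm_format_* helpers.)

#include <stdio.h>

static unsigned int plane_cpp(int plane) { return plane ? 2 : 1; }
static unsigned int horz_sub(void) { return 2; }
static unsigned int vert_sub(void) { return 2; }

int main(void)
{
	unsigned int width = 1920, height = 1080;
	unsigned long img_size = 0;
	int i;

	for (i = 0; i < 2; i++) {	/* NV12 has two planes */
		unsigned int w = width, h = height;

		if (i > 0) {		/* chroma plane is subsampled */
			w /= horz_sub();
			h /= vert_sub();
		}
		img_size += (unsigned long)w * h * plane_cpp(i);
	}

	/* 1920*1080*1 + 960*540*2 = 3110400 bytes */
	printf("required image size: %lu\n", img_size);
	return 0;
}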
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
new file mode 100644
index 000000000000..8994eab56ba8
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -0,0 +1,490 @@
1/*
2 * Copyright (C) 2015 Samsung Electronics Co.Ltd
3 * Authors:
4 * Hyungwon Hwang <human.hwang@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/platform_device.h>
12#include <video/of_videomode.h>
13#include <linux/of_address.h>
14#include <video/videomode.h>
15#include <linux/module.h>
16#include <linux/delay.h>
17#include <linux/mutex.h>
18#include <linux/of.h>
19#include <linux/of_graph.h>
20#include <linux/clk.h>
21#include <drm/drmP.h>
22#include <linux/mfd/syscon.h>
23#include <linux/regmap.h>
24
25/* Sysreg registers for MIC */
26#define DSD_CFG_MUX 0x1004
27#define MIC0_RGB_MUX (1 << 0)
28#define MIC0_I80_MUX (1 << 1)
29#define MIC0_ON_MUX (1 << 5)
30
31/* MIC registers */
32#define MIC_OP 0x0
33#define MIC_IP_VER 0x0004
34#define MIC_V_TIMING_0 0x0008
35#define MIC_V_TIMING_1 0x000C
36#define MIC_IMG_SIZE 0x0010
37#define MIC_INPUT_TIMING_0 0x0014
38#define MIC_INPUT_TIMING_1 0x0018
39#define MIC_2D_OUTPUT_TIMING_0 0x001C
40#define MIC_2D_OUTPUT_TIMING_1 0x0020
41#define MIC_2D_OUTPUT_TIMING_2 0x0024
42#define MIC_3D_OUTPUT_TIMING_0 0x0028
43#define MIC_3D_OUTPUT_TIMING_1 0x002C
44#define MIC_3D_OUTPUT_TIMING_2 0x0030
45#define MIC_CORE_PARA_0 0x0034
46#define MIC_CORE_PARA_1 0x0038
47#define MIC_CTC_CTRL 0x0040
48#define MIC_RD_DATA 0x0044
49
50#define MIC_UPD_REG (1 << 31)
51#define MIC_ON_REG (1 << 30)
52#define MIC_TD_ON_REG (1 << 29)
53#define MIC_BS_CHG_OUT (1 << 16)
54#define MIC_VIDEO_TYPE(x) (((x) & 0xf) << 12)
55#define MIC_PSR_EN (1 << 5)
56#define MIC_SW_RST (1 << 4)
57#define MIC_ALL_RST (1 << 3)
58#define MIC_CORE_VER_CONTROL (1 << 2)
59#define MIC_MODE_SEL_COMMAND_MODE (1 << 1)
60#define MIC_MODE_SEL_MASK (1 << 1)
61#define MIC_CORE_EN (1 << 0)
62
63#define MIC_V_PULSE_WIDTH(x) (((x) & 0x3fff) << 16)
64#define MIC_V_PERIOD_LINE(x) ((x) & 0x3fff)
65
66#define MIC_VBP_SIZE(x) (((x) & 0x3fff) << 16)
67#define MIC_VFP_SIZE(x) ((x) & 0x3fff)
68
69#define MIC_IMG_V_SIZE(x) (((x) & 0x3fff) << 16)
70#define MIC_IMG_H_SIZE(x) ((x) & 0x3fff)
71
72#define MIC_H_PULSE_WIDTH_IN(x) (((x) & 0x3fff) << 16)
73#define MIC_H_PERIOD_PIXEL_IN(x) ((x) & 0x3fff)
74
75#define MIC_HBP_SIZE_IN(x) (((x) & 0x3fff) << 16)
76#define MIC_HFP_SIZE_IN(x) ((x) & 0x3fff)
77
78#define MIC_H_PULSE_WIDTH_2D(x) (((x) & 0x3fff) << 16)
79#define MIC_H_PERIOD_PIXEL_2D(x) ((x) & 0x3fff)
80
81#define MIC_HBP_SIZE_2D(x) (((x) & 0x3fff) << 16)
82#define MIC_HFP_SIZE_2D(x) ((x) & 0x3fff)
83
84#define MIC_BS_SIZE_2D(x) ((x) & 0x3fff)
85
86enum {
87 ENDPOINT_DECON_NODE,
88 ENDPOINT_DSI_NODE,
89 NUM_ENDPOINTS
90};
91
92static char *clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
93#define NUM_CLKS ARRAY_SIZE(clk_names)
94static DEFINE_MUTEX(mic_mutex);
95
96struct exynos_mic {
97 struct device *dev;
98 void __iomem *reg;
99 struct regmap *sysreg;
100 struct clk *clks[NUM_CLKS];
101
102 bool i80_mode;
103 struct videomode vm;
104 struct drm_encoder *encoder;
105 struct drm_bridge bridge;
106
107 bool enabled;
108};
109
110static void mic_set_path(struct exynos_mic *mic, bool enable)
111{
112 int ret;
113 unsigned int val;
114
115 ret = regmap_read(mic->sysreg, DSD_CFG_MUX, &val);
116 if (ret) {
117 DRM_ERROR("mic: Failed to read system register\n");
118 return;
119 }
120
121 if (enable) {
122 if (mic->i80_mode)
123 val |= MIC0_I80_MUX;
124 else
125 val |= MIC0_RGB_MUX;
126
127 val |= MIC0_ON_MUX;
128 } else
129 val &= ~(MIC0_RGB_MUX | MIC0_I80_MUX | MIC0_ON_MUX);
130
131 ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
132 if (ret)
133 DRM_ERROR("mic: Failed to write system register\n");
134}
135
136static int mic_sw_reset(struct exynos_mic *mic)
137{
138 unsigned int retry = 100;
139 int ret;
140
141 writel(MIC_SW_RST, mic->reg + MIC_OP);
142
143 while (retry-- > 0) {
144 ret = readl(mic->reg + MIC_OP);
145 if (!(ret & MIC_SW_RST))
146 return 0;
147
148 udelay(10);
149 }
150
151 return -ETIMEDOUT;
152}
153
154static void mic_set_porch_timing(struct exynos_mic *mic)
155{
156 struct videomode vm = mic->vm;
157 u32 reg;
158
159 reg = MIC_V_PULSE_WIDTH(vm.vsync_len) +
160 MIC_V_PERIOD_LINE(vm.vsync_len + vm.vactive +
161 vm.vback_porch + vm.vfront_porch);
162 writel(reg, mic->reg + MIC_V_TIMING_0);
163
164 reg = MIC_VBP_SIZE(vm.vback_porch) +
165 MIC_VFP_SIZE(vm.vfront_porch);
166 writel(reg, mic->reg + MIC_V_TIMING_1);
167
168 reg = MIC_V_PULSE_WIDTH(vm.hsync_len) +
169 MIC_V_PERIOD_LINE(vm.hsync_len + vm.hactive +
170 vm.hback_porch + vm.hfront_porch);
171 writel(reg, mic->reg + MIC_INPUT_TIMING_0);
172
173 reg = MIC_VBP_SIZE(vm.hback_porch) +
174 MIC_VFP_SIZE(vm.hfront_porch);
175 writel(reg, mic->reg + MIC_INPUT_TIMING_1);
176}
177
178static void mic_set_img_size(struct exynos_mic *mic)
179{
180 struct videomode *vm = &mic->vm;
181 u32 reg;
182
183 reg = MIC_IMG_H_SIZE(vm->hactive) +
184 MIC_IMG_V_SIZE(vm->vactive);
185
186 writel(reg, mic->reg + MIC_IMG_SIZE);
187}
188
189static void mic_set_output_timing(struct exynos_mic *mic)
190{
191 struct videomode vm = mic->vm;
192 u32 reg, bs_size_2d;
193
194 DRM_DEBUG("w: %u, h: %u\n", vm.hactive, vm.vactive);
195 bs_size_2d = ((vm.hactive >> 2) << 1) + (vm.vactive % 4);
196 reg = MIC_BS_SIZE_2D(bs_size_2d);
197 writel(reg, mic->reg + MIC_2D_OUTPUT_TIMING_2);
198
199 if (!mic->i80_mode) {
200 reg = MIC_H_PULSE_WIDTH_2D(vm.hsync_len) +
201 MIC_H_PERIOD_PIXEL_2D(vm.hsync_len + bs_size_2d +
202 vm.hback_porch + vm.hfront_porch);
203 writel(reg, mic->reg + MIC_2D_OUTPUT_TIMING_0);
204
205 reg = MIC_HBP_SIZE_2D(vm.hback_porch) +
206 MIC_H_PERIOD_PIXEL_2D(vm.hfront_porch);
207 writel(reg, mic->reg + MIC_2D_OUTPUT_TIMING_1);
208 }
209}
210
211static void mic_set_reg_on(struct exynos_mic *mic, bool enable)
212{
213 u32 reg = readl(mic->reg + MIC_OP);
214
215 if (enable) {
216 reg &= ~(MIC_MODE_SEL_MASK | MIC_CORE_VER_CONTROL | MIC_PSR_EN);
217 reg |= (MIC_CORE_EN | MIC_BS_CHG_OUT | MIC_ON_REG);
218
219 reg &= ~MIC_MODE_SEL_COMMAND_MODE;
220 if (mic->i80_mode)
221 reg |= MIC_MODE_SEL_COMMAND_MODE;
222 } else {
223 reg &= ~MIC_CORE_EN;
224 }
225
226 reg |= MIC_UPD_REG;
227 writel(reg, mic->reg + MIC_OP);
228}
229
230static struct device_node *get_remote_node(struct device_node *from, int reg)
231{
232 struct device_node *endpoint = NULL, *remote_node = NULL;
233
234 endpoint = of_graph_get_endpoint_by_regs(from, reg, -1);
235 if (!endpoint) {
236 DRM_ERROR("mic: Failed to find remote port from %s\n",
237 from->full_name);
238 goto exit;
239 }
240
241 remote_node = of_graph_get_remote_port_parent(endpoint);
242 if (!remote_node) {
243 DRM_ERROR("mic: Failed to find remote port parent from %s\n",
244 from->full_name);
245 goto exit;
246 }
247
248exit:
249 of_node_put(endpoint);
250 return remote_node;
251}
252
253static int parse_dt(struct exynos_mic *mic)
254{
255 int ret = 0, i, j;
256 struct device_node *remote_node;
257 struct device_node *nodes[3];
258
259 /*
260 * The order of endpoints does matter.
261 * The first node must be for decon and the second one must be for dsi.
262 */
263 for (i = 0, j = 0; i < NUM_ENDPOINTS; i++) {
264 remote_node = get_remote_node(mic->dev->of_node, i);
265 if (!remote_node) {
266 ret = -EPIPE;
267 goto exit;
268 }
269 nodes[j++] = remote_node;
270
271 switch (i) {
272 case ENDPOINT_DECON_NODE:
273 /* decon node */
274 if (of_get_child_by_name(remote_node,
275 "i80-if-timings"))
276 mic->i80_mode = 1;
277
278 break;
279 case ENDPOINT_DSI_NODE:
280 /* panel node */
281 remote_node = get_remote_node(remote_node, 1);
282 if (!remote_node) {
283 ret = -EPIPE;
284 goto exit;
285 }
286 nodes[j++] = remote_node;
287
288 ret = of_get_videomode(remote_node,
289 &mic->vm, 0);
290 if (ret) {
291 DRM_ERROR("mic: failed to get videomode");
292 goto exit;
293 }
294
295 break;
296 default:
297 DRM_ERROR("mic: Unknown endpoint from MIC");
298 break;
299 }
300 }
301
302exit:
303 while (--j > -1)
304 of_node_put(nodes[j]);
305
306 return ret;
307}
308
309void mic_disable(struct drm_bridge *bridge) { }
310
311void mic_post_disable(struct drm_bridge *bridge)
312{
313 struct exynos_mic *mic = bridge->driver_private;
314 int i;
315
316 mutex_lock(&mic_mutex);
317 if (!mic->enabled)
318 goto already_disabled;
319
320 mic_set_path(mic, 0);
321
322 for (i = NUM_CLKS - 1; i > -1; i--)
323 clk_disable_unprepare(mic->clks[i]);
324
325 mic->enabled = 0;
326
327already_disabled:
328 mutex_unlock(&mic_mutex);
329}
330
331void mic_pre_enable(struct drm_bridge *bridge)
332{
333 struct exynos_mic *mic = bridge->driver_private;
334 int ret, i;
335
336 mutex_lock(&mic_mutex);
337 if (mic->enabled)
338 goto already_enabled;
339
340 for (i = 0; i < NUM_CLKS; i++) {
341 ret = clk_prepare_enable(mic->clks[i]);
342 if (ret < 0) {
343 DRM_ERROR("Failed to enable clock (%s)\n",
344 clk_names[i]);
345 goto turn_off_clks;
346 }
347 }
348
349 mic_set_path(mic, 1);
350
351 ret = mic_sw_reset(mic);
352 if (ret) {
353 DRM_ERROR("Failed to reset\n");
354 goto turn_off_clks;
355 }
356
357 if (!mic->i80_mode)
358 mic_set_porch_timing(mic);
359 mic_set_img_size(mic);
360 mic_set_output_timing(mic);
361 mic_set_reg_on(mic, 1);
362 mic->enabled = 1;
363 mutex_unlock(&mic_mutex);
364
365 return;
366
367turn_off_clks:
368 while (--i > -1)
369 clk_disable_unprepare(mic->clks[i]);
370already_enabled:
371 mutex_unlock(&mic_mutex);
372}
373
374void mic_enable(struct drm_bridge *bridge) { }
375
376void mic_destroy(struct drm_bridge *bridge)
377{
378 struct exynos_mic *mic = bridge->driver_private;
379 int i;
380
381 mutex_lock(&mic_mutex);
382 if (!mic->enabled)
383 goto already_disabled;
384
385 for (i = NUM_CLKS - 1; i > -1; i--)
386 clk_disable_unprepare(mic->clks[i]);
387
388already_disabled:
389 mutex_unlock(&mic_mutex);
390}
391
392struct drm_bridge_funcs mic_bridge_funcs = {
393 .disable = mic_disable,
394 .post_disable = mic_post_disable,
395 .pre_enable = mic_pre_enable,
396 .enable = mic_enable,
397};
398
399int exynos_mic_probe(struct platform_device *pdev)
400{
401 struct device *dev = &pdev->dev;
402 struct exynos_mic *mic;
403 struct resource res;
404 int ret, i;
405
406 mic = devm_kzalloc(dev, sizeof(*mic), GFP_KERNEL);
407 if (!mic) {
408 DRM_ERROR("mic: Failed to allocate memory for MIC object\n");
409 ret = -ENOMEM;
410 goto err;
411 }
412
413 mic->dev = dev;
414
415 ret = parse_dt(mic);
416 if (ret)
417 goto err;
418
419 ret = of_address_to_resource(dev->of_node, 0, &res);
420 if (ret) {
421 DRM_ERROR("mic: Failed to get mem region for MIC\n");
422 goto err;
423 }
424 mic->reg = devm_ioremap(dev, res.start, resource_size(&res));
425 if (!mic->reg) {
426 DRM_ERROR("mic: Failed to remap for MIC\n");
427 ret = -ENOMEM;
428 goto err;
429 }
430 mic->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
431 "samsung,disp-syscon");
432 if (IS_ERR(mic->sysreg)) {
433 DRM_ERROR("mic: Failed to get system register.\n");
434 ret = PTR_ERR(mic->sysreg);
435 goto err;
436 }
437
438 mic->bridge.funcs = &mic_bridge_funcs;
439 mic->bridge.of_node = dev->of_node;
440 mic->bridge.driver_private = mic;
441 ret = drm_bridge_add(&mic->bridge);
442 if (ret) {
443 DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
444 goto err;
445 }
446
447 for (i = 0; i < NUM_CLKS; i++) {
448 mic->clks[i] = of_clk_get_by_name(dev->of_node, clk_names[i]);
449 if (IS_ERR(mic->clks[i])) {
450 DRM_ERROR("mic: Failed to get clock (%s)\n",
451 clk_names[i]);
452 ret = PTR_ERR(mic->clks[i]);
453 goto err;
454 }
455 }
456 platform_set_drvdata(pdev, mic);
457 DRM_DEBUG_KMS("MIC has been probed\n");
458
459err:
460 return ret;
461}
462
463static int exynos_mic_remove(struct platform_device *pdev)
464{
465 struct exynos_mic *mic = platform_get_drvdata(pdev);
466 int i;
467
468 drm_bridge_remove(&mic->bridge);
469
470 for (i = NUM_CLKS - 1; i > -1; i--)
471 clk_put(mic->clks[i]);
472
473 return 0;
474}
475
476static const struct of_device_id exynos_mic_of_match[] = {
477 { .compatible = "samsung,exynos5433-mic" },
478 { }
479};
480MODULE_DEVICE_TABLE(of, exynos_mic_of_match);
481
482struct platform_driver mic_driver = {
483 .probe = exynos_mic_probe,
484 .remove = exynos_mic_remove,
485 .driver = {
486 .name = "exynos-mic",
487 .owner = THIS_MODULE,
488 .of_match_table = exynos_mic_of_match,
489 },
490};
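(Aside: several MIC paths above follow the same register idiom: read MIC_OP, modify the control bits, then set MIC_UPD_REG so the hardware latches everything at once. A stripped-down model of that read-modify-write-latch pattern, with the register replaced by a plain variable, is sketched below; the bit names are abbreviated stand-ins for the MIC_* defines.)

#include <stdint.h>
#include <stdio.h>

static uint32_t mic_op;		/* stand-in for the MIC_OP register */

#define CORE_EN  (1u << 0)
#define ON_REG   (1u << 30)
#define UPD_REG  (1u << 31)

static void set_reg_on(int enable)
{
	uint32_t reg = mic_op;	/* models readl() */

	if (enable)
		reg |= CORE_EN | ON_REG;
	else
		reg &= ~CORE_EN;

	/* nothing takes effect until the update bit is written */
	reg |= UPD_REG;
	mic_op = reg;		/* models writel() */
}

int main(void)
{
	set_reg_on(1);
	printf("MIC_OP = 0x%08x\n", mic_op);
	return 0;
}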
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index b1180fbe7546..a729980d3c2f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -13,6 +13,7 @@
13 13
14#include <drm/exynos_drm.h> 14#include <drm/exynos_drm.h>
15#include <drm/drm_plane_helper.h> 15#include <drm/drm_plane_helper.h>
16#include <drm/drm_atomic_helper.h>
16#include "exynos_drm_drv.h" 17#include "exynos_drm_drv.h"
17#include "exynos_drm_crtc.h" 18#include "exynos_drm_crtc.h"
18#include "exynos_drm_fb.h" 19#include "exynos_drm_fb.h"
@@ -61,42 +62,21 @@ static int exynos_plane_get_size(int start, unsigned length, unsigned last)
61 return size; 62 return size;
62} 63}
63 64
64int exynos_check_plane(struct drm_plane *plane, struct drm_framebuffer *fb)
65{
66 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
67 int nr;
68 int i;
69
70 nr = exynos_drm_fb_get_buf_cnt(fb);
71 for (i = 0; i < nr; i++) {
72 struct exynos_drm_gem_buf *buffer = exynos_drm_fb_buffer(fb, i);
73
74 if (!buffer) {
75 DRM_DEBUG_KMS("buffer is null\n");
76 return -EFAULT;
77 }
78
79 exynos_plane->dma_addr[i] = buffer->dma_addr + fb->offsets[i];
80
81 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
82 i, (unsigned long)exynos_plane->dma_addr[i]);
83 }
84
85 return 0;
86}
87
88void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
89 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
90 unsigned int crtc_w, unsigned int crtc_h,
91 uint32_t src_x, uint32_t src_y,
92 uint32_t src_w, uint32_t src_h)
65static void exynos_plane_mode_set(struct drm_plane *plane,
66 struct drm_crtc *crtc,
67 struct drm_framebuffer *fb,
68 int crtc_x, int crtc_y,
69 unsigned int crtc_w, unsigned int crtc_h,
70 uint32_t src_x, uint32_t src_y,
71 uint32_t src_w, uint32_t src_h)
93{ 72{
94 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); 73 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
74 struct drm_display_mode *mode = &crtc->state->adjusted_mode;
95 unsigned int actual_w; 75 unsigned int actual_w;
96 unsigned int actual_h; 76 unsigned int actual_h;
97 77
98 actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
99 actual_h = exynos_plane_get_size(crtc_y, crtc_h, crtc->mode.vdisplay);
78 actual_w = exynos_plane_get_size(crtc_x, crtc_w, mode->hdisplay);
79 actual_h = exynos_plane_get_size(crtc_y, crtc_h, mode->vdisplay);
100 80
101 if (crtc_x < 0) { 81 if (crtc_x < 0) {
102 if (actual_w) 82 if (actual_w)
@@ -132,10 +112,10 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
132 exynos_plane->crtc_height = actual_h; 112 exynos_plane->crtc_height = actual_h;
133 113
134 /* set drm mode data. */ 114 /* set drm mode data. */
135 exynos_plane->mode_width = crtc->mode.hdisplay;
136 exynos_plane->mode_height = crtc->mode.vdisplay;
137 exynos_plane->refresh = crtc->mode.vrefresh;
138 exynos_plane->scan_flag = crtc->mode.flags;
115 exynos_plane->mode_width = mode->hdisplay;
116 exynos_plane->mode_height = mode->vdisplay;
117 exynos_plane->refresh = mode->vrefresh;
118 exynos_plane->scan_flag = mode->flags;
139 119
140 DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)", 120 DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
141 exynos_plane->crtc_x, exynos_plane->crtc_y, 121 exynos_plane->crtc_x, exynos_plane->crtc_y,
@@ -144,48 +124,83 @@ void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
144 plane->crtc = crtc; 124 plane->crtc = crtc;
145} 125}
146 126
147int
148exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
149 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
150 unsigned int crtc_w, unsigned int crtc_h,
151 uint32_t src_x, uint32_t src_y,
152 uint32_t src_w, uint32_t src_h)
127static struct drm_plane_funcs exynos_plane_funcs = {
128 .update_plane = drm_atomic_helper_update_plane,
129 .disable_plane = drm_atomic_helper_disable_plane,
130 .destroy = drm_plane_cleanup,
131 .reset = drm_atomic_helper_plane_reset,
132 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
133 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
134};
135
136static int exynos_plane_atomic_check(struct drm_plane *plane,
137 struct drm_plane_state *state)
153{ 138{
139 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
140 int nr;
141 int i;
142
143 if (!state->fb)
144 return 0;
145
146 nr = exynos_drm_fb_get_buf_cnt(state->fb);
147 for (i = 0; i < nr; i++) {
148 struct exynos_drm_gem_buf *buffer =
149 exynos_drm_fb_buffer(state->fb, i);
150
151 if (!buffer) {
152 DRM_DEBUG_KMS("buffer is null\n");
153 return -EFAULT;
154 }
155
156 exynos_plane->dma_addr[i] = buffer->dma_addr +
157 state->fb->offsets[i];
158
159 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
160 i, (unsigned long)exynos_plane->dma_addr[i]);
161 }
162
163 return 0;
164}
154 165
155 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
166static void exynos_plane_atomic_update(struct drm_plane *plane,
167 struct drm_plane_state *old_state)
168{
169 struct drm_plane_state *state = plane->state;
170 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(state->crtc);
156 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); 171 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
157 int ret;
158 172
159 ret = exynos_check_plane(plane, fb);
160 if (ret < 0)
161 return ret;
173 if (!state->crtc)
174 return;
162 175
163 exynos_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y,
164 crtc_w, crtc_h, src_x >> 16, src_y >> 16,
165 src_w >> 16, src_h >> 16);
176 exynos_plane_mode_set(plane, state->crtc, state->fb,
177 state->crtc_x, state->crtc_y,
178 state->crtc_w, state->crtc_h,
179 state->src_x >> 16, state->src_y >> 16,
180 state->src_w >> 16, state->src_h >> 16);
166 181
167 if (exynos_crtc->ops->win_commit) 182 if (exynos_crtc->ops->win_commit)
168 exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos); 183 exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);
169
170 return 0;
171} 184}
172 185
173static int exynos_disable_plane(struct drm_plane *plane)
186static void exynos_plane_atomic_disable(struct drm_plane *plane,
187 struct drm_plane_state *old_state)
174{ 188{
175 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); 189 struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
176 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc);
190 struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(old_state->crtc);
177 191
178 if (exynos_crtc && exynos_crtc->ops->win_disable)
192 if (!old_state->crtc)
193 return;
194
195 if (exynos_crtc->ops->win_disable)
179 exynos_crtc->ops->win_disable(exynos_crtc, 196 exynos_crtc->ops->win_disable(exynos_crtc,
180 exynos_plane->zpos); 197 exynos_plane->zpos);
181
182 return 0;
183} 198}
184 199
185static struct drm_plane_funcs exynos_plane_funcs = {
186 .update_plane = exynos_update_plane,
187 .disable_plane = exynos_disable_plane,
188 .destroy = drm_plane_cleanup,
200static const struct drm_plane_helper_funcs plane_helper_funcs = {
201 .atomic_check = exynos_plane_atomic_check,
202 .atomic_update = exynos_plane_atomic_update,
203 .atomic_disable = exynos_plane_atomic_disable,
189}; 204};
190 205
191static void exynos_plane_attach_zpos_property(struct drm_plane *plane, 206static void exynos_plane_attach_zpos_property(struct drm_plane *plane,
@@ -223,6 +238,8 @@ int exynos_plane_init(struct drm_device *dev,
223 return err; 238 return err;
224 } 239 }
225 240
241 drm_plane_helper_add(&exynos_plane->base, &plane_helper_funcs);
242
226 exynos_plane->zpos = zpos; 243 exynos_plane->zpos = zpos;
227 244
228 if (type == DRM_PLANE_TYPE_OVERLAY) 245 if (type == DRM_PLANE_TYPE_OVERLAY)
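(Aside: with the plane converted above, the legacy update_plane entry point is served by drm_atomic_helper_update_plane(), which builds a plane state, runs the driver's atomic_check, and only commits through atomic_update if the check passes. The self-contained sketch below models that delegation; struct plane_helper_funcs and helper_update_plane() are simplified stand-ins, not the DRM API.)

#include <stdio.h>

struct plane_state { int crtc_present; };

struct plane_helper_funcs {
	int  (*atomic_check)(struct plane_state *s);
	void (*atomic_update)(struct plane_state *s);
};

/* driver hooks, as wired up by drm_plane_helper_add() in the patch */
static int check(struct plane_state *s)
{
	return s->crtc_present ? 0 : -22;	/* -EINVAL */
}

static void update(struct plane_state *s)
{
	(void)s;	/* a real driver would program the window from s */
	printf("win_commit pushed to hardware\n");
}

/* legacy entry point: build state, validate, then commit */
static int helper_update_plane(const struct plane_helper_funcs *funcs)
{
	struct plane_state s = { .crtc_present = 1 };
	int ret = funcs->atomic_check(&s);

	if (ret)
		return ret;
	funcs->atomic_update(&s);
	return 0;
}

int main(void)
{
	const struct plane_helper_funcs funcs = {
		.atomic_check = check,
		.atomic_update = update,
	};

	return helper_update_plane(&funcs);
}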
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
index f360590d1412..8c88ae983c38 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -9,17 +9,6 @@
9 * 9 *
10 */ 10 */
11 11
12int exynos_check_plane(struct drm_plane *plane, struct drm_framebuffer *fb);
13void exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
14 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
15 unsigned int crtc_w, unsigned int crtc_h,
16 uint32_t src_x, uint32_t src_y,
17 uint32_t src_w, uint32_t src_h);
18int exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
19 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
20 unsigned int crtc_w, unsigned int crtc_h,
21 uint32_t src_x, uint32_t src_y,
22 uint32_t src_w, uint32_t src_h);
23int exynos_plane_init(struct drm_device *dev, 12int exynos_plane_init(struct drm_device *dev,
24 struct exynos_drm_plane *exynos_plane, 13 struct exynos_drm_plane *exynos_plane,
25 unsigned long possible_crtcs, enum drm_plane_type type, 14 unsigned long possible_crtcs, enum drm_plane_type type,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 1b3479a8db5f..3413393d8a16 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -20,6 +20,7 @@
20 20
21#include <drm/drm_edid.h> 21#include <drm/drm_edid.h>
22#include <drm/drm_crtc_helper.h> 22#include <drm/drm_crtc_helper.h>
23#include <drm/drm_atomic_helper.h>
23 24
24#include "exynos_drm_drv.h" 25#include "exynos_drm_drv.h"
25#include "exynos_drm_crtc.h" 26#include "exynos_drm_crtc.h"
@@ -130,78 +131,34 @@ static void vidi_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
130 131
131 plane = &ctx->planes[win]; 132 plane = &ctx->planes[win];
132 133
133 plane->enabled = true;
134
135 DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr); 134 DRM_DEBUG_KMS("dma_addr = %pad\n", plane->dma_addr);
136 135
137 if (ctx->vblank_on) 136 if (ctx->vblank_on)
138 schedule_work(&ctx->work); 137 schedule_work(&ctx->work);
139} 138}
140 139
141static void vidi_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
140static void vidi_enable(struct exynos_drm_crtc *crtc)
142{ 141{
143 struct vidi_context *ctx = crtc->ctx; 142 struct vidi_context *ctx = crtc->ctx;
144 struct exynos_drm_plane *plane;
145
146 if (win < 0 || win >= WINDOWS_NR)
147 return;
148
149 plane = &ctx->planes[win];
150 plane->enabled = false;
151
152 /* TODO. */
153}
154
155static int vidi_power_on(struct vidi_context *ctx, bool enable)
156{
157 struct exynos_drm_plane *plane;
158 int i;
159
160 DRM_DEBUG_KMS("%s\n", __FILE__);
161 143
162 if (enable != false && enable != true)
144 mutex_lock(&ctx->lock);
163 return -EINVAL;
164
165 if (enable) {
166 ctx->suspended = false;
167 145
168 /* if vblank was enabled status, enable it again. */
146 ctx->suspended = false;
169 if (test_and_clear_bit(0, &ctx->irq_flags))
170 vidi_enable_vblank(ctx->crtc);
171 147
172 for (i = 0; i < WINDOWS_NR; i++) {
173 plane = &ctx->planes[i];
174 if (plane->enabled)
148 /* if vblank was enabled status, enable it again. */
149 if (test_and_clear_bit(0, &ctx->irq_flags))
150 vidi_enable_vblank(ctx->crtc);
175 vidi_win_commit(ctx->crtc, i);
176 }
177 } else {
178 ctx->suspended = true;
179 }
180 151
181 return 0;
152 mutex_unlock(&ctx->lock);
182} 153}
183 154
184static void vidi_dpms(struct exynos_drm_crtc *crtc, int mode)
155static void vidi_disable(struct exynos_drm_crtc *crtc)
185{ 156{
186 struct vidi_context *ctx = crtc->ctx; 157 struct vidi_context *ctx = crtc->ctx;
187 158
188 DRM_DEBUG_KMS("%d\n", mode);
189
190 mutex_lock(&ctx->lock); 159 mutex_lock(&ctx->lock);
191 160
192 switch (mode) {
161 ctx->suspended = true;
193 case DRM_MODE_DPMS_ON:
194 vidi_power_on(ctx, true);
195 break;
196 case DRM_MODE_DPMS_STANDBY:
197 case DRM_MODE_DPMS_SUSPEND:
198 case DRM_MODE_DPMS_OFF:
199 vidi_power_on(ctx, false);
200 break;
201 default:
202 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
203 break;
204 }
205 162
206 mutex_unlock(&ctx->lock); 163 mutex_unlock(&ctx->lock);
207} 164}
@@ -218,11 +175,11 @@ static int vidi_ctx_initialize(struct vidi_context *ctx,
218} 175}
219 176
220static const struct exynos_drm_crtc_ops vidi_crtc_ops = { 177static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
221 .dpms = vidi_dpms,
178 .enable = vidi_enable,
179 .disable = vidi_disable,
222 .enable_vblank = vidi_enable_vblank, 180 .enable_vblank = vidi_enable_vblank,
223 .disable_vblank = vidi_disable_vblank, 181 .disable_vblank = vidi_disable_vblank,
224 .win_commit = vidi_win_commit, 182 .win_commit = vidi_win_commit,
225 .win_disable = vidi_win_disable,
226}; 183};
227 184
228static void vidi_fake_vblank_handler(struct work_struct *work) 185static void vidi_fake_vblank_handler(struct work_struct *work)
@@ -384,10 +341,13 @@ static void vidi_connector_destroy(struct drm_connector *connector)
384} 341}
385 342
386static struct drm_connector_funcs vidi_connector_funcs = { 343static struct drm_connector_funcs vidi_connector_funcs = {
387 .dpms = drm_helper_connector_dpms,
344 .dpms = drm_atomic_helper_connector_dpms,
388 .fill_modes = drm_helper_probe_single_connector_modes, 345 .fill_modes = drm_helper_probe_single_connector_modes,
389 .detect = vidi_detect, 346 .detect = vidi_detect,
390 .destroy = vidi_connector_destroy, 347 .destroy = vidi_connector_destroy,
348 .reset = drm_atomic_helper_connector_reset,
349 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
350 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
391}; 351};
392 352
393static int vidi_get_modes(struct drm_connector *connector) 353static int vidi_get_modes(struct drm_connector *connector)
@@ -520,16 +480,6 @@ static int vidi_probe(struct platform_device *pdev)
520 ctx->default_win = 0; 480 ctx->default_win = 0;
521 ctx->pdev = pdev; 481 ctx->pdev = pdev;
522 482
523 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
524 EXYNOS_DISPLAY_TYPE_VIDI);
525 if (ret)
526 return ret;
527
528 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
529 ctx->display.type);
530 if (ret)
531 goto err_del_crtc_component;
532
533 INIT_WORK(&ctx->work, vidi_fake_vblank_handler); 483 INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
534 484
535 mutex_init(&ctx->lock); 485 mutex_init(&ctx->lock);
@@ -539,7 +489,7 @@ static int vidi_probe(struct platform_device *pdev)
539 ret = device_create_file(&pdev->dev, &dev_attr_connection); 489 ret = device_create_file(&pdev->dev, &dev_attr_connection);
540 if (ret < 0) { 490 if (ret < 0) {
541 DRM_ERROR("failed to create connection sysfs.\n"); 491 DRM_ERROR("failed to create connection sysfs.\n");
542 goto err_del_conn_component;
492 return ret;
543 } 493 }
544 494
545 ret = component_add(&pdev->dev, &vidi_component_ops); 495 ret = component_add(&pdev->dev, &vidi_component_ops);
@@ -550,10 +500,6 @@ static int vidi_probe(struct platform_device *pdev)
550 500
551err_remove_file: 501err_remove_file:
552 device_remove_file(&pdev->dev, &dev_attr_connection); 502 device_remove_file(&pdev->dev, &dev_attr_connection);
553err_del_conn_component:
554 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
555err_del_crtc_component:
556 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
557 503
558 return ret; 504 return ret;
559} 505}
@@ -570,8 +516,6 @@ static int vidi_remove(struct platform_device *pdev)
570 } 516 }
571 517
572 component_del(&pdev->dev, &vidi_component_ops); 518 component_del(&pdev->dev, &vidi_component_ops);
573 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
574 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
575 519
576 return 0; 520 return 0;
577} 521}
@@ -584,38 +528,3 @@ struct platform_driver vidi_driver = {
584 .owner = THIS_MODULE, 528 .owner = THIS_MODULE,
585 }, 529 },
586}; 530};
587
588int exynos_drm_probe_vidi(void)
589{
590 struct platform_device *pdev;
591 int ret;
592
593 pdev = platform_device_register_simple("exynos-drm-vidi", -1, NULL, 0);
594 if (IS_ERR(pdev))
595 return PTR_ERR(pdev);
596
597 ret = platform_driver_register(&vidi_driver);
598 if (ret) {
599 platform_device_unregister(pdev);
600 return ret;
601 }
602
603 return ret;
604}
605
606static int exynos_drm_remove_vidi_device(struct device *dev, void *data)
607{
608 platform_device_unregister(to_platform_device(dev));
609
610 return 0;
611}
612
613void exynos_drm_remove_vidi(void)
614{
615 int ret = driver_for_each_device(&vidi_driver.driver, NULL, NULL,
616 exynos_drm_remove_vidi_device);
617 /* silence compiler warning */
618 (void)ret;
619
620 platform_driver_unregister(&vidi_driver);
621}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 5eba971f394a..99e286489031 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -17,6 +17,7 @@
17#include <drm/drmP.h> 17#include <drm/drmP.h>
18#include <drm/drm_edid.h> 18#include <drm/drm_edid.h>
19#include <drm/drm_crtc_helper.h> 19#include <drm/drm_crtc_helper.h>
20#include <drm/drm_atomic_helper.h>
20 21
21#include "regs-hdmi.h" 22#include "regs-hdmi.h"
22 23
@@ -1050,10 +1051,13 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
1050} 1051}
1051 1052
1052static struct drm_connector_funcs hdmi_connector_funcs = { 1053static struct drm_connector_funcs hdmi_connector_funcs = {
1053 .dpms = drm_helper_connector_dpms,
1054 .dpms = drm_atomic_helper_connector_dpms,
1054 .fill_modes = drm_helper_probe_single_connector_modes, 1055 .fill_modes = drm_helper_probe_single_connector_modes,
1055 .detect = hdmi_detect, 1056 .detect = hdmi_detect,
1056 .destroy = hdmi_connector_destroy, 1057 .destroy = hdmi_connector_destroy,
1058 .reset = drm_atomic_helper_connector_reset,
1059 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1060 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1057}; 1061};
1058 1062
1059static int hdmi_get_modes(struct drm_connector *connector) 1063static int hdmi_get_modes(struct drm_connector *connector)
@@ -2123,8 +2127,8 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
2123 */ 2127 */
2124 if (crtc) 2128 if (crtc)
2125 funcs = crtc->helper_private; 2129 funcs = crtc->helper_private;
2126 if (funcs && funcs->dpms)
2127 (*funcs->dpms)(crtc, mode);
2130 if (funcs && funcs->disable)
2131 (*funcs->disable)(crtc);
2128 2132
2129 hdmi_poweroff(hdata); 2133 hdmi_poweroff(hdata);
2130 break; 2134 break;
@@ -2356,20 +2360,13 @@ static int hdmi_probe(struct platform_device *pdev)
2356 hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI; 2360 hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI;
2357 hdata->display.ops = &hdmi_display_ops; 2361 hdata->display.ops = &hdmi_display_ops;
2358 2362
2359 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
2360 hdata->display.type);
2361 if (ret)
2362 return ret;
2363
2364 mutex_init(&hdata->hdmi_mutex); 2363 mutex_init(&hdata->hdmi_mutex);
2365 2364
2366 platform_set_drvdata(pdev, hdata); 2365 platform_set_drvdata(pdev, hdata);
2367 2366
2368 match = of_match_node(hdmi_match_types, dev->of_node); 2367 match = of_match_node(hdmi_match_types, dev->of_node);
2369 if (!match) {
2370 ret = -ENODEV;
2368 if (!match)
2369 return -ENODEV;
2371 goto err_del_component;
2372 }
2373 2370
2374 drv_data = (struct hdmi_driver_data *)match->data; 2371 drv_data = (struct hdmi_driver_data *)match->data;
2375 hdata->type = drv_data->type; 2372 hdata->type = drv_data->type;
@@ -2389,13 +2386,13 @@ static int hdmi_probe(struct platform_device *pdev)
2389 hdata->regs = devm_ioremap_resource(dev, res); 2386 hdata->regs = devm_ioremap_resource(dev, res);
2390 if (IS_ERR(hdata->regs)) { 2387 if (IS_ERR(hdata->regs)) {
2391 ret = PTR_ERR(hdata->regs); 2388 ret = PTR_ERR(hdata->regs);
2392 goto err_del_component;
2389 return ret;
2393 } 2390 }
2394 2391
2395 ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD"); 2392 ret = devm_gpio_request(dev, hdata->hpd_gpio, "HPD");
2396 if (ret) { 2393 if (ret) {
2397 DRM_ERROR("failed to request HPD gpio\n"); 2394 DRM_ERROR("failed to request HPD gpio\n");
2398 goto err_del_component;
2395 return ret;
2399 } 2396 }
2400 2397
2401 ddc_node = hdmi_legacy_ddc_dt_binding(dev); 2398 ddc_node = hdmi_legacy_ddc_dt_binding(dev);
@@ -2406,8 +2403,7 @@ static int hdmi_probe(struct platform_device *pdev)
2406 ddc_node = of_parse_phandle(dev->of_node, "ddc", 0); 2403 ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
2407 if (!ddc_node) { 2404 if (!ddc_node) {
2408 DRM_ERROR("Failed to find ddc node in device tree\n"); 2405 DRM_ERROR("Failed to find ddc node in device tree\n");
2409 ret = -ENODEV; 2406 return -ENODEV;
2410 goto err_del_component;
2411 } 2407 }
2412 2408
2413out_get_ddc_adpt: 2409out_get_ddc_adpt:
@@ -2491,9 +2487,6 @@ err_hdmiphy:
2491err_ddc: 2487err_ddc:
2492 put_device(&hdata->ddc_adpt->dev); 2488 put_device(&hdata->ddc_adpt->dev);
2493 2489
2494err_del_component:
2495 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
2496
2497 return ret; 2490 return ret;
2498} 2491}
2499 2492
@@ -2513,7 +2506,6 @@ static int hdmi_remove(struct platform_device *pdev)
2513 pm_runtime_disable(&pdev->dev); 2506 pm_runtime_disable(&pdev->dev);
2514 component_del(&pdev->dev, &hdmi_component_ops); 2507 component_del(&pdev->dev, &hdmi_component_ops);
2515 2508
2516 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
2517 return 0; 2509 return 0;
2518} 2510}
2519 2511
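The connector change in the hunks above follows the usual recipe for moving a legacy connector over to the atomic DPMS helper: drm_atomic_helper_connector_dpms() operates on connector->state, so the reset/duplicate/destroy state hooks must be wired as well. A minimal sketch of that pattern, assuming a driver with its own detect and destroy callbacks (my_detect and my_destroy are placeholders, not names from this tree):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>

/* Sketch: an atomic-ready connector funcs table. Without the three
 * state hooks, the atomic DPMS helper would dereference a NULL
 * connector->state. */
static const struct drm_connector_funcs my_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.detect = my_detect,		/* driver-specific, placeholder */
	.destroy = my_destroy,		/* driver-specific, placeholder */
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};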
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 8874c1fcb3ab..cae98db33062 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -882,10 +882,12 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
 		}
 	}
 
-	if (!is_drm_iommu_supported(mixer_ctx->drm_dev))
-		return 0;
+	ret = drm_iommu_attach_device_if_possible(mixer_ctx->crtc, drm_dev,
+						  mixer_ctx->dev);
+	if (ret)
+		priv->pipe--;
 
-	return drm_iommu_attach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+	return ret;
 }
 
 static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
@@ -937,8 +939,6 @@ static void mixer_win_commit(struct exynos_drm_crtc *crtc, unsigned int win)
 		vp_video_buffer(mixer_ctx, win);
 	else
 		mixer_graph_buffer(mixer_ctx, win);
-
-	mixer_ctx->planes[win].enabled = true;
 }
 
 static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
@@ -952,7 +952,6 @@ static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
 	mutex_lock(&mixer_ctx->mixer_mutex);
 	if (!mixer_ctx->powered) {
 		mutex_unlock(&mixer_ctx->mixer_mutex);
-		mixer_ctx->planes[win].resume = false;
 		return;
 	}
 	mutex_unlock(&mixer_ctx->mixer_mutex);
@@ -964,8 +963,6 @@ static void mixer_win_disable(struct exynos_drm_crtc *crtc, unsigned int win)
 
 	mixer_vsync_set_update(mixer_ctx, true);
 	spin_unlock_irqrestore(&res->reg_slock, flags);
-
-	mixer_ctx->planes[win].enabled = false;
 }
 
 static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
@@ -1000,36 +997,11 @@ static void mixer_wait_for_vblank(struct exynos_drm_crtc *crtc)
 	drm_vblank_put(mixer_ctx->drm_dev, mixer_ctx->pipe);
 }
 
-static void mixer_window_suspend(struct mixer_context *ctx)
-{
-	struct exynos_drm_plane *plane;
-	int i;
-
-	for (i = 0; i < MIXER_WIN_NR; i++) {
-		plane = &ctx->planes[i];
-		plane->resume = plane->enabled;
-		mixer_win_disable(ctx->crtc, i);
-	}
-	mixer_wait_for_vblank(ctx->crtc);
-}
-
-static void mixer_window_resume(struct mixer_context *ctx)
-{
-	struct exynos_drm_plane *plane;
-	int i;
-
-	for (i = 0; i < MIXER_WIN_NR; i++) {
-		plane = &ctx->planes[i];
-		plane->enabled = plane->resume;
-		plane->resume = false;
-		if (plane->enabled)
-			mixer_win_commit(ctx->crtc, i);
-	}
-}
-
-static void mixer_poweron(struct mixer_context *ctx)
+static void mixer_enable(struct exynos_drm_crtc *crtc)
 {
+	struct mixer_context *ctx = crtc->ctx;
 	struct mixer_resources *res = &ctx->mixer_res;
+	int ret;
 
 	mutex_lock(&ctx->mixer_mutex);
 	if (ctx->powered) {
@@ -1041,12 +1013,32 @@ static void mixer_poweron(struct mixer_context *ctx)
 
 	pm_runtime_get_sync(ctx->dev);
 
-	clk_prepare_enable(res->mixer);
-	clk_prepare_enable(res->hdmi);
+	ret = clk_prepare_enable(res->mixer);
+	if (ret < 0) {
+		DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
+		return;
+	}
+	ret = clk_prepare_enable(res->hdmi);
+	if (ret < 0) {
+		DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
+		return;
+	}
 	if (ctx->vp_enabled) {
-		clk_prepare_enable(res->vp);
-		if (ctx->has_sclk)
-			clk_prepare_enable(res->sclk_mixer);
+		ret = clk_prepare_enable(res->vp);
+		if (ret < 0) {
+			DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
+				  ret);
+			return;
+		}
+		if (ctx->has_sclk) {
+			ret = clk_prepare_enable(res->sclk_mixer);
+			if (ret < 0) {
+				DRM_ERROR("Failed to prepare_enable the " \
+					  "sclk_mixer clk [%d]\n",
+					  ret);
+				return;
+			}
+		}
 	}
 
 	mutex_lock(&ctx->mixer_mutex);
@@ -1057,13 +1049,13 @@ static void mixer_poweron(struct mixer_context *ctx)
 
 	mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
 	mixer_win_reset(ctx);
-
-	mixer_window_resume(ctx);
 }
 
-static void mixer_poweroff(struct mixer_context *ctx)
+static void mixer_disable(struct exynos_drm_crtc *crtc)
 {
+	struct mixer_context *ctx = crtc->ctx;
 	struct mixer_resources *res = &ctx->mixer_res;
+	int i;
 
 	mutex_lock(&ctx->mixer_mutex);
 	if (!ctx->powered) {
@@ -1074,7 +1066,9 @@ static void mixer_poweroff(struct mixer_context *ctx)
 
 	mixer_stop(ctx);
 	mixer_regs_dump(ctx);
-	mixer_window_suspend(ctx);
+
+	for (i = 0; i < MIXER_WIN_NR; i++)
+		mixer_win_disable(crtc, i);
 
 	ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
 
@@ -1093,23 +1087,6 @@ static void mixer_poweroff(struct mixer_context *ctx)
 	pm_runtime_put_sync(ctx->dev);
 }
 
-static void mixer_dpms(struct exynos_drm_crtc *crtc, int mode)
-{
-	switch (mode) {
-	case DRM_MODE_DPMS_ON:
-		mixer_poweron(crtc->ctx);
-		break;
-	case DRM_MODE_DPMS_STANDBY:
-	case DRM_MODE_DPMS_SUSPEND:
-	case DRM_MODE_DPMS_OFF:
-		mixer_poweroff(crtc->ctx);
-		break;
-	default:
-		DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
-		break;
-	}
-}
-
 /* Only valid for Mixer version 16.0.33.0 */
 int mixer_check_mode(struct drm_display_mode *mode)
 {
@@ -1131,7 +1108,8 @@ int mixer_check_mode(struct drm_display_mode *mode)
 }
 
 static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
-	.dpms = mixer_dpms,
+	.enable = mixer_enable,
+	.disable = mixer_disable,
 	.enable_vblank = mixer_enable_vblank,
 	.disable_vblank = mixer_disable_vblank,
 	.wait_for_vblank = mixer_wait_for_vblank,
@@ -1280,18 +1258,9 @@ static int mixer_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, ctx);
 
-	ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
-				       EXYNOS_DISPLAY_TYPE_HDMI);
-	if (ret)
-		return ret;
-
 	ret = component_add(&pdev->dev, &mixer_component_ops);
-	if (ret) {
-		exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
-		return ret;
-	}
-
-	pm_runtime_enable(dev);
+	if (!ret)
+		pm_runtime_enable(dev);
 
 	return ret;
 }
@@ -1301,7 +1270,6 @@ static int mixer_remove(struct platform_device *pdev)
 	pm_runtime_disable(&pdev->dev);
 
 	component_del(&pdev->dev, &mixer_component_ops);
-	exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 9605ff8f2fcd..306d9e4e5cf3 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -123,7 +123,7 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
 	CMD(  MI_SEMAPHORE_MBOX,                SMI,   !F,  0xFF,   R  ),
 	CMD(  MI_STORE_DWORD_INDEX,             SMI,   !F,  0xFF,   R  ),
 	CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
-	      .reg = { .offset = 1, .mask = 0x007FFFFC } ),
+	      .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
 	CMD(  MI_STORE_REGISTER_MEM(1),         SMI,   !F,  0xFF,   W | B,
 	      .reg = { .offset = 1, .mask = 0x007FFFFC },
 	      .bits = {{
@@ -395,16 +395,38 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
 
 /*
  * Register whitelists, sorted by increasing register offset.
+ */
+
+/*
+ * An individual whitelist entry granting access to register addr. If
+ * mask is non-zero the argument of immediate register writes will be
+ * AND-ed with mask, and the command will be rejected if the result
+ * doesn't match value.
+ *
+ * Registers with non-zero mask are only allowed to be written using
+ * LRI.
+ */
+struct drm_i915_reg_descriptor {
+	u32 addr;
+	u32 mask;
+	u32 value;
+};
+
+/* Convenience macro for adding 32-bit registers. */
+#define REG32(address, ...) \
+	{ .addr = address, __VA_ARGS__ }
+
+/*
+ * Convenience macro for adding 64-bit registers.
  *
  * Some registers that userspace accesses are 64 bits. The register
  * access commands only allow 32-bit accesses. Hence, we have to include
  * entries for both halves of the 64-bit registers.
  */
+#define REG64(addr) \
+	REG32(addr), REG32(addr + sizeof(u32))
 
-/* Convenience macro for adding 64-bit registers */
-#define REG64(addr) (addr), (addr + sizeof(u32))
-
-static const u32 gen7_render_regs[] = {
+static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
 	REG64(GPGPU_THREADS_DISPATCHED),
 	REG64(HS_INVOCATION_COUNT),
 	REG64(DS_INVOCATION_COUNT),
@@ -417,15 +439,15 @@ static const u32 gen7_render_regs[] = {
 	REG64(CL_PRIMITIVES_COUNT),
 	REG64(PS_INVOCATION_COUNT),
 	REG64(PS_DEPTH_COUNT),
-	OACONTROL, /* Only allowed for LRI and SRM. See below. */
+	REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */
 	REG64(MI_PREDICATE_SRC0),
 	REG64(MI_PREDICATE_SRC1),
-	GEN7_3DPRIM_END_OFFSET,
-	GEN7_3DPRIM_START_VERTEX,
-	GEN7_3DPRIM_VERTEX_COUNT,
-	GEN7_3DPRIM_INSTANCE_COUNT,
-	GEN7_3DPRIM_START_INSTANCE,
-	GEN7_3DPRIM_BASE_VERTEX,
+	REG32(GEN7_3DPRIM_END_OFFSET),
+	REG32(GEN7_3DPRIM_START_VERTEX),
+	REG32(GEN7_3DPRIM_VERTEX_COUNT),
+	REG32(GEN7_3DPRIM_INSTANCE_COUNT),
+	REG32(GEN7_3DPRIM_START_INSTANCE),
+	REG32(GEN7_3DPRIM_BASE_VERTEX),
 	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
 	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
 	REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
@@ -434,33 +456,41 @@ static const u32 gen7_render_regs[] = {
 	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(1)),
 	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(2)),
 	REG64(GEN7_SO_PRIM_STORAGE_NEEDED(3)),
-	GEN7_SO_WRITE_OFFSET(0),
-	GEN7_SO_WRITE_OFFSET(1),
-	GEN7_SO_WRITE_OFFSET(2),
-	GEN7_SO_WRITE_OFFSET(3),
-	GEN7_L3SQCREG1,
-	GEN7_L3CNTLREG2,
-	GEN7_L3CNTLREG3,
+	REG32(GEN7_SO_WRITE_OFFSET(0)),
+	REG32(GEN7_SO_WRITE_OFFSET(1)),
+	REG32(GEN7_SO_WRITE_OFFSET(2)),
+	REG32(GEN7_SO_WRITE_OFFSET(3)),
+	REG32(GEN7_L3SQCREG1),
+	REG32(GEN7_L3CNTLREG2),
+	REG32(GEN7_L3CNTLREG3),
+	REG32(HSW_SCRATCH1,
+	      .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE,
+	      .value = 0),
+	REG32(HSW_ROW_CHICKEN3,
+	      .mask = ~(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE << 16 |
+			HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
+	      .value = 0),
 };
 
-static const u32 gen7_blt_regs[] = {
-	BCS_SWCTRL,
+static const struct drm_i915_reg_descriptor gen7_blt_regs[] = {
+	REG32(BCS_SWCTRL),
 };
 
-static const u32 ivb_master_regs[] = {
-	FORCEWAKE_MT,
-	DERRMR,
-	GEN7_PIPE_DE_LOAD_SL(PIPE_A),
-	GEN7_PIPE_DE_LOAD_SL(PIPE_B),
-	GEN7_PIPE_DE_LOAD_SL(PIPE_C),
+static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
+	REG32(FORCEWAKE_MT),
+	REG32(DERRMR),
+	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
+	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
+	REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
 };
 
-static const u32 hsw_master_regs[] = {
-	FORCEWAKE_MT,
-	DERRMR,
+static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
+	REG32(FORCEWAKE_MT),
+	REG32(DERRMR),
 };
 
 #undef REG64
+#undef REG32
 
 static u32 gen7_render_get_cmd_length_mask(u32 cmd_header)
 {
@@ -550,14 +580,16 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
 	return ret;
 }
 
-static bool check_sorted(int ring_id, const u32 *reg_table, int reg_count)
+static bool check_sorted(int ring_id,
+			 const struct drm_i915_reg_descriptor *reg_table,
+			 int reg_count)
 {
 	int i;
 	u32 previous = 0;
 	bool ret = true;
 
 	for (i = 0; i < reg_count; i++) {
-		u32 curr = reg_table[i];
+		u32 curr = reg_table[i].addr;
 
 		if (curr < previous) {
 			DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
@@ -804,18 +836,20 @@ find_cmd(struct intel_engine_cs *ring,
 	return default_desc;
 }
 
-static bool valid_reg(const u32 *table, int count, u32 addr)
+static const struct drm_i915_reg_descriptor *
+find_reg(const struct drm_i915_reg_descriptor *table,
+	 int count, u32 addr)
 {
-	if (table && count != 0) {
+	if (table) {
 		int i;
 
 		for (i = 0; i < count; i++) {
-			if (table[i] == addr)
-				return true;
+			if (table[i].addr == addr)
+				return &table[i];
 		}
 	}
 
-	return false;
+	return NULL;
 }
 
 static u32 *vmap_batch(struct drm_i915_gem_object *obj,
@@ -934,7 +968,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
 
 static bool check_cmd(const struct intel_engine_cs *ring,
 		      const struct drm_i915_cmd_descriptor *desc,
-		      const u32 *cmd,
+		      const u32 *cmd, u32 length,
 		      const bool is_master,
 		      bool *oacontrol_set)
 {
@@ -950,38 +984,70 @@ static bool check_cmd(const struct intel_engine_cs *ring,
 	}
 
 	if (desc->flags & CMD_DESC_REGISTER) {
-		u32 reg_addr = cmd[desc->reg.offset] & desc->reg.mask;
-
 		/*
-		 * OACONTROL requires some special handling for writes. We
-		 * want to make sure that any batch which enables OA also
-		 * disables it before the end of the batch. The goal is to
-		 * prevent one process from snooping on the perf data from
-		 * another process. To do that, we need to check the value
-		 * that will be written to the register. Hence, limit
-		 * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
+		 * Get the distance between individual register offset
+		 * fields if the command can perform more than one
+		 * access at a time.
 		 */
-		if (reg_addr == OACONTROL) {
-			if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
-				DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
+		const u32 step = desc->reg.step ? desc->reg.step : length;
+		u32 offset;
+
+		for (offset = desc->reg.offset; offset < length;
+		     offset += step) {
+			const u32 reg_addr = cmd[offset] & desc->reg.mask;
+			const struct drm_i915_reg_descriptor *reg =
+				find_reg(ring->reg_table, ring->reg_count,
+					 reg_addr);
+
+			if (!reg && is_master)
+				reg = find_reg(ring->master_reg_table,
+					       ring->master_reg_count,
+					       reg_addr);
+
+			if (!reg) {
+				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
						 reg_addr, *cmd, ring->id);
 				return false;
 			}
 
-			if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
-				*oacontrol_set = (cmd[2] != 0);
-		}
+			/*
+			 * OACONTROL requires some special handling for
+			 * writes. We want to make sure that any batch which
+			 * enables OA also disables it before the end of the
+			 * batch. The goal is to prevent one process from
+			 * snooping on the perf data from another process. To do
+			 * that, we need to check the value that will be written
+			 * to the register. Hence, limit OACONTROL writes to
			 * only MI_LOAD_REGISTER_IMM commands.
+			 */
+			if (reg_addr == OACONTROL) {
+				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+					DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
+					return false;
+				}
+
+				if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
+					*oacontrol_set = (cmd[offset + 1] != 0);
+			}
 
-		if (!valid_reg(ring->reg_table,
-			       ring->reg_count, reg_addr)) {
-			if (!is_master ||
-			    !valid_reg(ring->master_reg_table,
-				       ring->master_reg_count,
-				       reg_addr)) {
-				DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
-						 reg_addr,
-						 *cmd,
-						 ring->id);
-				return false;
+			/*
+			 * Check the value written to the register against the
+			 * allowed mask/value pair given in the whitelist entry.
+			 */
+			if (reg->mask) {
+				if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
+					DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
							 reg_addr);
+					return false;
+				}
+
+				if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
+				    (offset + 2 > length ||
+				     (cmd[offset + 1] & reg->mask) != reg->value)) {
+					DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
							 reg_addr);
+					return false;
+				}
 			}
 		}
 	}
@@ -1105,7 +1171,8 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
 			break;
 		}
 
-		if (!check_cmd(ring, desc, cmd, is_master, &oacontrol_set)) {
+		if (!check_cmd(ring, desc, cmd, length, is_master,
+			       &oacontrol_set)) {
 			ret = -EINVAL;
 			break;
 		}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 88cc793c46d3..82bbe3f2a7e1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1725,12 +1725,15 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 
 	if (HAS_PCH_SPLIT(dev))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
-	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
+	else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
+		 IS_I945G(dev) || IS_I945GM(dev))
 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
 	else if (IS_I915GM(dev))
 		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
 	else if (IS_PINEVIEW(dev))
 		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+	else if (IS_VALLEYVIEW(dev))
+		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 
 	intel_runtime_pm_put(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 72f5a3f9dbf2..542fac628b28 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2300,10 +2300,15 @@ struct drm_i915_cmd_descriptor {
 	 * Describes where to find a register address in the command to check
 	 * against the ring's register whitelist. Only valid if flags has the
 	 * CMD_DESC_REGISTER bit set.
+	 *
+	 * A non-zero step value implies that the command may access multiple
+	 * registers in sequence (e.g. LRI), in that case step gives the
+	 * distance in dwords between individual offset fields.
 	 */
 	struct {
 		u32 offset;
 		u32 mask;
+		u32 step;
 	} reg;
 
 #define MAX_CMD_DESC_BITMASKS 3
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index be35f0486202..248fd1ac7b3a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2813,9 +2813,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 {
 	WARN_ON(i915_verify_lists(ring->dev));
 
-	if (list_empty(&ring->active_list))
-		return;
-
 	/* Retire requests first as we use it above for the early return.
 	 * If we retire requests last, we may use a later seqno and so clear
 	 * the requests lists without clearing the active list, leading to
@@ -3241,8 +3238,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 		} else if (vma->ggtt_view.pages) {
 			sg_free_table(vma->ggtt_view.pages);
 			kfree(vma->ggtt_view.pages);
-			vma->ggtt_view.pages = NULL;
 		}
+		vma->ggtt_view.pages = NULL;
 	}
 
 	drm_mm_remove_node(&vma->node);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8867818b1401..d65cbe6afb92 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -157,9 +157,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	obj = i915_gem_object_create_stolen(dev, size);
-	if (obj == NULL)
-		obj = i915_gem_alloc_object(dev, size);
+	obj = i915_gem_alloc_object(dev, size);
 	if (obj == NULL)
 		return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bd0e4bda2c64..a7fa14516cda 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -32,6 +32,7 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
+#include <linux/uaccess.h>
 
 #define __EXEC_OBJECT_HAS_PIN (1<<31)
 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
@@ -458,7 +459,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	}
 
 	/* We can't wait for rendering with pagefaults disabled */
-	if (obj->active && in_atomic())
+	if (obj->active && pagefault_disabled())
 		return -EFAULT;
 
 	if (use_cpu_reloc(obj))
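The in_atomic() to pagefault_disabled() switch above matters because the relocation fast path copies from user memory inside a pagefault_disable()/pagefault_enable() section; on non-preemptible kernels in_atomic() cannot see that, while the explicit predicate from <linux/uaccess.h> (hence the new include) can. A hedged sketch of the bracketing this check relies on (example_copy_no_fault is an illustrative name):

#include <linux/uaccess.h>

/* Sketch: a copy that must not sleep. Inside the bracketed section
 * page faults fail fast instead of being serviced, and callees can
 * test pagefault_disabled() to pick a non-blocking path. */
static int example_copy_no_fault(void *dst, const void __user *src,
				 unsigned long len)
{
	unsigned long unwritten;

	pagefault_disable();
	unwritten = __copy_from_user_inatomic(dst, src, len);
	pagefault_enable();

	return unwritten ? -EFAULT : 0;
}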
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 619dad1b2386..dcc6a88c560e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -516,17 +516,17 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 		struct page *page_table;
 
 		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
-			continue;
+			break;
 
 		pd = ppgtt->pdp.page_directory[pdpe];
 
 		if (WARN_ON(!pd->page_table[pde]))
-			continue;
+			break;
 
 		pt = pd->page_table[pde];
 
 		if (WARN_ON(!pt->page))
-			continue;
+			break;
 
 		page_table = pt->page;
 
@@ -2546,6 +2546,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm;
+	struct i915_vma *vma;
+	bool flush;
 
 	i915_check_and_clear_faults(dev);
 
@@ -2555,16 +2557,23 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 			       dev_priv->gtt.base.total,
 			       true);
 
+	/* Cache flush objects bound into GGTT and rebind them. */
+	vm = &dev_priv->gtt.base;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-							   &dev_priv->gtt.base);
-		if (!vma)
-			continue;
+		flush = false;
+		list_for_each_entry(vma, &obj->vma_list, vma_link) {
+			if (vma->vm != vm)
+				continue;
 
-		i915_gem_clflush_object(obj, obj->pin_display);
-		WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
-	}
+			WARN_ON(i915_vma_bind(vma, obj->cache_level,
+					      PIN_UPDATE));
 
+			flush = true;
+		}
+
+		if (flush)
+			i915_gem_clflush_object(obj, obj->pin_display);
+	}
 
 	if (INTEL_INFO(dev)->gen >= 8) {
 		if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 633bd1fcab69..d61e74a08f82 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -183,8 +183,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	if (IS_GEN4(dev)) {
 		uint32_t ddc2 = I915_READ(DCC2);
 
-		if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+		if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+			/* Since the swizzling may vary within an
+			 * object, we have no idea what the swizzling
+			 * is for any page in particular. Thus we
+			 * cannot migrate tiled pages using the GPU,
+			 * nor can we tell userspace what the exact
+			 * swizzling is for any object.
+			 */
 			dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+		}
 	}
 
 	if (dcc == 0xffffffff) {
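For context on the SWIZZLE_UNKNOWN fallback above: with the fixed gen4 schemes, bit 6 of a linear offset is XOR-folded with higher address bits, so userspace can compute where a byte of a tiled buffer actually lives. A hedged sketch of the classic 9/10 fold (matching what I915_BIT_6_SWIZZLE_9_10 is commonly taken to mean; the exact fold is platform-specific, which is precisely why the detect code reports UNKNOWN once it also depends on physical address bits the kernel cannot expose):

#include <stdint.h>

/* Sketch: XOR address bits 9 and 10 into bit 6. (addr >> 3) moves
 * bit 9 down to the bit-6 position, (addr >> 4) moves bit 10 there. */
static uint32_t swizzle_offset_9_10(uint32_t addr)
{
	return addr ^ (((addr >> 3) ^ (addr >> 4)) & 64);
}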
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6d3fead3a358..2030f602cbf8 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3491,6 +3491,7 @@ enum skl_disp_power_wells {
 #define   BLM_POLARITY_PNV			(1 << 0) /* pnv only */
 
 #define BLC_HIST_CTL	(dev_priv->info.display_mmio_offset + 0x61260)
+#define  BLM_HISTOGRAM_ENABLE			(1 << 31)
 
 /* New registers for PCH-split platforms. Safe where new bits show up, the
  * register layout machtes with gen4 BLC_PWM_CTL[12]. */
@@ -6941,6 +6942,9 @@ enum skl_disp_power_wells {
 #define   AUDIO_CP_READY(trans)		((1 << 1) << ((trans) * 4))
 #define   AUDIO_ELD_VALID(trans)	((1 << 0) << ((trans) * 4))
 
+#define HSW_AUD_CHICKENBIT			0x65f10
+#define   SKL_AUD_CODEC_WAKE_SIGNAL		(1 << 15)
+
 /* HSW Power Wells */
 #define HSW_PWR_WELL_BIOS			0x45400 /* CTL1 */
 #define HSW_PWR_WELL_DRIVER			0x45404 /* CTL2 */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index c4312177b0ee..3da9b8409f20 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -470,6 +470,32 @@ static void i915_audio_component_put_power(struct device *dev)
 	intel_display_power_put(dev_to_i915(dev), POWER_DOMAIN_AUDIO);
 }
 
+static void i915_audio_component_codec_wake_override(struct device *dev,
+						     bool enable)
+{
+	struct drm_i915_private *dev_priv = dev_to_i915(dev);
+	u32 tmp;
+
+	if (!IS_SKYLAKE(dev_priv))
+		return;
+
+	/*
+	 * Enable/disable generating the codec wake signal, overriding the
+	 * internal logic to generate the codec wake to controller.
+	 */
+	tmp = I915_READ(HSW_AUD_CHICKENBIT);
+	tmp &= ~SKL_AUD_CODEC_WAKE_SIGNAL;
+	I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+	usleep_range(1000, 1500);
+
+	if (enable) {
+		tmp = I915_READ(HSW_AUD_CHICKENBIT);
+		tmp |= SKL_AUD_CODEC_WAKE_SIGNAL;
+		I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+		usleep_range(1000, 1500);
+	}
+}
+
 /* Get CDCLK in kHz */
 static int i915_audio_component_get_cdclk_freq(struct device *dev)
 {
@@ -491,6 +517,7 @@ static const struct i915_audio_component_ops i915_audio_component_ops = {
 	.owner		= THIS_MODULE,
 	.get_power	= i915_audio_component_get_power,
 	.put_power	= i915_audio_component_put_power,
+	.codec_wake_override = i915_audio_component_codec_wake_override,
 	.get_cdclk_freq	= i915_audio_component_get_cdclk_freq,
 };
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4e3f302d86f7..ba9321998a41 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -87,7 +87,8 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
 				   struct intel_crtc_state *pipe_config);
 
 static int intel_set_mode(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state);
+			  struct drm_atomic_state *state,
+			  bool force_restore);
 static int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *ifb,
 				  struct drm_mode_fb_cmd2 *mode_cmd,
@@ -4853,6 +4854,9 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
 	struct intel_plane *intel_plane;
 	int pipe = intel_crtc->pipe;
 
+	if (!intel_crtc->active)
+		return;
+
 	intel_crtc_wait_for_pending_flips(crtc);
 
 	intel_pre_disable_primary(crtc);
@@ -7886,7 +7890,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
 	int pipe = pipe_config->cpu_transcoder;
 	enum dpio_channel port = vlv_pipe_to_channel(pipe);
 	intel_clock_t clock;
-	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
+	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
 	int refclk = 100000;
 
 	mutex_lock(&dev_priv->sb_lock);
@@ -7894,10 +7898,13 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
 	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
 	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
 	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
 	mutex_unlock(&dev_priv->sb_lock);
 
 	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
-	clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
+	clock.m2 = (pll_dw0 & 0xff) << 22;
+	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+		clock.m2 |= pll_dw2 & 0x3fffff;
 	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
 	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
 	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
@@ -10096,7 +10103,7 @@ retry:
 
 	drm_mode_copy(&crtc_state->base.mode, mode);
 
-	if (intel_set_mode(crtc, state)) {
+	if (intel_set_mode(crtc, state, true)) {
 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
@@ -10170,7 +10177,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
 	if (ret)
 		goto fail;
 
-	ret = intel_set_mode(crtc, state);
+	ret = intel_set_mode(crtc, state, true);
 	if (ret)
 		goto fail;
 
@@ -11385,10 +11392,6 @@ static void intel_modeset_fixup_state(struct drm_atomic_state *state)
 		crtc->base.enabled = crtc->base.state->enable;
 		crtc->config = to_intel_crtc_state(crtc->base.state);
 	}
-
-	/* Copy the new configuration to the staged state, to keep the few
-	 * pieces of code that haven't been converted yet happy */
-	intel_modeset_update_staged_output_state(state->dev);
 }
 
 static void
@@ -11870,15 +11873,15 @@ intel_modeset_update_state(struct drm_atomic_state *state)
 		if (!intel_encoder->base.crtc)
 			continue;
 
-		for_each_crtc_in_state(state, crtc, crtc_state, i)
-			if (crtc == intel_encoder->base.crtc)
-				break;
+		for_each_crtc_in_state(state, crtc, crtc_state, i) {
+			if (crtc != intel_encoder->base.crtc)
+				continue;
 
-		if (crtc != intel_encoder->base.crtc)
-			continue;
+			if (crtc_state->enable && needs_modeset(crtc_state))
+				intel_encoder->connectors_active = false;
 
-		if (crtc_state->enable && needs_modeset(crtc_state))
-			intel_encoder->connectors_active = false;
+			break;
+		}
 	}
 
 	drm_atomic_helper_swap_state(state->dev, state);
@@ -11893,24 +11896,24 @@ intel_modeset_update_state(struct drm_atomic_state *state)
 		if (!connector->encoder || !connector->encoder->crtc)
 			continue;
 
-		for_each_crtc_in_state(state, crtc, crtc_state, i)
-			if (crtc == connector->encoder->crtc)
-				break;
+		for_each_crtc_in_state(state, crtc, crtc_state, i) {
+			if (crtc != connector->encoder->crtc)
+				continue;
 
-		if (crtc != connector->encoder->crtc)
-			continue;
+			if (crtc->state->enable && needs_modeset(crtc->state)) {
+				struct drm_property *dpms_property =
+					dev->mode_config.dpms_property;
 
-		if (crtc->state->enable && needs_modeset(crtc->state)) {
-			struct drm_property *dpms_property =
-				dev->mode_config.dpms_property;
+				connector->dpms = DRM_MODE_DPMS_ON;
+				drm_object_property_set_value(&connector->base,
							      dpms_property,
+							      DRM_MODE_DPMS_ON);
 
-			connector->dpms = DRM_MODE_DPMS_ON;
-			drm_object_property_set_value(&connector->base,
-						      dpms_property,
-						      DRM_MODE_DPMS_ON);
+				intel_encoder = to_intel_encoder(connector->encoder);
+				intel_encoder->connectors_active = true;
+			}
 
-			intel_encoder = to_intel_encoder(connector->encoder);
-			intel_encoder->connectors_active = true;
+			break;
 		}
 	}
 
@@ -12646,20 +12649,24 @@ static int __intel_set_mode(struct drm_crtc *modeset_crtc,
 }
 
 static int intel_set_mode_with_config(struct drm_crtc *crtc,
-				      struct intel_crtc_state *pipe_config)
+				      struct intel_crtc_state *pipe_config,
+				      bool force_restore)
 {
 	int ret;
 
 	ret = __intel_set_mode(crtc, pipe_config);
 
-	if (ret == 0)
+	if (ret == 0 && force_restore) {
+		intel_modeset_update_staged_output_state(crtc->dev);
 		intel_modeset_check_state(crtc->dev);
+	}
 
 	return ret;
 }
 
 static int intel_set_mode(struct drm_crtc *crtc,
-			  struct drm_atomic_state *state)
+			  struct drm_atomic_state *state,
+			  bool force_restore)
 {
 	struct intel_crtc_state *pipe_config;
 	int ret = 0;
@@ -12670,7 +12677,7 @@ static int intel_set_mode(struct drm_crtc *crtc,
 		goto out;
 	}
 
-	ret = intel_set_mode_with_config(crtc, pipe_config);
+	ret = intel_set_mode_with_config(crtc, pipe_config, force_restore);
 	if (ret)
 		goto out;
 
@@ -12682,7 +12689,6 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_atomic_state *state;
-	struct intel_crtc *intel_crtc;
 	struct intel_encoder *encoder;
 	struct intel_connector *connector;
 	struct drm_connector_state *connector_state;
@@ -12725,29 +12731,23 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
 		}
 	}
 
-	for_each_intel_crtc(dev, intel_crtc) {
-		if (intel_crtc->new_enabled == intel_crtc->base.enabled)
-			continue;
-
-		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-		if (IS_ERR(crtc_state)) {
-			DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
-				      intel_crtc->base.base.id,
-				      PTR_ERR(crtc_state));
-			continue;
-		}
+	crtc_state = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
+	if (IS_ERR(crtc_state)) {
+		DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
+			      crtc->base.id, PTR_ERR(crtc_state));
+		drm_atomic_state_free(state);
+		return;
+	}
 
-		crtc_state->base.active = crtc_state->base.enable =
-			intel_crtc->new_enabled;
+	crtc_state->base.active = crtc_state->base.enable =
+		to_intel_crtc(crtc)->new_enabled;
 
-		if (&intel_crtc->base == crtc)
-			drm_mode_copy(&crtc_state->base.mode, &crtc->mode);
-	}
+	drm_mode_copy(&crtc_state->base.mode, &crtc->mode);
 
 	intel_modeset_setup_plane_state(state, crtc, &crtc->mode,
 					crtc->primary->fb, crtc->x, crtc->y);
 
-	ret = intel_set_mode(crtc, state);
+	ret = intel_set_mode(crtc, state, false);
 	if (ret)
 		drm_atomic_state_free(state);
 }
@@ -12947,7 +12947,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 
 	primary_plane_was_visible = primary_plane_visible(set->crtc);
 
-	ret = intel_set_mode_with_config(set->crtc, pipe_config);
+	ret = intel_set_mode_with_config(set->crtc, pipe_config, true);
 
 	if (ret == 0 &&
 	    pipe_config->base.enable &&
@@ -13276,7 +13276,7 @@ intel_check_primary_plane(struct drm_plane *plane,
 	if (ret)
 		return ret;
 
-	if (intel_crtc->active) {
+	if (crtc_state->base.active) {
 		struct intel_plane_state *old_state =
 			to_intel_plane_state(plane->state);
 
@@ -13309,6 +13309,16 @@ intel_check_primary_plane(struct drm_plane *plane,
 			intel_crtc->atomic.wait_vblank = true;
 	}
 
+	/*
+	 * FIXME: Actually if we will still have any other plane enabled
+	 * on the pipe we could let IPS enabled still, but for
+	 * now lets consider that when we make primary invisible
+	 * by setting DSPCNTR to 0 on update_primary_plane function
+	 * IPS needs to be disable.
+	 */
+	if (!state->visible || !fb)
+		intel_crtc->atomic.disable_ips = true;
+
 	intel_crtc->atomic.fb_bits |=
 		INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 
@@ -13406,6 +13416,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc)
 	if (intel_crtc->atomic.disable_fbc)
 		intel_fbc_disable(dev);
 
+	if (intel_crtc->atomic.disable_ips)
+		hsw_disable_ips(intel_crtc);
+
 	if (intel_crtc->atomic.pre_disable_primary)
 		intel_pre_disable_primary(crtc);
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 280c282da9bd..6e8faa253792 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -893,10 +893,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 				continue;
 			}
 			if (status & DP_AUX_CH_CTL_DONE)
-				break;
+				goto done;
 		}
-		if (status & DP_AUX_CH_CTL_DONE)
-			break;
 	}
 
 	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
@@ -905,6 +903,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 		goto out;
 	}
 
+done:
 	/* Check for timeout or receive error.
 	 * Timeouts occur when the sink is not connected
 	 */
@@ -1141,6 +1140,9 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
 static void
 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
 {
+	memset(&pipe_config->dpll_hw_state, 0,
+	       sizeof(pipe_config->dpll_hw_state));
+
 	switch (link_bw) {
 	case DP_LINK_BW_1_62:
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2afb31a46275..105928382e21 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -485,6 +485,7 @@ struct intel_crtc_atomic_commit {
 	/* Sleepable operations to perform before commit */
 	bool wait_for_flips;
 	bool disable_fbc;
+	bool disable_ips;
 	bool pre_disable_primary;
 	bool update_wm;
 	unsigned disabled_planes;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 92072f56e418..a64f26c670af 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -486,7 +486,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
 					       struct intel_gmbus,
 					       adapter);
 	struct drm_i915_private *dev_priv = bus->dev_priv;
-	int i, reg_offset;
+	int i = 0, inc, try = 0, reg_offset;
 	int ret = 0;
 
 	intel_aux_display_runtime_get(dev_priv);
@@ -499,12 +499,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
 
 	reg_offset = dev_priv->gpio_mmio_base;
 
+retry:
 	I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
 
-	for (i = 0; i < num; i++) {
+	for (; i < num; i += inc) {
+		inc = 1;
 		if (gmbus_is_index_read(msgs, i, num)) {
 			ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
-			i += 1;  /* set i to the index of the read xfer */
+			inc = 2; /* an index read is two msgs */
 		} else if (msgs[i].flags & I2C_M_RD) {
 			ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
 		} else {
@@ -576,6 +578,18 @@ clear_err:
 			 adapter->name, msgs[i].addr,
 			 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
+	/*
+	 * Passive adapters sometimes NAK the first probe. Retry the first
+	 * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
+	 * has retries internally. See also the retry loop in
+	 * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
+	 */
+	if (ret == -ENXIO && i == 0 && try++ == 0) {
+		DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
+			      adapter->name);
+		goto retry;
+	}
+
 	goto out;
 
 timeout:
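Two details of the gmbus_xfer() rework above are easy to miss: an "index read" consumes two i2c_msg entries in one hardware transaction (hence inc = 2), and the retry label re-runs the transfer from the current i, not from scratch. A hedged sketch of the two-message shape such a pair has (illustrative and simplified, not necessarily what gmbus_is_index_read() matches exactly):

#include <linux/i2c.h>

/* Sketch: an index read is a short register-index write immediately
 * followed by a read from the same slave address; both msgs belong to
 * one GMBUS transaction. */
static bool example_is_index_read(const struct i2c_msg *msgs, int i, int num)
{
	return i + 1 < num &&
	       !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
	       (msgs[i + 1].flags & I2C_M_RD) &&
	       msgs[i].addr == msgs[i + 1].addr;
}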
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9f5485ddcbe6..9b74ffae5f5a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1085,6 +1085,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
 
+	if (ring->status_page.obj) {
+		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+			   (u32)ring->status_page.gfx_addr);
+		POSTING_READ(RING_HWS_PGA(ring->mmio_base));
+	}
+
 	I915_WRITE(RING_MODE_GEN7(ring),
 		   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
 		   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 71e87abdcae7..481337436f72 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -396,16 +396,6 @@ int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
 	return -EINVAL;
 }
 
-/*
- * If the vendor backlight interface is not in use and ACPI backlight interface
- * is broken, do not bother processing backlight change requests from firmware.
- */
-static bool should_ignore_backlight_request(void)
-{
-	return acpi_video_backlight_support() &&
-	       !acpi_video_verify_backlight_support();
-}
-
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -414,7 +404,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 
 	DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
-	if (should_ignore_backlight_request()) {
+	if (acpi_video_get_backlight_type() == acpi_backlight_native) {
 		DRM_DEBUG_KMS("opregion backlight request ignored\n");
 		return 0;
 	}
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 7d83527f95f7..55aad2322e10 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -907,6 +907,14 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
 
 	/* XXX: combine this into above write? */
 	intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+	/*
+	 * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
+	 * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
+	 * that has backlight.
+	 */
+	if (IS_GEN2(dev))
+		I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
 }
 
 static void i965_enable_backlight(struct intel_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d934f857394d..3817a6f00d9e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -901,13 +901,6 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
 			    GEN6_WIZ_HASHING_MASK,
 			    GEN6_WIZ_HASHING_16x4);
 
-	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
-	    INTEL_REVID(dev) == SKL_REVID_D0)
-		/* WaBarrierPerformanceFixDisable:skl */
-		WA_SET_BIT_MASKED(HDC_CHICKEN0,
-				  HDC_FENCE_DEST_SLM_DISABLE |
-				  HDC_BARRIER_PERFORMANCE_DISABLE);
-
 	return 0;
 }
 
@@ -1030,6 +1023,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
+	if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+	    INTEL_REVID(dev) == SKL_REVID_D0)
+		/* WaBarrierPerformanceFixDisable:skl */
+		WA_SET_BIT_MASKED(HDC_CHICKEN0,
+				  HDC_FENCE_DEST_SLM_DISABLE |
+				  HDC_BARRIER_PERFORMANCE_DISABLE);
+
 	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 39f6dfc0ee54..e539314ae87e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -118,6 +118,7 @@ struct intel_ringbuffer {
 };
 
 struct intel_context;
+struct drm_i915_reg_descriptor;
 
 struct intel_engine_cs {
 	const char	*name;
@@ -300,14 +301,14 @@ struct intel_engine_cs {
 	/*
 	 * Table of registers allowed in commands that read/write registers.
 	 */
-	const u32 *reg_table;
+	const struct drm_i915_reg_descriptor *reg_table;
 	int reg_count;
 
 	/*
 	 * Table of registers allowed in commands that read/write registers, but
 	 * only from the DRM master.
 	 */
-	const u32 *master_reg_table;
+	const struct drm_i915_reg_descriptor *master_reg_table;
 	int master_reg_count;
 
 	/*
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d24ef75596a1..aa2fd751609c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2554,7 +2554,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2554 2554
2555 DRM_DEBUG_KMS("initialising analog device %d\n", device); 2555 DRM_DEBUG_KMS("initialising analog device %d\n", device);
2556 2556
2557 intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL); 2557 intel_sdvo_connector = intel_sdvo_connector_alloc();
2558 if (!intel_sdvo_connector) 2558 if (!intel_sdvo_connector)
2559 return false; 2559 return false;
2560 2560
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 6e84df9369a6..ad4b9010dfb0 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1526,6 +1526,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
1526 return MODE_BANDWIDTH; 1526 return MODE_BANDWIDTH;
1527 } 1527 }
1528 1528
1529 if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
1530 (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
1531 return MODE_H_ILLEGAL;
1532 }
1533
1529 if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 || 1534 if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
1530 mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 || 1535 mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
1531 mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 || 1536 mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
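The new mgag200 check rejects any mode whose horizontal timings are not all multiples of 8, presumably because the CRTC programs horizontal values in 8-pixel character clocks. For example, a 1368-wide mode passes while a 1366-wide one returns MODE_H_ILLEGAL (1366 % 8 == 6). A standalone C version of the same test, with illustrative sync/total values:

    #include <stdio.h>

    /* Mirrors the divisibility test added to mga_vga_mode_valid(). */
    static int h_timings_legal(int hdisplay, int hsync_start,
                               int hsync_end, int htotal)
    {
        return (hdisplay % 8) == 0 && (hsync_start % 8) == 0 &&
               (hsync_end % 8) == 0 && (htotal % 8) == 0;
    }

    int main(void)
    {
        printf("1368: %d\n", h_timings_legal(1368, 1440, 1528, 1688)); /* 1 */
        printf("1366: %d\n", h_timings_legal(1366, 1436, 1579, 1792)); /* 0 */
        return 0;
    }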
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 0a6f6764a37c..08ba8d0d93f5 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -46,3 +46,10 @@ config DRM_MSM_DSI
46 Choose this option if you need MIPI DSI connector 46 Choose this option if you need MIPI DSI connector
47 support. 47 support.
48 48
49config DRM_MSM_DSI_PLL
50 bool "Enable DSI PLL driver in MSM DRM"
51 depends on DRM_MSM_DSI && COMMON_CLK
52 default y
53 help
54 Choose this option to enable the DSI PLL driver, which provides
55 DSI source clocks under the common clock framework.
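Given the depends line and default y above, the PLL driver is built whenever both the DSI connector code and the common clock framework are enabled, and only drops out if switched off explicitly. As an illustrative .config fragment:

    CONFIG_DRM_MSM_DSI=y
    CONFIG_DRM_MSM_DSI_PLL=y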
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index ab2086783fee..16a81b94d6f0 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,4 +1,5 @@
1ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm 1ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
2ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
2 3
3msm-y := \ 4msm-y := \
4 adreno/adreno_device.o \ 5 adreno/adreno_device.o \
@@ -50,10 +51,14 @@ msm-y := \
50 51
51msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o 52msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
52msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o 53msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
54
53msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \ 55msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
54 dsi/dsi_host.o \ 56 dsi/dsi_host.o \
55 dsi/dsi_manager.o \ 57 dsi/dsi_manager.o \
56 dsi/dsi_phy.o \ 58 dsi/dsi_phy.o \
57 mdp/mdp5/mdp5_cmd_encoder.o 59 mdp/mdp5/mdp5_cmd_encoder.o
58 60
61msm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
62 dsi/pll/dsi_pll_28nm.o
63
59obj-$(CONFIG_DRM_MSM) += msm.o 64obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index edc845fffdf4..23176e402796 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are:
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
18 18
19Copyright (C) 2013-2014 by the following authors: 19Copyright (C) 2013-2014 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 20- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index e91a739452d7..1c599e5cf318 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -12,11 +12,11 @@ The rules-ng-ng source files this header was generated from are:
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
18 18
19Copyright (C) 2013-2014 by the following authors: 19Copyright (C) 2013-2015 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 20- Rob Clark <robdclark@gmail.com> (robclark)
21 21
22Permission is hereby granted, free of charge, to any person obtaining 22Permission is hereby granted, free of charge, to any person obtaining
@@ -130,6 +130,10 @@ enum a3xx_tex_fmt {
130 TFMT_I420_Y = 24, 130 TFMT_I420_Y = 24,
131 TFMT_I420_U = 26, 131 TFMT_I420_U = 26,
132 TFMT_I420_V = 27, 132 TFMT_I420_V = 27,
133 TFMT_ATC_RGB = 32,
134 TFMT_ATC_RGBA_EXPLICIT = 33,
135 TFMT_ETC1 = 34,
136 TFMT_ATC_RGBA_INTERPOLATED = 35,
133 TFMT_DXT1 = 36, 137 TFMT_DXT1 = 36,
134 TFMT_DXT3 = 37, 138 TFMT_DXT3 = 37,
135 TFMT_DXT5 = 38, 139 TFMT_DXT5 = 38,
@@ -178,10 +182,13 @@ enum a3xx_tex_fmt {
178 TFMT_32_SINT = 92, 182 TFMT_32_SINT = 92,
179 TFMT_32_32_SINT = 93, 183 TFMT_32_32_SINT = 93,
180 TFMT_32_32_32_32_SINT = 95, 184 TFMT_32_32_32_32_SINT = 95,
181 TFMT_RGTC2_SNORM = 112, 185 TFMT_ETC2_RG11_SNORM = 112,
182 TFMT_RGTC2_UNORM = 113, 186 TFMT_ETC2_RG11_UNORM = 113,
183 TFMT_RGTC1_SNORM = 114, 187 TFMT_ETC2_R11_SNORM = 114,
184 TFMT_RGTC1_UNORM = 115, 188 TFMT_ETC2_R11_UNORM = 115,
189 TFMT_ETC2_RGBA8 = 116,
190 TFMT_ETC2_RGB8A1 = 117,
191 TFMT_ETC2_RGB8 = 118,
185}; 192};
186 193
187enum a3xx_tex_fetchsize { 194enum a3xx_tex_fetchsize {
@@ -209,14 +216,24 @@ enum a3xx_color_fmt {
209 RB_R10G10B10A2_UNORM = 16, 216 RB_R10G10B10A2_UNORM = 16,
210 RB_A8_UNORM = 20, 217 RB_A8_UNORM = 20,
211 RB_R8_UNORM = 21, 218 RB_R8_UNORM = 21,
219 RB_R16_FLOAT = 24,
220 RB_R16G16_FLOAT = 25,
212 RB_R16G16B16A16_FLOAT = 27, 221 RB_R16G16B16A16_FLOAT = 27,
213 RB_R11G11B10_FLOAT = 28, 222 RB_R11G11B10_FLOAT = 28,
223 RB_R16_SNORM = 32,
224 RB_R16G16_SNORM = 33,
225 RB_R16G16B16A16_SNORM = 35,
226 RB_R16_UNORM = 36,
227 RB_R16G16_UNORM = 37,
228 RB_R16G16B16A16_UNORM = 39,
214 RB_R16_SINT = 40, 229 RB_R16_SINT = 40,
215 RB_R16G16_SINT = 41, 230 RB_R16G16_SINT = 41,
216 RB_R16G16B16A16_SINT = 43, 231 RB_R16G16B16A16_SINT = 43,
217 RB_R16_UINT = 44, 232 RB_R16_UINT = 44,
218 RB_R16G16_UINT = 45, 233 RB_R16G16_UINT = 45,
219 RB_R16G16B16A16_UINT = 47, 234 RB_R16G16B16A16_UINT = 47,
235 RB_R32_FLOAT = 48,
236 RB_R32G32_FLOAT = 49,
220 RB_R32G32B32A32_FLOAT = 51, 237 RB_R32G32B32A32_FLOAT = 51,
221 RB_R32_SINT = 52, 238 RB_R32_SINT = 52,
222 RB_R32G32_SINT = 53, 239 RB_R32G32_SINT = 53,
@@ -265,6 +282,12 @@ enum a3xx_intp_mode {
265 FLAT = 1, 282 FLAT = 1,
266}; 283};
267 284
285enum a3xx_repl_mode {
286 S = 1,
287 T = 2,
288 ONE_T = 3,
289};
290
268enum a3xx_tex_filter { 291enum a3xx_tex_filter {
269 A3XX_TEX_NEAREST = 0, 292 A3XX_TEX_NEAREST = 0,
270 A3XX_TEX_LINEAR = 1, 293 A3XX_TEX_LINEAR = 1,
@@ -751,7 +774,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
751#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0 774#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
752static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val) 775static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
753{ 776{
754 return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; 777 return ((((int32_t)(val * 64.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
755} 778}
756 779
757#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070 780#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
@@ -854,6 +877,12 @@ static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode va
854{ 877{
855 return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK; 878 return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK;
856} 879}
880#define A3XX_RB_MODE_CONTROL_MRT__MASK 0x00003000
881#define A3XX_RB_MODE_CONTROL_MRT__SHIFT 12
882static inline uint32_t A3XX_RB_MODE_CONTROL_MRT(uint32_t val)
883{
884 return ((val) << A3XX_RB_MODE_CONTROL_MRT__SHIFT) & A3XX_RB_MODE_CONTROL_MRT__MASK;
885}
857#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000 886#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000
858#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000 887#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
859 888
@@ -1246,9 +1275,21 @@ static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op v
1246 1275
1247#define REG_A3XX_RB_STENCIL_CLEAR 0x00002105 1276#define REG_A3XX_RB_STENCIL_CLEAR 0x00002105
1248 1277
1249#define REG_A3XX_RB_STENCIL_BUF_INFO 0x00002106 1278#define REG_A3XX_RB_STENCIL_INFO 0x00002106
1279#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff800
1280#define A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 11
1281static inline uint32_t A3XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
1282{
1283 return ((val >> 12) << A3XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A3XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
1284}
1250 1285
1251#define REG_A3XX_RB_STENCIL_BUF_PITCH 0x00002107 1286#define REG_A3XX_RB_STENCIL_PITCH 0x00002107
1287#define A3XX_RB_STENCIL_PITCH__MASK 0xffffffff
1288#define A3XX_RB_STENCIL_PITCH__SHIFT 0
1289static inline uint32_t A3XX_RB_STENCIL_PITCH(uint32_t val)
1290{
1291 return ((val >> 3) << A3XX_RB_STENCIL_PITCH__SHIFT) & A3XX_RB_STENCIL_PITCH__MASK;
1292}
1252 1293
1253#define REG_A3XX_RB_STENCILREFMASK 0x00002108 1294#define REG_A3XX_RB_STENCILREFMASK 0x00002108
1254#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff 1295#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
@@ -1356,6 +1397,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
1356{ 1397{
1357 return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK; 1398 return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
1358} 1399}
1400#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_ENABLE 0x00001000
1359#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000 1401#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
1360#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000 1402#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
1361#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000 1403#define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
@@ -1805,6 +1847,102 @@ static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
1805static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; } 1847static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
1806 1848
1807static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; } 1849static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
1850#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK 0x00000003
1851#define A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT 0
1852static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C0(enum a3xx_repl_mode val)
1853{
1854 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C0__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C0__MASK;
1855}
1856#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK 0x0000000c
1857#define A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT 2
1858static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C1(enum a3xx_repl_mode val)
1859{
1860 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C1__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C1__MASK;
1861}
1862#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK 0x00000030
1863#define A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT 4
1864static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C2(enum a3xx_repl_mode val)
1865{
1866 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C2__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C2__MASK;
1867}
1868#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK 0x000000c0
1869#define A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT 6
1870static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C3(enum a3xx_repl_mode val)
1871{
1872 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C3__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C3__MASK;
1873}
1874#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK 0x00000300
1875#define A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT 8
1876static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C4(enum a3xx_repl_mode val)
1877{
1878 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C4__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C4__MASK;
1879}
1880#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK 0x00000c00
1881#define A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT 10
1882static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C5(enum a3xx_repl_mode val)
1883{
1884 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C5__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C5__MASK;
1885}
1886#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK 0x00003000
1887#define A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT 12
1888static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C6(enum a3xx_repl_mode val)
1889{
1890 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C6__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C6__MASK;
1891}
1892#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK 0x0000c000
1893#define A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT 14
1894static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C7(enum a3xx_repl_mode val)
1895{
1896 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C7__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C7__MASK;
1897}
1898#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK 0x00030000
1899#define A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT 16
1900static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C8(enum a3xx_repl_mode val)
1901{
1902 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C8__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C8__MASK;
1903}
1904#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK 0x000c0000
1905#define A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT 18
1906static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_C9(enum a3xx_repl_mode val)
1907{
1908 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_C9__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_C9__MASK;
1909}
1910#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK 0x00300000
1911#define A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT 20
1912static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CA(enum a3xx_repl_mode val)
1913{
1914 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CA__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CA__MASK;
1915}
1916#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK 0x00c00000
1917#define A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT 22
1918static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CB(enum a3xx_repl_mode val)
1919{
1920 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CB__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CB__MASK;
1921}
1922#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK 0x03000000
1923#define A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT 24
1924static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CC(enum a3xx_repl_mode val)
1925{
1926 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CC__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CC__MASK;
1927}
1928#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK 0x0c000000
1929#define A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT 26
1930static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CD(enum a3xx_repl_mode val)
1931{
1932 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CD__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CD__MASK;
1933}
1934#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK 0x30000000
1935#define A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT 28
1936static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CE(enum a3xx_repl_mode val)
1937{
1938 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CE__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CE__MASK;
1939}
1940#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK 0xc0000000
1941#define A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT 30
1942static inline uint32_t A3XX_VPC_VARYING_PS_REPL_MODE_CF(enum a3xx_repl_mode val)
1943{
1944 return ((val) << A3XX_VPC_VARYING_PS_REPL_MODE_CF__SHIFT) & A3XX_VPC_VARYING_PS_REPL_MODE_CF__MASK;
1945}
1808 1946
1809#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a 1947#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a
1810 1948
@@ -2107,6 +2245,12 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
2107#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9 2245#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
2108 2246
2109#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec 2247#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
2248#define A3XX_SP_FS_OUTPUT_REG_MRT__MASK 0x00000003
2249#define A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0
2250static inline uint32_t A3XX_SP_FS_OUTPUT_REG_MRT(uint32_t val)
2251{
2252 return ((val) << A3XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A3XX_SP_FS_OUTPUT_REG_MRT__MASK;
2253}
2110#define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080 2254#define A3XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
2111#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00 2255#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
2112#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8 2256#define A3XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
@@ -2661,7 +2805,7 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
2661} 2805}
2662 2806
2663#define REG_A3XX_TEX_CONST_3 0x00000003 2807#define REG_A3XX_TEX_CONST_3 0x00000003
2664#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0000000f 2808#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x00007fff
2665#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0 2809#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
2666static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val) 2810static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
2667{ 2811{
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b66c53bdc039..fd266ed963b6 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -93,7 +93,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
93 /* Set up AOOO: */ 93 /* Set up AOOO: */
94 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c); 94 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
95 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c); 95 gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
96 96 } else if (adreno_is_a306(adreno_gpu)) {
97 gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
98 gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a);
99 gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a);
97 } else if (adreno_is_a320(adreno_gpu)) { 100 } else if (adreno_is_a320(adreno_gpu)) {
98 /* Set up 16 deep read/write request queues: */ 101 /* Set up 16 deep read/write request queues: */
99 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010); 102 gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
@@ -186,7 +189,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
186 gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001); 189 gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
187 190
188 /* Enable Clock gating: */ 191 /* Enable Clock gating: */
189 if (adreno_is_a320(adreno_gpu)) 192 if (adreno_is_a306(adreno_gpu))
193 gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
194 else if (adreno_is_a320(adreno_gpu))
190 gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff); 195 gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
191 else if (adreno_is_a330v2(adreno_gpu)) 196 else if (adreno_is_a330v2(adreno_gpu))
192 gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa); 197 gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
@@ -271,7 +276,8 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
271 gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]); 276 gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
272 277
273 /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */ 278 /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
274 if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu)) { 279 if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) ||
280 adreno_is_a320(adreno_gpu)) {
275 gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS, 281 gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
276 AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) | 282 AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
277 AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) | 283 AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
@@ -295,9 +301,12 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
295 301
296static void a3xx_recover(struct msm_gpu *gpu) 302static void a3xx_recover(struct msm_gpu *gpu)
297{ 303{
304 adreno_dump_info(gpu);
305
298 /* dump registers before resetting gpu, if enabled: */ 306 /* dump registers before resetting gpu, if enabled: */
299 if (hang_debug) 307 if (hang_debug)
300 a3xx_dump(gpu); 308 a3xx_dump(gpu);
309
301 gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1); 310 gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 1);
302 gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD); 311 gpu_read(gpu, REG_A3XX_RBBM_SW_RESET_CMD);
303 gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0); 312 gpu_write(gpu, REG_A3XX_RBBM_SW_RESET_CMD, 0);
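The a3xx_gpu.c hunks above thread A306 through three init paths (VBIF QoS arbitration, RBBM clock gating, and the CP ROQ thresholds), each keyed off adreno_is_a306(). The predicate itself is not part of this diff; in this driver such checks are typically a one-line revision compare, sketched here with a hypothetical revision number:

    /* Sketch only: the actual revn value for A306 is not shown in
     * this diff. */
    static inline bool adreno_is_a306(struct adreno_gpu *gpu)
    {
        return gpu->revn == 306;
    }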
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 755723fd8ba5..3f06ecf62583 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -12,11 +12,11 @@ The rules-ng-ng source files this header was generated from are:
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
18 18
19Copyright (C) 2013-2014 by the following authors: 19Copyright (C) 2013-2015 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 20- Rob Clark <robdclark@gmail.com> (robclark)
21 21
22Permission is hereby granted, free of charge, to any person obtaining 22Permission is hereby granted, free of charge, to any person obtaining
@@ -43,10 +43,40 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
43 43
44enum a4xx_color_fmt { 44enum a4xx_color_fmt {
45 RB4_A8_UNORM = 1, 45 RB4_A8_UNORM = 1,
46 RB4_R8_UNORM = 2,
47 RB4_R4G4B4A4_UNORM = 8,
48 RB4_R5G5B5A1_UNORM = 10,
46 RB4_R5G6B5_UNORM = 14, 49 RB4_R5G6B5_UNORM = 14,
47 RB4_Z16_UNORM = 15, 50 RB4_R8G8_UNORM = 15,
51 RB4_R8G8_SNORM = 16,
52 RB4_R8G8_UINT = 17,
53 RB4_R8G8_SINT = 18,
54 RB4_R16_FLOAT = 21,
55 RB4_R16_UINT = 22,
56 RB4_R16_SINT = 23,
48 RB4_R8G8B8_UNORM = 25, 57 RB4_R8G8B8_UNORM = 25,
49 RB4_R8G8B8A8_UNORM = 26, 58 RB4_R8G8B8A8_UNORM = 26,
59 RB4_R8G8B8A8_SNORM = 28,
60 RB4_R8G8B8A8_UINT = 29,
61 RB4_R8G8B8A8_SINT = 30,
62 RB4_R10G10B10A2_UNORM = 31,
63 RB4_R10G10B10A2_UINT = 34,
64 RB4_R11G11B10_FLOAT = 39,
65 RB4_R16G16_FLOAT = 42,
66 RB4_R16G16_UINT = 43,
67 RB4_R16G16_SINT = 44,
68 RB4_R32_FLOAT = 45,
69 RB4_R32_UINT = 46,
70 RB4_R32_SINT = 47,
71 RB4_R16G16B16A16_FLOAT = 54,
72 RB4_R16G16B16A16_UINT = 55,
73 RB4_R16G16B16A16_SINT = 56,
74 RB4_R32G32_FLOAT = 57,
75 RB4_R32G32_UINT = 58,
76 RB4_R32G32_SINT = 59,
77 RB4_R32G32B32A32_FLOAT = 60,
78 RB4_R32G32B32A32_UINT = 61,
79 RB4_R32G32B32A32_SINT = 62,
50}; 80};
51 81
52enum a4xx_tile_mode { 82enum a4xx_tile_mode {
@@ -91,7 +121,14 @@ enum a4xx_vtx_fmt {
91 VFMT4_16_16_UNORM = 29, 121 VFMT4_16_16_UNORM = 29,
92 VFMT4_16_16_16_UNORM = 30, 122 VFMT4_16_16_16_UNORM = 30,
93 VFMT4_16_16_16_16_UNORM = 31, 123 VFMT4_16_16_16_16_UNORM = 31,
124 VFMT4_32_UINT = 32,
125 VFMT4_32_32_UINT = 33,
126 VFMT4_32_32_32_UINT = 34,
127 VFMT4_32_32_32_32_UINT = 35,
128 VFMT4_32_SINT = 36,
94 VFMT4_32_32_SINT = 37, 129 VFMT4_32_32_SINT = 37,
130 VFMT4_32_32_32_SINT = 38,
131 VFMT4_32_32_32_32_SINT = 39,
95 VFMT4_8_UINT = 40, 132 VFMT4_8_UINT = 40,
96 VFMT4_8_8_UINT = 41, 133 VFMT4_8_8_UINT = 41,
97 VFMT4_8_8_8_UINT = 42, 134 VFMT4_8_8_8_UINT = 42,
@@ -125,12 +162,57 @@ enum a4xx_tex_fmt {
125 TFMT4_8_UNORM = 4, 162 TFMT4_8_UNORM = 4,
126 TFMT4_8_8_UNORM = 14, 163 TFMT4_8_8_UNORM = 14,
127 TFMT4_8_8_8_8_UNORM = 28, 164 TFMT4_8_8_8_8_UNORM = 28,
165 TFMT4_8_8_SNORM = 15,
166 TFMT4_8_8_8_8_SNORM = 29,
167 TFMT4_8_8_UINT = 16,
168 TFMT4_8_8_8_8_UINT = 30,
169 TFMT4_8_8_SINT = 17,
170 TFMT4_8_8_8_8_SINT = 31,
171 TFMT4_16_UINT = 21,
172 TFMT4_16_16_UINT = 41,
173 TFMT4_16_16_16_16_UINT = 54,
174 TFMT4_16_SINT = 22,
175 TFMT4_16_16_SINT = 42,
176 TFMT4_16_16_16_16_SINT = 55,
177 TFMT4_32_UINT = 44,
178 TFMT4_32_32_UINT = 57,
179 TFMT4_32_32_32_32_UINT = 64,
180 TFMT4_32_SINT = 45,
181 TFMT4_32_32_SINT = 58,
182 TFMT4_32_32_32_32_SINT = 65,
128 TFMT4_16_FLOAT = 20, 183 TFMT4_16_FLOAT = 20,
129 TFMT4_16_16_FLOAT = 40, 184 TFMT4_16_16_FLOAT = 40,
130 TFMT4_16_16_16_16_FLOAT = 53, 185 TFMT4_16_16_16_16_FLOAT = 53,
131 TFMT4_32_FLOAT = 43, 186 TFMT4_32_FLOAT = 43,
132 TFMT4_32_32_FLOAT = 56, 187 TFMT4_32_32_FLOAT = 56,
133 TFMT4_32_32_32_32_FLOAT = 63, 188 TFMT4_32_32_32_32_FLOAT = 63,
189 TFMT4_9_9_9_E5_FLOAT = 32,
190 TFMT4_11_11_10_FLOAT = 37,
191 TFMT4_ATC_RGB = 100,
192 TFMT4_ATC_RGBA_EXPLICIT = 101,
193 TFMT4_ATC_RGBA_INTERPOLATED = 102,
194 TFMT4_ETC2_RG11_UNORM = 103,
195 TFMT4_ETC2_RG11_SNORM = 104,
196 TFMT4_ETC2_R11_UNORM = 105,
197 TFMT4_ETC2_R11_SNORM = 106,
198 TFMT4_ETC1 = 107,
199 TFMT4_ETC2_RGB8 = 108,
200 TFMT4_ETC2_RGBA8 = 109,
201 TFMT4_ETC2_RGB8A1 = 110,
202 TFMT4_ASTC_4x4 = 111,
203 TFMT4_ASTC_5x4 = 112,
204 TFMT4_ASTC_5x5 = 113,
205 TFMT4_ASTC_6x5 = 114,
206 TFMT4_ASTC_6x6 = 115,
207 TFMT4_ASTC_8x5 = 116,
208 TFMT4_ASTC_8x6 = 117,
209 TFMT4_ASTC_8x8 = 118,
210 TFMT4_ASTC_10x5 = 119,
211 TFMT4_ASTC_10x6 = 120,
212 TFMT4_ASTC_10x8 = 121,
213 TFMT4_ASTC_10x10 = 122,
214 TFMT4_ASTC_12x10 = 123,
215 TFMT4_ASTC_12x12 = 124,
134}; 216};
135 217
136enum a4xx_tex_fetchsize { 218enum a4xx_tex_fetchsize {
@@ -147,9 +229,16 @@ enum a4xx_depth_format {
147 DEPTH4_24_8 = 2, 229 DEPTH4_24_8 = 2,
148}; 230};
149 231
232enum a4xx_tess_spacing {
233 EQUAL_SPACING = 0,
234 ODD_SPACING = 2,
235 EVEN_SPACING = 3,
236};
237
150enum a4xx_tex_filter { 238enum a4xx_tex_filter {
151 A4XX_TEX_NEAREST = 0, 239 A4XX_TEX_NEAREST = 0,
152 A4XX_TEX_LINEAR = 1, 240 A4XX_TEX_LINEAR = 1,
241 A4XX_TEX_ANISO = 2,
153}; 242};
154 243
155enum a4xx_tex_clamp { 244enum a4xx_tex_clamp {
@@ -159,6 +248,14 @@ enum a4xx_tex_clamp {
159 A4XX_TEX_CLAMP_NONE = 3, 248 A4XX_TEX_CLAMP_NONE = 3,
160}; 249};
161 250
251enum a4xx_tex_aniso {
252 A4XX_TEX_ANISO_1 = 0,
253 A4XX_TEX_ANISO_2 = 1,
254 A4XX_TEX_ANISO_4 = 2,
255 A4XX_TEX_ANISO_8 = 3,
256 A4XX_TEX_ANISO_16 = 4,
257};
258
162enum a4xx_tex_swiz { 259enum a4xx_tex_swiz {
163 A4XX_TEX_X = 0, 260 A4XX_TEX_X = 0,
164 A4XX_TEX_Y = 1, 261 A4XX_TEX_Y = 1,
@@ -279,13 +376,16 @@ static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val)
279#define A4XX_RB_RENDER_CONTROL2_YCOORD 0x00000002 376#define A4XX_RB_RENDER_CONTROL2_YCOORD 0x00000002
280#define A4XX_RB_RENDER_CONTROL2_ZCOORD 0x00000004 377#define A4XX_RB_RENDER_CONTROL2_ZCOORD 0x00000004
281#define A4XX_RB_RENDER_CONTROL2_WCOORD 0x00000008 378#define A4XX_RB_RENDER_CONTROL2_WCOORD 0x00000008
379#define A4XX_RB_RENDER_CONTROL2_SAMPLEMASK 0x00000010
282#define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020 380#define A4XX_RB_RENDER_CONTROL2_FACENESS 0x00000020
381#define A4XX_RB_RENDER_CONTROL2_SAMPLEID 0x00000040
283#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK 0x00000380 382#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK 0x00000380
284#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT 7 383#define A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT 7
285static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val) 384static inline uint32_t A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES(uint32_t val)
286{ 385{
287 return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK; 386 return ((val) << A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_RENDER_CONTROL2_MSAA_SAMPLES__MASK;
288} 387}
388#define A4XX_RB_RENDER_CONTROL2_SAMPLEID_HR 0x00000800
289#define A4XX_RB_RENDER_CONTROL2_VARYING 0x00001000 389#define A4XX_RB_RENDER_CONTROL2_VARYING 0x00001000
290 390
291static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; } 391static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
@@ -310,6 +410,12 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a4xx_color_fmt val
310{ 410{
311 return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK; 411 return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
312} 412}
413#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
414#define A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
415static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a4xx_tile_mode val)
416{
417 return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
418}
313#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00000600 419#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00000600
314#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 9 420#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 9
315static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val) 421static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
@@ -322,6 +428,7 @@ static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
322{ 428{
323 return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK; 429 return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
324} 430}
431#define A4XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00002000
325#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0x007fc000 432#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0x007fc000
326#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14 433#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14
327static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val) 434static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
@@ -449,7 +556,12 @@ static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare
449} 556}
450 557
451#define REG_A4XX_RB_FS_OUTPUT 0x000020f9 558#define REG_A4XX_RB_FS_OUTPUT 0x000020f9
452#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND 0x00000001 559#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK 0x000000ff
560#define A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT 0
561static inline uint32_t A4XX_RB_FS_OUTPUT_ENABLE_BLEND(uint32_t val)
562{
563 return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK;
564}
453#define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100 565#define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100
454#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000 566#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
455#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16 567#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
@@ -458,12 +570,54 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
458 return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK; 570 return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
459} 571}
460 572
461#define REG_A4XX_RB_RENDER_CONTROL3 0x000020fb 573#define REG_A4XX_RB_RENDER_COMPONENTS 0x000020fb
462#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK 0x0000001f 574#define A4XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
463#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT 0 575#define A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
464static inline uint32_t A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE(uint32_t val) 576static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
465{ 577{
466 return ((val) << A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT) & A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK; 578 return ((val) << A4XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT0__MASK;
579}
580#define A4XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
581#define A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
582static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
583{
584 return ((val) << A4XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT1__MASK;
585}
586#define A4XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
587#define A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
588static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
589{
590 return ((val) << A4XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT2__MASK;
591}
592#define A4XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
593#define A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
594static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
595{
596 return ((val) << A4XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT3__MASK;
597}
598#define A4XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
599#define A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
600static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
601{
602 return ((val) << A4XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT4__MASK;
603}
604#define A4XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
605#define A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
606static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
607{
608 return ((val) << A4XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT5__MASK;
609}
610#define A4XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
611#define A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
612static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
613{
614 return ((val) << A4XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT6__MASK;
615}
616#define A4XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
617#define A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
618static inline uint32_t A4XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
619{
620 return ((val) << A4XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A4XX_RB_RENDER_COMPONENTS_RT7__MASK;
467} 621}
468 622
469#define REG_A4XX_RB_COPY_CONTROL 0x000020fc 623#define REG_A4XX_RB_COPY_CONTROL 0x000020fc
@@ -547,7 +701,12 @@ static inline uint32_t A4XX_RB_COPY_DEST_INFO_TILE(enum a4xx_tile_mode val)
547} 701}
548 702
549#define REG_A4XX_RB_FS_OUTPUT_REG 0x00002100 703#define REG_A4XX_RB_FS_OUTPUT_REG 0x00002100
550#define A4XX_RB_FS_OUTPUT_REG_COLOR_PIPE_ENABLE 0x00000001 704#define A4XX_RB_FS_OUTPUT_REG_MRT__MASK 0x0000000f
705#define A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT 0
706static inline uint32_t A4XX_RB_FS_OUTPUT_REG_MRT(uint32_t val)
707{
708 return ((val) << A4XX_RB_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_RB_FS_OUTPUT_REG_MRT__MASK;
709}
551#define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z 0x00000020 710#define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z 0x00000020
552 711
553#define REG_A4XX_RB_DEPTH_CONTROL 0x00002101 712#define REG_A4XX_RB_DEPTH_CONTROL 0x00002101
@@ -930,6 +1089,10 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
930 1089
931#define REG_A4XX_CP_IB2_BUFSZ 0x00000209 1090#define REG_A4XX_CP_IB2_BUFSZ 0x00000209
932 1091
1092#define REG_A4XX_CP_ME_NRT_ADDR 0x0000020c
1093
1094#define REG_A4XX_CP_ME_NRT_DATA 0x0000020d
1095
933#define REG_A4XX_CP_ME_RB_DONE_DATA 0x00000217 1096#define REG_A4XX_CP_ME_RB_DONE_DATA 0x00000217
934 1097
935#define REG_A4XX_CP_QUEUE_THRESH2 0x00000219 1098#define REG_A4XX_CP_QUEUE_THRESH2 0x00000219
@@ -940,9 +1103,9 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
940 1103
941#define REG_A4XX_CP_ROQ_DATA 0x0000021d 1104#define REG_A4XX_CP_ROQ_DATA 0x0000021d
942 1105
943#define REG_A4XX_CP_MEQ_ADDR 0x0000021e 1106#define REG_A4XX_CP_MEQ_ADDR 0x0000021e
944 1107
945#define REG_A4XX_CP_MEQ_DATA 0x0000021f 1108#define REG_A4XX_CP_MEQ_DATA 0x0000021f
946 1109
947#define REG_A4XX_CP_MERCIU_ADDR 0x00000220 1110#define REG_A4XX_CP_MERCIU_ADDR 0x00000220
948 1111
@@ -1004,12 +1167,17 @@ static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578
1004 1167
1005#define REG_A4XX_SP_VS_STATUS 0x00000ec0 1168#define REG_A4XX_SP_VS_STATUS 0x00000ec0
1006 1169
1170#define REG_A4XX_SP_MODE_CONTROL 0x00000ec3
1171
1007#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf 1172#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf
1008 1173
1009#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0 1174#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0
1010#define A4XX_SP_SP_CTRL_REG_BINNING_PASS 0x00080000 1175#define A4XX_SP_SP_CTRL_REG_BINNING_PASS 0x00080000
1011 1176
1012#define REG_A4XX_SP_INSTR_CACHE_CTRL 0x000022c1 1177#define REG_A4XX_SP_INSTR_CACHE_CTRL 0x000022c1
1178#define A4XX_SP_INSTR_CACHE_CTRL_VS_BUFFER 0x00000080
1179#define A4XX_SP_INSTR_CACHE_CTRL_FS_BUFFER 0x00000100
1180#define A4XX_SP_INSTR_CACHE_CTRL_INSTR_BUFFER 0x00000400
1013 1181
1014#define REG_A4XX_SP_VS_CTRL_REG0 0x000022c4 1182#define REG_A4XX_SP_VS_CTRL_REG0 0x000022c4
1015#define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001 1183#define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
@@ -1229,6 +1397,12 @@ static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1229#define REG_A4XX_SP_FS_LENGTH_REG 0x000022ef 1397#define REG_A4XX_SP_FS_LENGTH_REG 0x000022ef
1230 1398
1231#define REG_A4XX_SP_FS_OUTPUT_REG 0x000022f0 1399#define REG_A4XX_SP_FS_OUTPUT_REG 0x000022f0
1400#define A4XX_SP_FS_OUTPUT_REG_MRT__MASK 0x0000000f
1401#define A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT 0
1402static inline uint32_t A4XX_SP_FS_OUTPUT_REG_MRT(uint32_t val)
1403{
1404 return ((val) << A4XX_SP_FS_OUTPUT_REG_MRT__SHIFT) & A4XX_SP_FS_OUTPUT_REG_MRT__MASK;
1405}
1232#define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080 1406#define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
1233#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00 1407#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
1234#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8 1408#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
@@ -1236,6 +1410,12 @@ static inline uint32_t A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
1236{ 1410{
1237 return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK; 1411 return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
1238} 1412}
1413#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK 0xff000000
1414#define A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT 24
1415static inline uint32_t A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID(uint32_t val)
1416{
1417 return ((val) << A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_SAMPLEMASK_REGID__MASK;
1418}
1239 1419
1240static inline uint32_t REG_A4XX_SP_FS_MRT(uint32_t i0) { return 0x000022f1 + 0x1*i0; } 1420static inline uint32_t REG_A4XX_SP_FS_MRT(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
1241 1421
@@ -1254,6 +1434,20 @@ static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
1254 return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK; 1434 return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
1255} 1435}
1256 1436
1437#define REG_A4XX_SP_CS_CTRL_REG0 0x00002300
1438
1439#define REG_A4XX_SP_CS_OBJ_OFFSET_REG 0x00002301
1440
1441#define REG_A4XX_SP_CS_OBJ_START 0x00002302
1442
1443#define REG_A4XX_SP_CS_PVT_MEM_PARAM 0x00002303
1444
1445#define REG_A4XX_SP_CS_PVT_MEM_ADDR 0x00002304
1446
1447#define REG_A4XX_SP_CS_PVT_MEM_SIZE 0x00002305
1448
1449#define REG_A4XX_SP_CS_LENGTH_REG 0x00002306
1450
1257#define REG_A4XX_SP_HS_OBJ_OFFSET_REG 0x0000230d 1451#define REG_A4XX_SP_HS_OBJ_OFFSET_REG 0x0000230d
1258#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 1452#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1259#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 1453#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
@@ -1268,6 +1462,14 @@ static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1268 return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; 1462 return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1269} 1463}
1270 1464
1465#define REG_A4XX_SP_HS_OBJ_START 0x0000230e
1466
1467#define REG_A4XX_SP_HS_PVT_MEM_PARAM 0x0000230f
1468
1469#define REG_A4XX_SP_HS_PVT_MEM_ADDR 0x00002310
1470
1471#define REG_A4XX_SP_HS_LENGTH_REG 0x00002312
1472
1271#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334 1473#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334
1272#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 1474#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1273#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 1475#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
@@ -1282,6 +1484,14 @@ static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1282 return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; 1484 return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1283} 1485}
1284 1486
1487#define REG_A4XX_SP_DS_OBJ_START 0x00002335
1488
1489#define REG_A4XX_SP_DS_PVT_MEM_PARAM 0x00002336
1490
1491#define REG_A4XX_SP_DS_PVT_MEM_ADDR 0x00002337
1492
1493#define REG_A4XX_SP_DS_LENGTH_REG 0x00002339
1494
1285#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b 1495#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b
1286#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000 1496#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1287#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16 1497#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
@@ -1296,6 +1506,12 @@ static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1296 return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK; 1506 return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1297} 1507}
1298 1508
1509#define REG_A4XX_SP_GS_OBJ_START 0x0000235c
1510
1511#define REG_A4XX_SP_GS_PVT_MEM_PARAM 0x0000235d
1512
1513#define REG_A4XX_SP_GS_PVT_MEM_ADDR 0x0000235e
1514
1299#define REG_A4XX_SP_GS_LENGTH_REG 0x00002360 1515#define REG_A4XX_SP_GS_LENGTH_REG 0x00002360
1300 1516
1301#define REG_A4XX_VPC_DEBUG_RAM_SEL 0x00000e60 1517#define REG_A4XX_VPC_DEBUG_RAM_SEL 0x00000e60
@@ -1418,6 +1634,10 @@ static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0
1418 1634
1419#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a 1635#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
1420 1636
1637#define REG_A4XX_VGT_CL_INITIATOR 0x000021d0
1638
1639#define REG_A4XX_VGT_EVENT_INITIATOR 0x000021d9
1640
1421#define REG_A4XX_VFD_CONTROL_0 0x00002200 1641#define REG_A4XX_VFD_CONTROL_0 0x00002200
1422#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x000000ff 1642#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x000000ff
1423#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0 1643#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
@@ -1554,10 +1774,54 @@ static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
1554 1774
1555#define REG_A4XX_TPL1_DEBUG_ECO_CONTROL 0x00000f00 1775#define REG_A4XX_TPL1_DEBUG_ECO_CONTROL 0x00000f00
1556 1776
1777#define REG_A4XX_TPL1_TP_MODE_CONTROL 0x00000f03
1778
1557#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b 1779#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
1558 1780
1559#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380 1781#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
1560 1782
1783#define REG_A4XX_TPL1_TP_TEX_COUNT 0x00002381
1784#define A4XX_TPL1_TP_TEX_COUNT_VS__MASK 0x000000ff
1785#define A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT 0
1786static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_VS(uint32_t val)
1787{
1788 return ((val) << A4XX_TPL1_TP_TEX_COUNT_VS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_VS__MASK;
1789}
1790#define A4XX_TPL1_TP_TEX_COUNT_HS__MASK 0x0000ff00
1791#define A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT 8
1792static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_HS(uint32_t val)
1793{
1794 return ((val) << A4XX_TPL1_TP_TEX_COUNT_HS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_HS__MASK;
1795}
1796#define A4XX_TPL1_TP_TEX_COUNT_DS__MASK 0x00ff0000
1797#define A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT 16
1798static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_DS(uint32_t val)
1799{
1800 return ((val) << A4XX_TPL1_TP_TEX_COUNT_DS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_DS__MASK;
1801}
1802#define A4XX_TPL1_TP_TEX_COUNT_GS__MASK 0xff000000
1803#define A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT 24
1804static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
1805{
1806 return ((val) << A4XX_TPL1_TP_TEX_COUNT_GS__SHIFT) & A4XX_TPL1_TP_TEX_COUNT_GS__MASK;
1807}
1808
1809#define REG_A4XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002384
1810
1811#define REG_A4XX_TPL1_TP_HS_BORDER_COLOR_BASE_ADDR 0x00002387
1812
1813#define REG_A4XX_TPL1_TP_DS_BORDER_COLOR_BASE_ADDR 0x0000238a
1814
1815#define REG_A4XX_TPL1_TP_GS_BORDER_COLOR_BASE_ADDR 0x0000238d
1816
1817#define REG_A4XX_TPL1_TP_FS_TEX_COUNT 0x000023a0
1818
1819#define REG_A4XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x000023a1
1820
1821#define REG_A4XX_TPL1_TP_CS_BORDER_COLOR_BASE_ADDR 0x000023a4
1822
1823#define REG_A4XX_TPL1_TP_CS_SAMPLER_BASE_ADDR 0x000023a5
1824
1561#define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR 0x000023a6 1825#define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR 0x000023a6
1562 1826
1563#define REG_A4XX_GRAS_TSE_STATUS 0x00000c80 1827#define REG_A4XX_GRAS_TSE_STATUS 0x00000c80
@@ -1676,6 +1940,14 @@ static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
1676 return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK; 1940 return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
1677} 1941}
1678 1942
1943#define REG_A4XX_GRAS_SU_POLY_OFFSET_CLAMP 0x00002076
1944#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK 0xffffffff
1945#define A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT 0
1946static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_CLAMP(float val)
1947{
1948 return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_CLAMP__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_CLAMP__MASK;
1949}
1950
1679#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077 1951#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077
1680#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003 1952#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003
1681#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0 1953#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0
@@ -1828,6 +2100,8 @@ static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
1828 2100
1829#define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL 0x00000e04 2101#define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL 0x00000e04
1830 2102
2103#define REG_A4XX_HLSQ_MODE_CONTROL 0x00000e05
2104
1831#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e 2105#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
1832 2106
1833#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0 2107#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
@@ -1867,7 +2141,12 @@ static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_COORDREGID(uint32_t val)
1867{ 2141{
1868 return ((val) << A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK; 2142 return ((val) << A4XX_HLSQ_CONTROL_1_REG_COORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_COORDREGID__MASK;
1869} 2143}
1870#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000 2144#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK 0xff000000
2145#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT 24
2146static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID(uint32_t val)
2147{
2148 return ((val) << A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_ZWCOORDREGID__MASK;
2149}
1871 2150
1872#define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2 2151#define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2
1873#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000 2152#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
@@ -1882,6 +2161,18 @@ static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
1882{ 2161{
1883 return ((val) << A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK; 2162 return ((val) << A4XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
1884} 2163}
2164#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK 0x0003fc00
2165#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT 10
2166static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID(uint32_t val)
2167{
2168 return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEID_REGID__MASK;
2169}
2170#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK 0x03fc0000
2171#define A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT 18
2172static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID(uint32_t val)
2173{
2174 return ((val) << A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_SAMPLEMASK_REGID__MASK;
2175}
1885 2176
1886#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3 2177#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3
1887#define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff 2178#define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
@@ -1891,6 +2182,8 @@ static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
1891 return ((val) << A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_REGID__MASK; 2182 return ((val) << A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_REGID__MASK;
1892} 2183}
1893 2184
+#define REG_A4XX_HLSQ_CONTROL_4_REG 0x000023c4
+
 #define REG_A4XX_HLSQ_VS_CONTROL_REG 0x000023c5
 #define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
 #define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
@@ -1904,6 +2197,7 @@ static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
 {
 	return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
 }
+#define A4XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00010000
 #define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
 #define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
 static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
@@ -1930,6 +2224,7 @@ static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
 {
 	return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
 }
+#define A4XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00010000
 #define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
 #define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
 static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
@@ -1956,6 +2251,7 @@ static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
 {
 	return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
 }
+#define A4XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00010000
 #define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
 #define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
 static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
@@ -1982,6 +2278,7 @@ static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
 {
 	return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
 }
+#define A4XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00010000
 #define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
 #define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
 static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
@@ -2008,6 +2305,7 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
 {
 	return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
 }
+#define A4XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00010000
 #define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
 #define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
 static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
@@ -2021,6 +2319,36 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
 	return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
 }
 
+#define REG_A4XX_HLSQ_CS_CONTROL 0x000023ca
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_0 0x000023cd
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_1 0x000023ce
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_2 0x000023cf
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_3 0x000023d0
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_4 0x000023d1
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_5 0x000023d2
+
+#define REG_A4XX_HLSQ_CL_NDRANGE_6 0x000023d3
+
+#define REG_A4XX_HLSQ_CL_CONTROL_0 0x000023d4
+
+#define REG_A4XX_HLSQ_CL_CONTROL_1 0x000023d5
+
+#define REG_A4XX_HLSQ_CL_KERNEL_CONST 0x000023d6
+
+#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_X 0x000023d7
+
+#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Y 0x000023d8
+
+#define REG_A4XX_HLSQ_CL_KERNEL_GROUP_Z 0x000023d9
+
+#define REG_A4XX_HLSQ_CL_WG_OFFSET 0x000023da
+
 #define REG_A4XX_HLSQ_UPDATE_CONTROL 0x000023db
 
 #define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
@@ -2035,7 +2363,13 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
 #define REG_A4XX_PC_BIN_BASE 0x000021c0
 
 #define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
-#define A4XX_PC_PRIM_VTX_CNTL_VAROUT 0x00000001
+#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
+#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL_VAROUT(uint32_t val)
+{
+	return ((val) << A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT) & A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
 #define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
 #define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
 
@@ -2044,8 +2378,45 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
 #define REG_A4XX_PC_RESTART_INDEX 0x000021c6
 
 #define REG_A4XX_PC_GS_PARAM 0x000021e5
+#define A4XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff
+#define A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0
+static inline uint32_t A4XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
+{
+	return ((val) << A4XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A4XX_PC_GS_PARAM_MAX_VERTICES__MASK;
+}
+#define A4XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800
+#define A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11
+static inline uint32_t A4XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
+{
+	return ((val) << A4XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A4XX_PC_GS_PARAM_INVOCATIONS__MASK;
+}
+#define A4XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000
+#define A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23
+static inline uint32_t A4XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
+{
+	return ((val) << A4XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_GS_PARAM_PRIMTYPE__MASK;
+}
+#define A4XX_PC_GS_PARAM_LAYER 0x80000000
 
 #define REG_A4XX_PC_HS_PARAM 0x000021e7
+#define A4XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f
+#define A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0
+static inline uint32_t A4XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val)
+{
+	return ((val) << A4XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A4XX_PC_HS_PARAM_VERTICES_OUT__MASK;
+}
+#define A4XX_PC_HS_PARAM_SPACING__MASK 0x00600000
+#define A4XX_PC_HS_PARAM_SPACING__SHIFT 21
+static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
+{
+	return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
+}
+#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK 0x01800000
+#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT 23
+static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
+{
+	return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK;
+}
 
 #define REG_A4XX_VBIF_VERSION 0x00003000
 
@@ -2074,16 +2445,10 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
 
 #define REG_A4XX_UNKNOWN_0D01 0x00000d01
 
-#define REG_A4XX_UNKNOWN_0E05 0x00000e05
-
 #define REG_A4XX_UNKNOWN_0E42 0x00000e42
 
 #define REG_A4XX_UNKNOWN_0EC2 0x00000ec2
 
-#define REG_A4XX_UNKNOWN_0EC3 0x00000ec3
-
-#define REG_A4XX_UNKNOWN_0F03 0x00000f03
-
 #define REG_A4XX_UNKNOWN_2001 0x00002001
 
 #define REG_A4XX_UNKNOWN_209B 0x0000209b
@@ -2124,10 +2489,6 @@ static inline uint32_t A4XX_UNKNOWN_20F7(float val)
 
 #define REG_A4XX_UNKNOWN_22D7 0x000022d7
 
-#define REG_A4XX_UNKNOWN_2381 0x00002381
-
-#define REG_A4XX_UNKNOWN_23A0 0x000023a0
-
 #define REG_A4XX_TEX_SAMP_0 0x00000000
 #define A4XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
 #define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
@@ -2160,6 +2521,12 @@ static inline uint32_t A4XX_TEX_SAMP_0_WRAP_R(enum a4xx_tex_clamp val)
 {
 	return ((val) << A4XX_TEX_SAMP_0_WRAP_R__SHIFT) & A4XX_TEX_SAMP_0_WRAP_R__MASK;
 }
+#define A4XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A4XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A4XX_TEX_SAMP_0_ANISO(enum a4xx_tex_aniso val)
+{
+	return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK;
+}
 
 #define REG_A4XX_TEX_SAMP_1 0x00000001
 #define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
@@ -2185,6 +2552,7 @@ static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val)
 
 #define REG_A4XX_TEX_CONST_0 0x00000000
 #define A4XX_TEX_CONST_0_TILED 0x00000001
+#define A4XX_TEX_CONST_0_SRGB 0x00000004
 #define A4XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
 #define A4XX_TEX_CONST_0_SWIZ_X__SHIFT 4
 static inline uint32_t A4XX_TEX_CONST_0_SWIZ_X(enum a4xx_tex_swiz val)
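The generated a4xx.xml.h helpers above all follow one rnndb pattern: a multi-bit field gets a __MASK/__SHIFT pair plus an inline packer that shifts the value into place and masks off overflow, while single-bit flags stay plain defines. A minimal sketch (not part of the patch) of how a full register value is composed from them; gpu_write() and the field values here are assumptions for illustration only:

/* Pack the new PC_GS_PARAM fields and program the register (sketch). */
uint32_t gs_param = A4XX_PC_GS_PARAM_MAX_VERTICES(max_vertices) |
		A4XX_PC_GS_PARAM_INVOCATIONS(invocations) |
		A4XX_PC_GS_PARAM_LAYER;	/* single-bit flag ORs in directly */
gpu_write(gpu, REG_A4XX_PC_GS_PARAM, gs_param);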
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 91221836c5ad..a53f1be05f75 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -268,6 +268,8 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
 
 static void a4xx_recover(struct msm_gpu *gpu)
 {
+	adreno_dump_info(gpu);
+
 	/* dump registers before resetting gpu, if enabled: */
 	if (hang_debug)
 		a4xx_dump(gpu);
@@ -505,7 +507,6 @@ static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
 
 static void a4xx_dump(struct msm_gpu *gpu)
 {
-	adreno_dump(gpu);
 	printk("status: %08x\n",
			gpu_read(gpu, REG_A4XX_RBBM_STATUS));
 	adreno_dump(gpu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index 8531beb982e7..9562a1fa552b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -12,9 +12,9 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index be83dee83d08..1ea2df524fac 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -19,7 +19,7 @@
 
 #include "adreno_gpu.h"
 
-#if defined(CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
+#if defined(DOWNSTREAM_CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
 # include <mach/kgsl.h>
 #endif
 
@@ -42,6 +42,14 @@ static const struct adreno_info gpulist[] = {
 		.gmem = SZ_256K,
 		.init = a3xx_gpu_init,
 	}, {
+		.rev = ADRENO_REV(3, 0, 6, 0),
+		.revn = 307,	/* because a305c is revn==306 */
+		.name = "A306",
+		.pm4fw = "a300_pm4.fw",
+		.pfpfw = "a300_pfp.fw",
+		.gmem = SZ_128K,
+		.init = a3xx_gpu_init,
+	}, {
 		.rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
 		.revn = 320,
 		.name = "A320",
@@ -240,7 +248,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
 		config.rev = ADRENO_REV(3, 0, 5, 0);
 
 	}
-# ifdef CONFIG_MSM_BUS_SCALING
+# ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	config.bus_scale_table = pdata->bus_scale_table;
 # endif
 #endif
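The new A306 entry works because the probed chip revision is matched against gpulist entry by entry, with ANY_ID acting as a per-component wildcard; a305c already claims revn 306, hence the explicit (3, 0, 6, 0) revision and revn 307 here. A condensed sketch of the expected matching logic (inferred from how ADRENO_REV and ANY_ID are used, not quoted from this patch):

/* ANY_ID matches everything; otherwise the components must be equal. */
static bool _rev_match(uint8_t entry, uint8_t id)
{
	return (entry == ANY_ID) || (entry == id);
}

static bool rev_match(struct adreno_rev entry, struct adreno_rev probed)
{
	return _rev_match(entry.core, probed.core) &&
		_rev_match(entry.major, probed.major) &&
		_rev_match(entry.minor, probed.minor) &&
		_rev_match(entry.patchid, probed.patchid);
}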
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index bbdcab0a56c1..a3b54cc76495 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -176,6 +176,17 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	OUT_PKT3(ring, CP_INTERRUPT, 1);
 	OUT_RING(ring, 0x80000000);
 
+	/* Workaround for missing irq issue on 8x16/a306.  Unsure if the
+	 * root cause is a platform issue or some a306 quirk, but this
+	 * keeps things humming along:
+	 */
+	if (adreno_is_a306(adreno_gpu)) {
+		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+		OUT_RING(ring, 0x00000000);
+		OUT_PKT3(ring, CP_INTERRUPT, 1);
+		OUT_RING(ring, 0x80000000);
+	}
+
 #if 0
 	if (adreno_is_a3xx(adreno_gpu)) {
 		/* Dummy set-constant to trigger context rollover */
@@ -249,8 +260,13 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
 }
 #endif
 
-/* would be nice to not have to duplicate the _show() stuff with printk(): */
-void adreno_dump(struct msm_gpu *gpu)
+/* Dump common gpu status and scratch registers on any hang, to make
+ * the hangcheck logs more useful.  The scratch registers seem always
+ * safe to read when GPU has hung (unlike some other regs, depending
+ * on how the GPU hung), and they are useful to match up to cmdstream
+ * dumps when debugging hangs:
+ */
+void adreno_dump_info(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	int i;
@@ -266,6 +282,18 @@ void adreno_dump(struct msm_gpu *gpu)
 	printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
 	printk("rb wptr: %d\n", get_wptr(gpu->rb));
 
+	for (i = 0; i < 8; i++) {
+		printk("CP_SCRATCH_REG%d: %u\n", i,
+			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
+	}
+}
+
+/* would be nice to not have to duplicate the _show() stuff with printk(): */
+void adreno_dump(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int i;
+
 	/* dump these out in a form that can be parsed by demsm: */
 	printk("IO:region %s 00000000 00020000\n", gpu->name);
 	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -317,7 +345,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	gpu->fast_rate = config->fast_rate;
 	gpu->slow_rate = config->slow_rate;
 	gpu->bus_freq = config->bus_freq;
-#ifdef CONFIG_MSM_BUS_SCALING
+#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	gpu->bus_scale_table = config->bus_scale_table;
 #endif
 
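With the dump split in place, every hang gets the cheap, hang-safe adreno_dump_info() output, while the full demsm-parsable register dump stays behind the hang_debug knob. A sketch of the intended calling pattern, mirroring the a4xx_recover() change earlier in this patch (aNxx_recover/aNxx_dump are placeholder names, not real symbols):

static void aNxx_recover(struct msm_gpu *gpu)
{
	adreno_dump_info(gpu);	/* always: revision, fence, wptr, scratch regs */

	/* dump registers before resetting gpu, if enabled: */
	if (hang_debug)
		aNxx_dump(gpu);	/* full per-chip + common register dump */

	/* ... proceed with the actual GPU reset ... */
}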
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index a0cc30977e67..0a312e9d3afd 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -167,7 +167,7 @@ struct adreno_gpu {
 struct adreno_platform_config {
 	struct adreno_rev rev;
 	uint32_t fast_rate, slow_rate, bus_freq;
-#ifdef CONFIG_MSM_BUS_SCALING
+#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 	struct msm_bus_scale_pdata *bus_scale_table;
 #endif
 };
@@ -197,6 +197,12 @@ static inline bool adreno_is_a305(struct adreno_gpu *gpu)
 	return gpu->revn == 305;
 }
 
+static inline bool adreno_is_a306(struct adreno_gpu *gpu)
+{
+	/* yes, 307, because a305c is 306 */
+	return gpu->revn == 307;
+}
+
 static inline bool adreno_is_a320(struct adreno_gpu *gpu)
 {
 	return gpu->revn == 320;
@@ -233,6 +239,7 @@ void adreno_idle(struct msm_gpu *gpu);
 #ifdef CONFIG_DEBUG_FS
 void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
 #endif
+void adreno_dump_info(struct msm_gpu *gpu);
 void adreno_dump(struct msm_gpu *gpu);
 void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
 
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 6ffc4f6c8af1..bd5b23bf9041 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -12,11 +12,11 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15085 bytes, from 2014-12-20 21:49:41)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 64344 bytes, from 2014-12-12 20:22:26)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 51069 bytes, from 2014-12-21 15:51:54)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14895 bytes, from 2015-04-19 15:23:28)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 66709 bytes, from 2015-04-12 18:16:35)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 60633 bytes, from 2015-05-20 14:48:19)
 
-Copyright (C) 2013-2014 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
@@ -76,16 +76,11 @@ enum pc_di_primtype {
 	DI_PT_LINELOOP = 7,
 	DI_PT_RECTLIST = 8,
 	DI_PT_POINTLIST_A3XX = 9,
-	DI_PT_QUADLIST = 13,
-	DI_PT_QUADSTRIP = 14,
-	DI_PT_POLYGON = 15,
-	DI_PT_2D_COPY_RECT_LIST_V0 = 16,
-	DI_PT_2D_COPY_RECT_LIST_V1 = 17,
-	DI_PT_2D_COPY_RECT_LIST_V2 = 18,
-	DI_PT_2D_COPY_RECT_LIST_V3 = 19,
-	DI_PT_2D_FILL_RECT_LIST = 20,
-	DI_PT_2D_LINE_STRIP = 21,
-	DI_PT_2D_TRI_STRIP = 22,
+	DI_PT_LINE_ADJ = 10,
+	DI_PT_LINESTRIP_ADJ = 11,
+	DI_PT_TRI_ADJ = 12,
+	DI_PT_TRISTRIP_ADJ = 13,
+	DI_PT_PATCHES = 34,
 };
 
 enum pc_di_src_sel {
@@ -192,6 +187,7 @@ enum adreno_state_block {
 	SB_FRAG_TEX = 2,
 	SB_FRAG_MIPADDR = 3,
 	SB_VERT_SHADER = 4,
+	SB_GEOM_SHADER = 5,
 	SB_FRAG_SHADER = 6,
 };
 
@@ -382,12 +378,19 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va
 {
 	return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
 }
+#define CP_DRAW_INDX_OFFSET_0_TESSELLATE 0x00000100
 #define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
 #define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
 static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
 {
 	return ((val) << CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK;
 }
+#define CP_DRAW_INDX_OFFSET_0_TESS_MODE__MASK 0x01f00000
+#define CP_DRAW_INDX_OFFSET_0_TESS_MODE__SHIFT 20
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_TESS_MODE(uint32_t val)
+{
+	return ((val) << CP_DRAW_INDX_OFFSET_0_TESS_MODE__SHIFT) & CP_DRAW_INDX_OFFSET_0_TESS_MODE__MASK;
+}
 
 #define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
 #define CP_DRAW_INDX_OFFSET_1_NUM_INSTANCES__MASK 0xffffffff
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index ad50b80225f5..1f2561e2ff71 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -23,12 +23,47 @@ struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi)
 		msm_dsi->encoders[MSM_DSI_CMD_ENCODER_ID];
 }
 
+static int dsi_get_phy(struct msm_dsi *msm_dsi)
+{
+	struct platform_device *pdev = msm_dsi->pdev;
+	struct platform_device *phy_pdev;
+	struct device_node *phy_node;
+
+	phy_node = of_parse_phandle(pdev->dev.of_node, "qcom,dsi-phy", 0);
+	if (!phy_node) {
+		dev_err(&pdev->dev, "cannot find phy device\n");
+		return -ENXIO;
+	}
+
+	phy_pdev = of_find_device_by_node(phy_node);
+	if (phy_pdev)
+		msm_dsi->phy = platform_get_drvdata(phy_pdev);
+
+	of_node_put(phy_node);
+
+	if (!phy_pdev || !msm_dsi->phy) {
+		dev_err(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	msm_dsi->phy_dev = get_device(&phy_pdev->dev);
+
+	return 0;
+}
+
 static void dsi_destroy(struct msm_dsi *msm_dsi)
 {
 	if (!msm_dsi)
 		return;
 
 	msm_dsi_manager_unregister(msm_dsi);
+
+	if (msm_dsi->phy_dev) {
+		put_device(msm_dsi->phy_dev);
+		msm_dsi->phy = NULL;
+		msm_dsi->phy_dev = NULL;
+	}
+
 	if (msm_dsi->host) {
 		msm_dsi_host_destroy(msm_dsi->host);
 		msm_dsi->host = NULL;
@@ -43,7 +78,6 @@ static struct msm_dsi *dsi_init(struct platform_device *pdev)
 	int ret;
 
 	if (!pdev) {
-		dev_err(&pdev->dev, "no dsi device\n");
 		ret = -ENXIO;
 		goto fail;
 	}
@@ -63,6 +97,11 @@ static struct msm_dsi *dsi_init(struct platform_device *pdev)
 	if (ret)
 		goto fail;
 
+	/* GET dsi PHY */
+	ret = dsi_get_phy(msm_dsi);
+	if (ret)
+		goto fail;
+
 	/* Register to dsi manager */
 	ret = msm_dsi_manager_register(msm_dsi);
 	if (ret)
@@ -142,12 +181,14 @@ static struct platform_driver dsi_driver = {
 void __init msm_dsi_register(void)
 {
 	DBG("");
+	msm_dsi_phy_driver_register();
 	platform_driver_register(&dsi_driver);
 }
 
 void __exit msm_dsi_unregister(void)
 {
 	DBG("");
+	msm_dsi_phy_driver_unregister();
 	platform_driver_unregister(&dsi_driver);
 }
 
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 10f54d4e379a..92d697de4858 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -14,6 +14,7 @@
 #ifndef __DSI_CONNECTOR_H__
 #define __DSI_CONNECTOR_H__
 
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 
 #include "drm_crtc.h"
@@ -38,6 +39,28 @@
 #define DSI_ENCODER_MASTER DSI_1
 #define DSI_ENCODER_SLAVE DSI_0
 
+enum msm_dsi_phy_type {
+	MSM_DSI_PHY_28NM_HPM,
+	MSM_DSI_PHY_28NM_LP,
+	MSM_DSI_PHY_MAX
+};
+
+#define DSI_DEV_REGULATOR_MAX 8
+
+/* Regulators for DSI devices */
+struct dsi_reg_entry {
+	char name[32];
+	int min_voltage;
+	int max_voltage;
+	int enable_load;
+	int disable_load;
+};
+
+struct dsi_reg_config {
+	int num;
+	struct dsi_reg_entry regs[DSI_DEV_REGULATOR_MAX];
+};
+
 struct msm_dsi {
 	struct drm_device *dev;
 	struct platform_device *pdev;
@@ -49,6 +72,8 @@ struct msm_dsi {
 	struct msm_dsi_phy *phy;
 	struct drm_panel *panel;
 	unsigned long panel_flags;
+
+	struct device *phy_dev;
 	bool phy_enabled;
 
 	/* the encoders we are hooked to (outside of dsi block) */
@@ -73,6 +98,29 @@ void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
 /* msm dsi */
 struct drm_encoder *msm_dsi_get_encoder(struct msm_dsi *msm_dsi);
 
+/* dsi pll */
+struct msm_dsi_pll;
+#ifdef CONFIG_DRM_MSM_DSI_PLL
+struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
+			enum msm_dsi_phy_type type, int dsi_id);
+void msm_dsi_pll_destroy(struct msm_dsi_pll *pll);
+int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
+	struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
+#else
+static inline struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
+			enum msm_dsi_phy_type type, int id) {
+	return ERR_PTR(-ENODEV);
+}
+static inline void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
+{
+}
+static inline int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
+	struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
+{
+	return -ENODEV;
+}
+#endif
+
 /* dsi host */
 int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
 			const struct mipi_dsi_msg *msg);
@@ -94,6 +142,8 @@ struct drm_panel *msm_dsi_host_get_panel(struct mipi_dsi_host *host,
 			unsigned long *panel_flags);
 int msm_dsi_host_register(struct mipi_dsi_host *host, bool check_defer);
 void msm_dsi_host_unregister(struct mipi_dsi_host *host);
+int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
+			struct msm_dsi_pll *src_pll);
 void msm_dsi_host_destroy(struct mipi_dsi_host *host);
 int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
 			struct drm_device *dev);
@@ -101,17 +151,14 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi);
 
 /* dsi phy */
 struct msm_dsi_phy;
-enum msm_dsi_phy_type {
-	MSM_DSI_PHY_UNKNOWN,
-	MSM_DSI_PHY_28NM,
-	MSM_DSI_PHY_MAX
-};
-struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
-	enum msm_dsi_phy_type type, int id);
+void msm_dsi_phy_driver_register(void);
+void msm_dsi_phy_driver_unregister(void);
 int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
 	const unsigned long bit_rate, const unsigned long esc_rate);
 int msm_dsi_phy_disable(struct msm_dsi_phy *phy);
 void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
 	u32 *clk_pre, u32 *clk_post);
+struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy);
+
 #endif /* __DSI_CONNECTOR_H__ */
 
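Note the stub block under CONFIG_DRM_MSM_DSI_PLL above: with the PLL code compiled out, callers still link, msm_dsi_pll_init() yields ERR_PTR(-ENODEV) and the provider query fails with -ENODEV, so "no PLL" degrades gracefully instead of requiring #ifdefs at every call site. A sketch of a tolerant caller (this is the shape msm_dsi_host_set_src_pll() takes later in this patch):

struct clk *byte_clk_provider, *pixel_clk_provider;
int ret;

ret = msm_dsi_pll_get_clk_provider(src_pll,
		&byte_clk_provider, &pixel_clk_provider);
if (ret) {
	/* stub (or real failure) returned an error: keep default parents */
	return 0;
}
/* ... otherwise reparent the byte/pixel source clocks onto the providers ... */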
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 1dcfae265e98..9791ea04bcbc 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,8 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /usr2/hali/local/envytools/envytools/rnndb/dsi/dsi.xml ( 18681 bytes, from 2015-03-04 23:08:31)
-- /usr2/hali/local/envytools/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-01-28 21:43:22)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -394,6 +403,9 @@ static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
 #define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001
 #define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
 
+#define REG_DSI_LANE_CTRL 0x000000a8
+#define DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST 0x10000000
+
 #define REG_DSI_LANE_SWAP_CTRL 0x000000ac
 #define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__MASK 0x00000007
 #define DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL__SHIFT 0
@@ -835,5 +847,152 @@ static inline uint32_t DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(uint32_t val)
 
 #define REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG 0x00000018
 
+#define REG_DSI_28nm_PHY_PLL_REFCLK_CFG 0x00000000
+#define DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR 0x00000001
+
+#define REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004
+
+#define REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008
+
+#define REG_DSI_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c
+
+#define REG_DSI_28nm_PHY_PLL_VREG_CFG 0x00000010
+#define DSI_28nm_PHY_PLL_VREG_CFG_POSTDIV1_BYPASS_B 0x00000002
+
+#define REG_DSI_28nm_PHY_PLL_PWRGEN_CFG 0x00000014
+
+#define REG_DSI_28nm_PHY_PLL_DMUX_CFG 0x00000018
+
+#define REG_DSI_28nm_PHY_PLL_AMUX_CFG 0x0000001c
+
+#define REG_DSI_28nm_PHY_PLL_GLB_CFG 0x00000020
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004
+#define DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008
+
+#define REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024
+
+#define REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028
+
+#define REG_DSI_28nm_PHY_PLL_LPFR_CFG 0x0000002c
+
+#define REG_DSI_28nm_PHY_PLL_LPFC1_CFG 0x00000030
+
+#define REG_DSI_28nm_PHY_PLL_LPFC2_CFG 0x00000034
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG0 0x00000038
+#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK 0x0000003f
+#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV__MASK;
+}
+#define DSI_28nm_PHY_PLL_SDM_CFG0_BYP 0x00000040
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG1 0x0000003c
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK 0x0000003f
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
+}
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK 0x00000040
+#define DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT 6
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG1_DITHER_EN__MASK;
+}
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG2 0x00000040
+#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK 0x000000ff
+#define DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0__MASK;
+}
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG3 0x00000044
+#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK 0x000000ff
+#define DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT 0
+static inline uint32_t DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(uint32_t val)
+{
+	return ((val) << DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__SHIFT) & DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8__MASK;
+}
+
+#define REG_DSI_28nm_PHY_PLL_SDM_CFG4 0x00000048
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG0 0x0000004c
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG1 0x00000050
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG2 0x00000054
+
+#define REG_DSI_28nm_PHY_PLL_SSC_CFG3 0x00000058
+
+#define REG_DSI_28nm_PHY_PLL_LKDET_CFG0 0x0000005c
+
+#define REG_DSI_28nm_PHY_PLL_LKDET_CFG1 0x00000060
+
+#define REG_DSI_28nm_PHY_PLL_LKDET_CFG2 0x00000064
+
+#define REG_DSI_28nm_PHY_PLL_TEST_CFG 0x00000068
+#define DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG0 0x0000006c
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG1 0x00000070
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG2 0x00000074
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG3 0x00000078
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG4 0x0000007c
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG5 0x00000080
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG6 0x00000084
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG7 0x00000088
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG8 0x0000008c
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG9 0x00000090
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG10 0x00000094
+
+#define REG_DSI_28nm_PHY_PLL_CAL_CFG11 0x00000098
+
+#define REG_DSI_28nm_PHY_PLL_EFUSE_CFG 0x0000009c
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_42 0x000000a4
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_43 0x000000a8
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_44 0x000000ac
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_45 0x000000b0
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_46 0x000000b4
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_47 0x000000b8
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_48 0x000000bc
+
+#define REG_DSI_28nm_PHY_PLL_STATUS 0x000000c0
+#define DSI_28nm_PHY_PLL_STATUS_PLL_RDY 0x00000001
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS0 0x000000c4
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS1 0x000000c8
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS2 0x000000cc
+
+#define REG_DSI_28nm_PHY_PLL_DEBUG_BUS3 0x000000d0
+
+#define REG_DSI_28nm_PHY_PLL_CTRL_54 0x000000d4
+
 
 #endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 649d20d29f92..de0400923303 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
@@ -36,35 +37,19 @@
 
 #define DSI_6G_REG_SHIFT 4
 
-#define DSI_REGULATOR_MAX 8
-struct dsi_reg_entry {
-	char name[32];
-	int min_voltage;
-	int max_voltage;
-	int enable_load;
-	int disable_load;
-};
-
-struct dsi_reg_config {
-	int num;
-	struct dsi_reg_entry regs[DSI_REGULATOR_MAX];
-};
-
 struct dsi_config {
 	u32 major;
 	u32 minor;
 	u32 io_offset;
-	enum msm_dsi_phy_type phy_type;
 	struct dsi_reg_config reg_cfg;
 };
 
 static const struct dsi_config dsi_cfgs[] = {
-	{MSM_DSI_VER_MAJOR_V2, 0, 0, MSM_DSI_PHY_UNKNOWN},
+	{MSM_DSI_VER_MAJOR_V2, 0, 0, {0,} },
 	{ /* 8974 v1 */
 		.major = MSM_DSI_VER_MAJOR_6G,
 		.minor = MSM_DSI_6G_VER_MINOR_V1_0,
 		.io_offset = DSI_6G_REG_SHIFT,
-		.phy_type = MSM_DSI_PHY_28NM,
 		.reg_cfg = {
 			.num = 4,
 			.regs = {
@@ -79,7 +64,6 @@ static const struct dsi_config dsi_cfgs[] = {
 		.major = MSM_DSI_VER_MAJOR_6G,
 		.minor = MSM_DSI_6G_VER_MINOR_V1_1,
 		.io_offset = DSI_6G_REG_SHIFT,
-		.phy_type = MSM_DSI_PHY_28NM,
 		.reg_cfg = {
 			.num = 4,
 			.regs = {
@@ -94,7 +78,6 @@ static const struct dsi_config dsi_cfgs[] = {
 		.major = MSM_DSI_VER_MAJOR_6G,
 		.minor = MSM_DSI_6G_VER_MINOR_V1_1_1,
 		.io_offset = DSI_6G_REG_SHIFT,
-		.phy_type = MSM_DSI_PHY_28NM,
 		.reg_cfg = {
 			.num = 4,
 			.regs = {
@@ -109,7 +92,6 @@ static const struct dsi_config dsi_cfgs[] = {
 		.major = MSM_DSI_VER_MAJOR_6G,
 		.minor = MSM_DSI_6G_VER_MINOR_V1_2,
 		.io_offset = DSI_6G_REG_SHIFT,
-		.phy_type = MSM_DSI_PHY_28NM,
 		.reg_cfg = {
 			.num = 4,
 			.regs = {
@@ -124,7 +106,6 @@ static const struct dsi_config dsi_cfgs[] = {
 		.major = MSM_DSI_VER_MAJOR_6G,
 		.minor = MSM_DSI_6G_VER_MINOR_V1_3_1,
 		.io_offset = DSI_6G_REG_SHIFT,
-		.phy_type = MSM_DSI_PHY_28NM,
 		.reg_cfg = {
 			.num = 4,
 			.regs = {
@@ -197,7 +178,7 @@ struct msm_dsi_host {
 	int id;
 
 	void __iomem *ctrl_base;
-	struct regulator_bulk_data supplies[DSI_REGULATOR_MAX];
+	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
 	struct clk *mdp_core_clk;
 	struct clk *ahb_clk;
 	struct clk *axi_clk;
@@ -205,6 +186,9 @@ struct msm_dsi_host {
 	struct clk *byte_clk;
 	struct clk *esc_clk;
 	struct clk *pixel_clk;
+	struct clk *byte_clk_src;
+	struct clk *pixel_clk_src;
+
 	u32 byte_clk_rate;
 
 	struct gpio_desc *disp_en_gpio;
@@ -273,7 +257,7 @@ static const struct dsi_config *dsi_get_config(struct msm_dsi_host *msm_host)
 	u32 major = 0, minor = 0;
 
 	gdsc_reg = regulator_get(&msm_host->pdev->dev, "gdsc");
-	if (IS_ERR_OR_NULL(gdsc_reg)) {
+	if (IS_ERR(gdsc_reg)) {
 		pr_err("%s: cannot get gdsc\n", __func__);
 		goto fail;
 	}
@@ -463,6 +447,22 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
 		goto exit;
 	}
 
+	msm_host->byte_clk_src = devm_clk_get(dev, "byte_clk_src");
+	if (IS_ERR(msm_host->byte_clk_src)) {
+		ret = PTR_ERR(msm_host->byte_clk_src);
+		pr_err("%s: can't find byte_clk_src. ret=%d\n", __func__, ret);
+		msm_host->byte_clk_src = NULL;
+		goto exit;
+	}
+
+	msm_host->pixel_clk_src = devm_clk_get(dev, "pixel_clk_src");
+	if (IS_ERR(msm_host->pixel_clk_src)) {
+		ret = PTR_ERR(msm_host->pixel_clk_src);
+		pr_err("%s: can't find pixel_clk_src. ret=%d\n", __func__, ret);
+		msm_host->pixel_clk_src = NULL;
+		goto exit;
+	}
+
 exit:
 	return ret;
 }
@@ -787,6 +787,11 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
 		dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
 			DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
 	}
+
+	if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+		dsi_write(msm_host, REG_DSI_LANE_CTRL,
+			DSI_LANE_CTRL_CLKLN_HS_FORCE_REQUEST);
+
 	data |= DSI_CTRL_ENABLE;
 
 	dsi_write(msm_host, REG_DSI_CTRL, data);
@@ -1345,36 +1350,19 @@ static irqreturn_t dsi_host_irq(int irq, void *ptr)
 static int dsi_host_init_panel_gpios(struct msm_dsi_host *msm_host,
 		struct device *panel_device)
 {
-	int ret;
-
-	msm_host->disp_en_gpio = devm_gpiod_get(panel_device,
-			"disp-enable");
+	msm_host->disp_en_gpio = devm_gpiod_get_optional(panel_device,
+			"disp-enable",
+			GPIOD_OUT_LOW);
 	if (IS_ERR(msm_host->disp_en_gpio)) {
 		DBG("cannot get disp-enable-gpios %ld",
 			PTR_ERR(msm_host->disp_en_gpio));
-		msm_host->disp_en_gpio = NULL;
-	}
-	if (msm_host->disp_en_gpio) {
-		ret = gpiod_direction_output(msm_host->disp_en_gpio, 0);
-		if (ret) {
-			pr_err("cannot set dir to disp-en-gpios %d\n", ret);
-			return ret;
-		}
+		return PTR_ERR(msm_host->disp_en_gpio);
 	}
 
-	msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te");
+	msm_host->te_gpio = devm_gpiod_get(panel_device, "disp-te", GPIOD_IN);
 	if (IS_ERR(msm_host->te_gpio)) {
 		DBG("cannot get disp-te-gpios %ld", PTR_ERR(msm_host->te_gpio));
-		msm_host->te_gpio = NULL;
-	}
-
-	if (msm_host->te_gpio) {
-		ret = gpiod_direction_input(msm_host->te_gpio);
-		if (ret) {
-			pr_err("%s: cannot set dir to disp-te-gpios, %d\n",
-				__func__, ret);
-			return ret;
-		}
+		return PTR_ERR(msm_host->te_gpio);
 	}
 
 	return 0;
@@ -1508,13 +1496,6 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
 	INIT_WORK(&msm_host->err_work, dsi_err_worker);
 
-	msm_dsi->phy = msm_dsi_phy_init(pdev, msm_host->cfg->phy_type,
-			msm_host->id);
-	if (!msm_dsi->phy) {
-		ret = -EINVAL;
-		pr_err("%s: phy init failed\n", __func__);
-		goto fail;
-	}
 	msm_dsi->host = &msm_host->base;
 	msm_dsi->id = msm_host->id;
 
@@ -1824,6 +1805,39 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
 	wmb();
 }
 
+int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
+	struct msm_dsi_pll *src_pll)
+{
+	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	struct clk *byte_clk_provider, *pixel_clk_provider;
+	int ret;
+
+	ret = msm_dsi_pll_get_clk_provider(src_pll,
+		&byte_clk_provider, &pixel_clk_provider);
+	if (ret) {
+		pr_info("%s: can't get provider from pll, don't set parent\n",
+			__func__);
+		return 0;
+	}
+
+	ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
+	if (ret) {
+		pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
+			__func__, ret);
+		goto exit;
+	}
+
+	ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
+	if (ret) {
+		pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
+			__func__, ret);
+		goto exit;
+	}
+
+exit:
+	return ret;
+}
+
 int msm_dsi_host_enable(struct mipi_dsi_host *host)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
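msm_dsi_host_set_src_pll() above reparents the host's two source clocks onto whatever providers the PHY PLL exposes, so both DSI link clocks end up derived from the PHY's PLL. A comment-only sketch of the resulting clock topology (clock names follow the patch; the exact tree shape is an assumption):

/*
 *   DSI PHY PLL --> byte_clk_provider  --parent-->  byte_clk_src  --> byte_clk
 *             \--> pixel_clk_provider --parent--> pixel_clk_src --> pixel_clk
 *
 * In dual-DSI mode, dsi_manager.c (below) points both hosts at the
 * clock-master PHY's PLL, so a single PLL drives both links.
 */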
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 0a40f3c64e8b..87ac6612b6f8 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -60,6 +60,53 @@ static int dsi_mgr_parse_dual_panel(struct device_node *np, int id)
 	return 0;
 }
 
+static int dsi_mgr_host_register(int id)
+{
+	struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
+	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
+	struct msm_dsi *clk_master_dsi = dsi_mgr_get_dsi(DSI_CLOCK_MASTER);
+	struct msm_dsi_pll *src_pll;
+	int ret;
+
+	if (!IS_DUAL_PANEL()) {
+		ret = msm_dsi_host_register(msm_dsi->host, true);
+		if (ret)
+			return ret;
+
+		src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+		ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
+	} else if (!other_dsi) {
+		ret = 0;
+	} else {
+		struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
+					msm_dsi : other_dsi;
+		struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
+					other_dsi : msm_dsi;
+		/* Register slave host first, so that slave DSI device
+		 * has a chance to probe, and do not block the master
+		 * DSI device's probe.
+		 * Also, do not check defer for the slave host,
+		 * because only master DSI device adds the panel to global
+		 * panel list. The panel's device is the master DSI device.
+		 */
+		ret = msm_dsi_host_register(sdsi->host, false);
+		if (ret)
+			return ret;
+		ret = msm_dsi_host_register(mdsi->host, true);
+		if (ret)
+			return ret;
+
+		/* PLL0 is to drive both 2 DSI link clocks in Dual DSI mode. */
+		src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy);
+		ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
+		if (ret)
+			return ret;
+		ret = msm_dsi_host_set_src_pll(other_dsi->host, src_pll);
+	}
+
+	return ret;
+}
+
 struct dsi_connector {
 	struct drm_connector base;
 	int id;
@@ -652,7 +699,6 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
 {
 	struct msm_dsi_manager *msm_dsim = &msm_dsim_glb;
 	int id = msm_dsi->id;
-	struct msm_dsi *other_dsi = dsi_mgr_get_other_dsi(id);
 	int ret;
 
 	if (id > DSI_MAX) {
@@ -670,31 +716,20 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
 	ret = dsi_mgr_parse_dual_panel(msm_dsi->pdev->dev.of_node, id);
 	if (ret) {
 		pr_err("%s: failed to parse dual panel info\n", __func__);
-		return ret;
+		goto fail;
 	}
 
-	if (!IS_DUAL_PANEL()) {
-		ret = msm_dsi_host_register(msm_dsi->host, true);
-	} else if (!other_dsi) {
-		return 0;
-	} else {
-		struct msm_dsi *mdsi = IS_MASTER_PANEL(id) ?
-					msm_dsi : other_dsi;
-		struct msm_dsi *sdsi = IS_MASTER_PANEL(id) ?
-					other_dsi : msm_dsi;
-		/* Register slave host first, so that slave DSI device
-		 * has a chance to probe, and do not block the master
-		 * DSI device's probe.
-		 * Also, do not check defer for the slave host,
-		 * because only master DSI device adds the panel to global
-		 * panel list. The panel's device is the master DSI device.
-		 */
-		ret = msm_dsi_host_register(sdsi->host, false);
-		if (ret)
-			return ret;
-		ret = msm_dsi_host_register(mdsi->host, true);
+	ret = dsi_mgr_host_register(id);
+	if (ret) {
+		pr_err("%s: failed to register mipi dsi host for DSI %d\n",
+			__func__, id);
+		goto fail;
 	}
 
+	return 0;
+
+fail:
+	msm_dsim->dsi[id] = NULL;
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/msm/dsi/dsi_phy.c b/drivers/gpu/drm/msm/dsi/dsi_phy.c
index f0cea8927388..2d3b33ce1cc5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_phy.c
@@ -11,12 +11,27 @@
11 * GNU General Public License for more details. 11 * GNU General Public License for more details.
12 */ 12 */
13 13
14#include <linux/platform_device.h>
15#include <linux/regulator/consumer.h>
16
14#include "dsi.h" 17#include "dsi.h"
15#include "dsi.xml.h" 18#include "dsi.xml.h"
16 19
17#define dsi_phy_read(offset) msm_readl((offset)) 20#define dsi_phy_read(offset) msm_readl((offset))
18#define dsi_phy_write(offset, data) msm_writel((data), (offset)) 21#define dsi_phy_write(offset, data) msm_writel((data), (offset))
19 22
23struct dsi_phy_ops {
24 int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
25 const unsigned long bit_rate, const unsigned long esc_rate);
26 int (*disable)(struct msm_dsi_phy *phy);
27};
28
29struct dsi_phy_cfg {
30 enum msm_dsi_phy_type type;
31 struct dsi_reg_config reg_cfg;
32 struct dsi_phy_ops ops;
33};
34
20struct dsi_dphy_timing { 35struct dsi_dphy_timing {
21 u32 clk_pre; 36 u32 clk_pre;
22 u32 clk_post; 37 u32 clk_post;
@@ -34,15 +49,106 @@ struct dsi_dphy_timing {
34}; 49};
35 50
36struct msm_dsi_phy { 51struct msm_dsi_phy {
52 struct platform_device *pdev;
37 void __iomem *base; 53 void __iomem *base;
38 void __iomem *reg_base; 54 void __iomem *reg_base;
39 int id; 55 int id;
56
57 struct clk *ahb_clk;
58 struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
59
40 struct dsi_dphy_timing timing; 60 struct dsi_dphy_timing timing;
41 int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel, 61 const struct dsi_phy_cfg *cfg;
42 const unsigned long bit_rate, const unsigned long esc_rate); 62
43 int (*disable)(struct msm_dsi_phy *phy); 63 struct msm_dsi_pll *pll;
44}; 64};
45 65
66static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
67{
68 struct regulator_bulk_data *s = phy->supplies;
69 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
70 struct device *dev = &phy->pdev->dev;
71 int num = phy->cfg->reg_cfg.num;
72 int i, ret;
73
74 for (i = 0; i < num; i++)
75 s[i].supply = regs[i].name;
76
77 ret = devm_regulator_bulk_get(&phy->pdev->dev, num, s);
78 if (ret < 0) {
79 dev_err(dev, "%s: failed to init regulator, ret=%d\n",
80 __func__, ret);
81 return ret;
82 }
83
84 for (i = 0; i < num; i++) {
85 if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
86 ret = regulator_set_voltage(s[i].consumer,
87 regs[i].min_voltage, regs[i].max_voltage);
88 if (ret < 0) {
89 dev_err(dev,
90 "regulator %d set voltage failed, %d\n",
91 i, ret);
92 return ret;
93 }
94 }
95 }
96
97 return 0;
98}
99
100static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
101{
102 struct regulator_bulk_data *s = phy->supplies;
103 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
104 int num = phy->cfg->reg_cfg.num;
105 int i;
106
107 DBG("");
108 for (i = num - 1; i >= 0; i--)
109 if (regs[i].disable_load >= 0)
110 regulator_set_load(s[i].consumer,
111 regs[i].disable_load);
112
113 regulator_bulk_disable(num, s);
114}
115
116static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
117{
118 struct regulator_bulk_data *s = phy->supplies;
119 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
120 struct device *dev = &phy->pdev->dev;
121 int num = phy->cfg->reg_cfg.num;
122 int ret, i;
123
124 DBG("");
125 for (i = 0; i < num; i++) {
126 if (regs[i].enable_load >= 0) {
127 ret = regulator_set_load(s[i].consumer,
128 regs[i].enable_load);
129 if (ret < 0) {
130 dev_err(dev,
131 "regulator %d set op mode failed, %d\n",
132 i, ret);
133 goto fail;
134 }
135 }
136 }
137
138 ret = regulator_bulk_enable(num, s);
139 if (ret < 0) {
140 dev_err(dev, "regulator enable failed, %d\n", ret);
141 goto fail;
142 }
143
144 return 0;
145
146fail:
147 for (i--; i >= 0; i--)
148 regulator_set_load(s[i].consumer, regs[i].disable_load);
149 return ret;
150}
151
46#define S_DIV_ROUND_UP(n, d) \ 152#define S_DIV_ROUND_UP(n, d) \
47 (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d))) 153 (((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
48 154
@@ -284,59 +390,200 @@ static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
 	return 0;
 }
 
-#define dsi_phy_func_init(name)	\
-	do {	\
-		phy->enable = dsi_##name##_phy_enable;	\
-		phy->disable = dsi_##name##_phy_disable;	\
-	} while (0)
-
-struct msm_dsi_phy *msm_dsi_phy_init(struct platform_device *pdev,
-	enum msm_dsi_phy_type type, int id)
+static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
+{
+	int ret;
+
+	pm_runtime_get_sync(&phy->pdev->dev);
+
+	ret = clk_prepare_enable(phy->ahb_clk);
+	if (ret) {
+		pr_err("%s: can't enable ahb clk, %d\n", __func__, ret);
+		pm_runtime_put_sync(&phy->pdev->dev);
+	}
+
+	return ret;
+}
+
+static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
+{
+	clk_disable_unprepare(phy->ahb_clk);
+	pm_runtime_put_sync(&phy->pdev->dev);
+}
+
+static const struct dsi_phy_cfg dsi_phy_cfgs[MSM_DSI_PHY_MAX] = {
+	[MSM_DSI_PHY_28NM_HPM] = {
+		.type = MSM_DSI_PHY_28NM_HPM,
+		.reg_cfg = {
+			.num = 1,
+			.regs = {
+				{"vddio", 1800000, 1800000, 100000, 100},
+			},
+		},
+		.ops = {
+			.enable = dsi_28nm_phy_enable,
+			.disable = dsi_28nm_phy_disable,
+		}
+	},
+	[MSM_DSI_PHY_28NM_LP] = {
+		.type = MSM_DSI_PHY_28NM_LP,
+		.reg_cfg = {
+			.num = 1,
+			.regs = {
+				{"vddio", 1800000, 1800000, 100000, 100},
+			},
+		},
+		.ops = {
+			.enable = dsi_28nm_phy_enable,
+			.disable = dsi_28nm_phy_disable,
+		}
+	},
+};
+
+static const struct of_device_id dsi_phy_dt_match[] = {
+	{ .compatible = "qcom,dsi-phy-28nm-hpm",
+	  .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_HPM],},
+	{ .compatible = "qcom,dsi-phy-28nm-lp",
+	  .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_LP],},
+	{}
+};
+
+static int dsi_phy_driver_probe(struct platform_device *pdev)
 {
 	struct msm_dsi_phy *phy;
+	const struct of_device_id *match;
+	int ret;
 
 	phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
 	if (!phy)
-		return NULL;
+		return -ENOMEM;
+
+	match = of_match_node(dsi_phy_dt_match, pdev->dev.of_node);
+	if (!match)
+		return -ENODEV;
+
+	phy->cfg = match->data;
+	phy->pdev = pdev;
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+				"qcom,dsi-phy-index", &phy->id);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"%s: PHY index not specified, ret=%d\n",
+			__func__, ret);
+		goto fail;
+	}
 
 	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
-	if (IS_ERR_OR_NULL(phy->base)) {
-		pr_err("%s: failed to map phy base\n", __func__);
-		return NULL;
+	if (IS_ERR(phy->base)) {
+		dev_err(&pdev->dev, "%s: failed to map phy base\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
 	}
 	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
-	if (IS_ERR_OR_NULL(phy->reg_base)) {
-		pr_err("%s: failed to map phy regulator base\n", __func__);
-		return NULL;
+	if (IS_ERR(phy->reg_base)) {
+		dev_err(&pdev->dev,
+			"%s: failed to map phy regulator base\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
 	}
 
-	switch (type) {
-	case MSM_DSI_PHY_28NM:
-		dsi_phy_func_init(28nm);
-		break;
-	default:
-		pr_err("%s: unsupported type, %d\n", __func__, type);
-		return NULL;
+	ret = dsi_phy_regulator_init(phy);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to init regulator\n", __func__);
+		goto fail;
+	}
+
+	phy->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(phy->ahb_clk)) {
+		pr_err("%s: Unable to get ahb clk\n", __func__);
+		ret = PTR_ERR(phy->ahb_clk);
+		goto fail;
 	}
 
-	phy->id = id;
+	/* PLL init will call into clk_register which requires
+	 * register access, so we need to enable power and ahb clock.
+	 */
+	ret = dsi_phy_enable_resource(phy);
+	if (ret)
+		goto fail;
+
+	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
+	if (!phy->pll)
+		dev_info(&pdev->dev,
+			"%s: pll init failed, need separate pll clk driver\n",
+			__func__);
+
+	dsi_phy_disable_resource(phy);
+
+	platform_set_drvdata(pdev, phy);
+
+	return 0;
 
-	return phy;
+fail:
+	return ret;
+}
+
+static int dsi_phy_driver_remove(struct platform_device *pdev)
+{
+	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);
+
+	if (phy && phy->pll) {
+		msm_dsi_pll_destroy(phy->pll);
+		phy->pll = NULL;
+	}
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver dsi_phy_platform_driver = {
+	.probe      = dsi_phy_driver_probe,
+	.remove     = dsi_phy_driver_remove,
+	.driver     = {
+		.name   = "msm_dsi_phy",
+		.of_match_table = dsi_phy_dt_match,
+	},
+};
+
+void __init msm_dsi_phy_driver_register(void)
+{
+	platform_driver_register(&dsi_phy_platform_driver);
+}
+
+void __exit msm_dsi_phy_driver_unregister(void)
+{
+	platform_driver_unregister(&dsi_phy_platform_driver);
 }
 
 int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
 	const unsigned long bit_rate, const unsigned long esc_rate)
 {
-	if (!phy || !phy->enable)
+	int ret;
+
+	if (!phy || !phy->cfg->ops.enable)
 		return -EINVAL;
-	return phy->enable(phy, is_dual_panel, bit_rate, esc_rate);
+
+	ret = dsi_phy_regulator_enable(phy);
+	if (ret) {
+		dev_err(&phy->pdev->dev, "%s: regulator enable failed, %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	return phy->cfg->ops.enable(phy, is_dual_panel, bit_rate, esc_rate);
 }
 
 int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
 {
-	if (!phy || !phy->disable)
+	if (!phy || !phy->cfg->ops.disable)
 		return -EINVAL;
-	return phy->disable(phy);
+
+	phy->cfg->ops.disable(phy);
+	dsi_phy_regulator_disable(phy);
+
+	return 0;
 }
 
 void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
@@ -350,3 +597,11 @@ void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
 	*clk_post = phy->timing.clk_post;
 }
 
+struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
+{
+	if (!phy)
+		return NULL;
+
+	return phy->pll;
+}
+
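The probe path above replaces the old dsi_phy_func_init() token-pasting macro with per-compatible configuration: each of_device_id entry carries a const dsi_phy_cfg in its .data field, so one generic probe serves every PHY variant. A reduced, self-contained sketch of that pattern (all demo_* names are hypothetical, not part of this driver):

#include <linux/of.h>
#include <linux/platform_device.h>

struct demo_phy_ops {
	int (*enable)(struct platform_device *pdev);
	void (*disable)(struct platform_device *pdev);
};

struct demo_phy_cfg {
	struct demo_phy_ops ops;
};

static int demo_enable(struct platform_device *pdev) { return 0; }
static void demo_disable(struct platform_device *pdev) { }

static const struct demo_phy_cfg demo_phy_a_cfg = {
	.ops = { .enable = demo_enable, .disable = demo_disable },
};

static const struct of_device_id demo_dt_match[] = {
	{ .compatible = "vendor,demo-phy-a", .data = &demo_phy_a_cfg },
	{}
};

static int demo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct demo_phy_cfg *cfg;

	/* One generic probe; the matched .data selects the variant */
	match = of_match_node(demo_dt_match, pdev->dev.of_node);
	if (!match)
		return -ENODEV;

	cfg = match->data;
	return cfg->ops.enable(pdev);
}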
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 695f99d4bec2..728152f3ef48 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -10,15 +10,15 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
new file mode 100644
index 000000000000..509376fdd112
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "dsi_pll.h"
+
+static int dsi_pll_enable(struct msm_dsi_pll *pll)
+{
+	int i, ret = 0;
+
+	/*
+	 * Certain PLLs do not allow VCO rate update when it is on.
+	 * Keep track of their status to turn on/off after set rate success.
+	 */
+	if (unlikely(pll->pll_on))
+		return 0;
+
+	/* Try all enable sequences until one succeeds */
+	for (i = 0; i < pll->en_seq_cnt; i++) {
+		ret = pll->enable_seqs[i](pll);
+		DBG("DSI PLL %s after sequence #%d",
+			ret ? "unlocked" : "locked", i + 1);
+		if (!ret)
+			break;
+	}
+
+	if (ret) {
+		DRM_ERROR("DSI PLL failed to lock\n");
+		return ret;
+	}
+
+	pll->pll_on = true;
+
+	return 0;
+}
+
+static void dsi_pll_disable(struct msm_dsi_pll *pll)
+{
+	if (unlikely(!pll->pll_on))
+		return;
+
+	pll->disable_seq(pll);
+
+	pll->pll_on = false;
+}
+
+/*
+ * DSI PLL Helper functions
+ */
+long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
+		unsigned long rate, unsigned long *parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+
+	if (rate < pll->min_rate)
+		return pll->min_rate;
+	else if (rate > pll->max_rate)
+		return pll->max_rate;
+	else
+		return rate;
+}
+
+int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	int ret;
+
+	/*
+	 * Certain PLLs need to update the same VCO rate and registers
+	 * after resume in suspend/resume scenario.
+	 */
+	if (pll->restore_state) {
+		ret = pll->restore_state(pll);
+		if (ret)
+			goto error;
+	}
+
+	ret = dsi_pll_enable(pll);
+
+error:
+	return ret;
+}
+
+void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+
+	if (pll->save_state)
+		pll->save_state(pll);
+
+	dsi_pll_disable(pll);
+}
+
+void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
+					struct clk **clks, u32 num_clks)
+{
+	of_clk_del_provider(pdev->dev.of_node);
+
+	if (!num_clks || !clks)
+		return;
+
+	do {
+		clk_unregister(clks[--num_clks]);
+		clks[num_clks] = NULL;
+	} while (num_clks);
+}
+
+/*
+ * DSI PLL API
+ */
+int msm_dsi_pll_get_clk_provider(struct msm_dsi_pll *pll,
+	struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
+{
+	if (pll->get_provider)
+		return pll->get_provider(pll,
+					byte_clk_provider,
+					pixel_clk_provider);
+
+	return -EINVAL;
+}
+
+void msm_dsi_pll_destroy(struct msm_dsi_pll *pll)
+{
+	if (pll->destroy)
+		pll->destroy(pll);
+}
+
+struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
+			enum msm_dsi_phy_type type, int id)
+{
+	struct device *dev = &pdev->dev;
+	struct msm_dsi_pll *pll;
+
+	switch (type) {
+	case MSM_DSI_PHY_28NM_HPM:
+	case MSM_DSI_PHY_28NM_LP:
+		pll = msm_dsi_pll_28nm_init(pdev, type, id);
+		break;
+	default:
+		pll = ERR_PTR(-ENXIO);
+		break;
+	}
+
+	if (IS_ERR(pll)) {
+		dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
+		return NULL;
+	}
+
+	pll->type = type;
+
+	DBG("DSI:%d PLL registered", id);
+
+	return pll;
+}
+
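dsi_pll_enable() above walks the enable_seqs[] array until one sequence reports lock, so a PLL variant can register several hardware-recommended power-up sequences and fall through them on failure. A reduced userspace sketch of that retry-over-sequences idiom (all names hypothetical):

#include <stdio.h>

typedef int (*enable_seq_fn)(void *pll);

/* Try each registered sequence in order; stop at the first that locks */
static int try_enable_seqs(void *pll, enable_seq_fn *seqs, int cnt)
{
	int i, ret = -1;

	for (i = 0; i < cnt; i++) {
		ret = seqs[i](pll);
		printf("PLL %s after sequence #%d\n",
		       ret ? "unlocked" : "locked", i + 1);
		if (!ret)
			break;
	}
	return ret;
}

static int seq_fail(void *pll) { (void)pll; return -1; }
static int seq_ok(void *pll)   { (void)pll; return 0; }

int main(void)
{
	enable_seq_fn seqs[] = { seq_fail, seq_ok };

	/* First sequence fails, second locks; overall result is success */
	return try_enable_seqs(NULL, seqs, 2);
}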
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
new file mode 100644
index 000000000000..5a3bb241c039
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DSI_PLL_H__
+#define __DSI_PLL_H__
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "dsi.h"
+
+#define NUM_DSI_CLOCKS_MAX	6
+#define MAX_DSI_PLL_EN_SEQS	10
+
+struct msm_dsi_pll {
+	enum msm_dsi_phy_type type;
+
+	struct clk_hw	clk_hw;
+	bool		pll_on;
+
+	unsigned long	min_rate;
+	unsigned long	max_rate;
+	u32		en_seq_cnt;
+
+	int (*enable_seqs[MAX_DSI_PLL_EN_SEQS])(struct msm_dsi_pll *pll);
+	void (*disable_seq)(struct msm_dsi_pll *pll);
+	int (*get_provider)(struct msm_dsi_pll *pll,
+			struct clk **byte_clk_provider,
+			struct clk **pixel_clk_provider);
+	void (*destroy)(struct msm_dsi_pll *pll);
+	void (*save_state)(struct msm_dsi_pll *pll);
+	int (*restore_state)(struct msm_dsi_pll *pll);
+};
+
+#define hw_clk_to_pll(x) container_of(x, struct msm_dsi_pll, clk_hw)
+
+static inline void pll_write(void __iomem *reg, u32 data)
+{
+	msm_writel(data, reg);
+}
+
+static inline u32 pll_read(const void __iomem *reg)
+{
+	return msm_readl(reg);
+}
+
+static inline void pll_write_udelay(void __iomem *reg, u32 data, u32 delay_us)
+{
+	pll_write(reg, data);
+	udelay(delay_us);
+}
+
+static inline void pll_write_ndelay(void __iomem *reg, u32 data, u32 delay_ns)
+{
+	pll_write((reg), data);
+	ndelay(delay_ns);
+}
+
+/*
+ * DSI PLL Helper functions
+ */
+
+/* clock callbacks */
+long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
+		unsigned long rate, unsigned long *parent_rate);
+int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw);
+void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw);
+/* misc */
+void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
+					struct clk **clks, u32 num_clks);
+
+/*
+ * Initialization for Each PLL Type
+ */
+struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
+			enum msm_dsi_phy_type type, int id);
+
+#endif /* __DSI_PLL_H__ */
+
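hw_clk_to_pll() is the standard container_of() step: common clock framework callbacks receive only the embedded struct clk_hw, and the driver recovers its wrapper object from it. A minimal userspace sketch of the recovery (the container_of expansion is the usual one; the demo types are hypothetical):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_clk_hw { int flags; };

struct demo_pll {
	int id;
	struct demo_clk_hw clk_hw;	/* embedded, like clk_hw in msm_dsi_pll */
};

/* A callback handed only the embedded member can still reach its parent */
static int demo_get_id(struct demo_clk_hw *hw)
{
	struct demo_pll *pll = container_of(hw, struct demo_pll, clk_hw);

	return pll->id;
}

int main(void)
{
	struct demo_pll pll = { .id = 7 };

	return demo_get_id(&pll.clk_hw) == 7 ? 0 : 1;
}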
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
new file mode 100644
index 000000000000..eb8ac3097ff5
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -0,0 +1,652 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 28nm - clock diagram (eg: DSI0):
+ *
+ *         dsi0analog_postdiv_clk
+ *                             |         dsi0indirect_path_div2_clk
+ *                             |          |
+ *                   +------+  |  +----+  |  |\   dsi0byte_mux
+ *  dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
+ *                |  +------+     +----+     | m|  |  +----+
+ *                |                          | u|--o--| /4 |-- dsi0pllbyte
+ *                |                          | x|     +----+
+ *                o--------------------------| /
+ *                |                          |/
+ *                |          +------+
+ *                o----------| DIV3 |------------------------- dsi0pll
+ *                           +------+
+ */
+
+#define POLL_MAX_READS		10
+#define POLL_TIMEOUT_US		50
+
+#define NUM_PROVIDED_CLKS	2
+
+#define VCO_REF_CLK_RATE	19200000
+#define VCO_MIN_RATE		350000000
+#define VCO_MAX_RATE		750000000
+
+#define DSI_BYTE_PLL_CLK	0
+#define DSI_PIXEL_PLL_CLK	1
+
+#define LPFR_LUT_SIZE		10
+struct lpfr_cfg {
+	unsigned long vco_rate;
+	u32 resistance;
+};
+
+/* Loop filter resistance: */
+static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
+	{ 479500000,  8 },
+	{ 480000000, 11 },
+	{ 575500000,  8 },
+	{ 576000000, 12 },
+	{ 610500000,  8 },
+	{ 659500000,  9 },
+	{ 671500000, 10 },
+	{ 672000000, 14 },
+	{ 708500000, 10 },
+	{ 750000000, 11 },
+};
+
+struct pll_28nm_cached_state {
+	unsigned long vco_rate;
+	u8 postdiv3;
+	u8 postdiv1;
+	u8 byte_mux;
+};
+
+struct dsi_pll_28nm {
+	struct msm_dsi_pll base;
+
+	int id;
+	struct platform_device *pdev;
+	void __iomem *mmio;
+
+	int vco_delay;
+
+	/* private clocks: */
+	struct clk *clks[NUM_DSI_CLOCKS_MAX];
+	u32 num_clks;
+
+	/* clock-provider: */
+	struct clk *provided_clks[NUM_PROVIDED_CLKS];
+	struct clk_onecell_data clk_data;
+
+	struct pll_28nm_cached_state cached_state;
+};
+
+#define to_pll_28nm(x)	container_of(x, struct dsi_pll_28nm, base)
+
+static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
+				u32 nb_tries, u32 timeout_us)
+{
+	bool pll_locked = false;
+	u32 val;
+
+	while (nb_tries--) {
+		val = pll_read(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_STATUS);
+		pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);
+
+		if (pll_locked)
+			break;
+
+		udelay(timeout_us);
+	}
+	DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");
+
+	return pll_locked;
+}
+
+static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
+{
+	void __iomem *base = pll_28nm->mmio;
+
+	/*
+	 * Add HW recommended delays after toggling the software
+	 * reset bit off and back on.
+	 */
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
+			DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
+}
+
+/*
+ * Clock Callbacks
+ */
+static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct device *dev = &pll_28nm->pdev->dev;
+	void __iomem *base = pll_28nm->mmio;
+	unsigned long div_fbx1000, gen_vco_clk;
+	u32 refclk_cfg, frac_n_mode, frac_n_value;
+	u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
+	u32 cal_cfg10, cal_cfg11;
+	u32 rem;
+	int i;
+
+	VERB("rate=%lu, parent's=%lu", rate, parent_rate);
+
+	/* Force postdiv2 to be div-4 */
+	pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);
+
+	/* Configure the Loop filter resistance */
+	for (i = 0; i < LPFR_LUT_SIZE; i++)
+		if (rate <= lpfr_lut[i].vco_rate)
+			break;
+	if (i == LPFR_LUT_SIZE) {
+		dev_err(dev, "unable to get loop filter resistance. vco=%lu\n",
+				rate);
+		return -EINVAL;
+	}
+	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);
+
+	/* Loop filter capacitance values : c1 and c2 */
+	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);
+
+	rem = rate % VCO_REF_CLK_RATE;
+	if (rem) {
+		refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+		frac_n_mode = 1;
+		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
+		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
+	} else {
+		refclk_cfg = 0x0;
+		frac_n_mode = 0;
+		div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
+		gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
+	}
+
+	DBG("refclk_cfg = %d", refclk_cfg);
+
+	rem = div_fbx1000 % 1000;
+	frac_n_value = (rem << 16) / 1000;
+
+	DBG("div_fb = %lu", div_fbx1000);
+	DBG("frac_n_value = %d", frac_n_value);
+
+	DBG("Generated VCO Clock: %lu", gen_vco_clk);
+	rem = 0;
+	sdm_cfg1 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
+	sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
+	if (frac_n_mode) {
+		sdm_cfg0 = 0x0;
+		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
+		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
+				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+		sdm_cfg3 = frac_n_value >> 8;
+		sdm_cfg2 = frac_n_value & 0xff;
+	} else {
+		sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
+		sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
+				(u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
+		sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
+		sdm_cfg2 = 0;
+		sdm_cfg3 = 0;
+	}
+
+	DBG("sdm_cfg0=%d", sdm_cfg0);
+	DBG("sdm_cfg1=%d", sdm_cfg1);
+	DBG("sdm_cfg2=%d", sdm_cfg2);
+	DBG("sdm_cfg3=%d", sdm_cfg3);
+
+	cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
+	cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
+	DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);
+
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3, 0x2b);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4, 0x06);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
+
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
+		DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
+		DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);
+
+	/* Add hardware recommended delay for correct PLL configuration */
+	if (pll_28nm->vco_delay)
+		udelay(pll_28nm->vco_delay);
+
+	pll_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0, sdm_cfg0);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0, 0x12);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6, 0x30);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7, 0x00);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8, 0x60);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9, 0x00);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10, cal_cfg10 & 0xff);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11, cal_cfg11 & 0xff);
+	pll_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG, 0x20);
+
+	return 0;
+}
+
+static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
+					POLL_TIMEOUT_US);
+}
+
+static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	void __iomem *base = pll_28nm->mmio;
+	u32 sdm0, doubler, sdm_byp_div;
+	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
+	u32 ref_clk = VCO_REF_CLK_RATE;
+	unsigned long vco_rate;
+
+	VERB("parent_rate=%lu", parent_rate);
+
+	/* Check to see if the ref clk doubler is enabled */
+	doubler = pll_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
+			DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
+	ref_clk += (doubler * VCO_REF_CLK_RATE);
+
+	/* see if it is integer mode or sdm mode */
+	sdm0 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
+	if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
+		/* integer mode */
+		sdm_byp_div = FIELD(
+				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
+				DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
+		vco_rate = ref_clk * sdm_byp_div;
+	} else {
+		/* sdm mode */
+		sdm_dc_off = FIELD(
+				pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
+				DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
+		DBG("sdm_dc_off = %d", sdm_dc_off);
+		sdm2 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
+				DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
+		sdm3 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
+				DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
+		sdm_freq_seed = (sdm3 << 8) | sdm2;
+		DBG("sdm_freq_seed = %d", sdm_freq_seed);
+
+		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
+			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
+		DBG("vco rate = %lu", vco_rate);
+	}
+
+	DBG("returning vco rate = %lu", vco_rate);
+
+	return vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
+	.round_rate = msm_dsi_pll_helper_clk_round_rate,
+	.set_rate = dsi_pll_28nm_clk_set_rate,
+	.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
+	.prepare = msm_dsi_pll_helper_clk_prepare,
+	.unprepare = msm_dsi_pll_helper_clk_unprepare,
+	.is_enabled = dsi_pll_28nm_clk_is_enabled,
+};
+
+/*
+ * PLL Callbacks
+ */
+static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct device *dev = &pll_28nm->pdev->dev;
+	void __iomem *base = pll_28nm->mmio;
+	u32 max_reads = 5, timeout_us = 100;
+	bool locked;
+	u32 val;
+	int i;
+
+	DBG("id=%d", pll_28nm->id);
+
+	pll_28nm_software_reset(pll_28nm);
+
+	/*
+	 * PLL power up sequence.
+	 * Add necessary delays recommended by hardware.
+	 */
+	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+
+	for (i = 0; i < 2; i++) {
+		/* DSI Uniphy lock detect setting */
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
+				0x0c, 100);
+		pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);
+
+		/* poll for PLL ready status */
+		locked = pll_28nm_poll_for_ready(pll_28nm,
+						max_reads, timeout_us);
+		if (locked)
+			break;
+
+		pll_28nm_software_reset(pll_28nm);
+
+		/*
+		 * PLL power up sequence.
+		 * Add necessary delays recommended by hardware.
+		 */
+		val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);
+
+		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);
+
+		val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);
+
+		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+		val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+		pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
+	}
+
+	if (unlikely(!locked))
+		dev_err(dev, "DSI PLL lock failed\n");
+	else
+		DBG("DSI PLL Lock success");
+
+	return locked ? 0 : -EINVAL;
+}
+
+static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct device *dev = &pll_28nm->pdev->dev;
+	void __iomem *base = pll_28nm->mmio;
+	bool locked;
+	u32 max_reads = 10, timeout_us = 50;
+	u32 val;
+
+	DBG("id=%d", pll_28nm->id);
+
+	pll_28nm_software_reset(pll_28nm);
+
+	/*
+	 * PLL power up sequence.
+	 * Add necessary delays recommended by hardware.
+	 */
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);
+
+	val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+	val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
+		DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);
+
+	/* DSI PLL toggle lock detect setting */
+	pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
+	pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);
+
+	locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);
+
+	if (unlikely(!locked))
+		dev_err(dev, "DSI PLL lock failed\n");
+	else
+		DBG("DSI PLL lock success");
+
+	return locked ? 0 : -EINVAL;
+}
+
+static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	DBG("id=%d", pll_28nm->id);
+	pll_write(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
+}
+
+static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+	void __iomem *base = pll_28nm->mmio;
+
+	cached_state->postdiv3 =
+			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
+	cached_state->postdiv1 =
+			pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
+	cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
+	cached_state->vco_rate = __clk_get_rate(pll->clk_hw.clk);
+}
+
+static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
+	void __iomem *base = pll_28nm->mmio;
+	int ret;
+
+	if ((cached_state->vco_rate != 0) &&
+		(cached_state->vco_rate == __clk_get_rate(pll->clk_hw.clk))) {
+		ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
+				cached_state->vco_rate, 0);
+		if (ret) {
+			dev_err(&pll_28nm->pdev->dev,
+				"restore vco rate failed. ret=%d\n", ret);
+			return ret;
+		}
+
+		pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+				cached_state->postdiv3);
+		pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+				cached_state->postdiv1);
+		pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
+				cached_state->byte_mux);
+
+		cached_state->vco_rate = 0;
+	}
+
+	return 0;
+}
+
+static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
+				struct clk **byte_clk_provider,
+				struct clk **pixel_clk_provider)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+
+	if (byte_clk_provider)
+		*byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
+	if (pixel_clk_provider)
+		*pixel_clk_provider =
+				pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];
+
+	return 0;
+}
+
+static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
+{
+	struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
+	int i;
+
+	msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
+					pll_28nm->clks, pll_28nm->num_clks);
+
+	for (i = 0; i < NUM_PROVIDED_CLKS; i++)
+		pll_28nm->provided_clks[i] = NULL;
+
+	pll_28nm->num_clks = 0;
+	pll_28nm->clk_data.clks = NULL;
+	pll_28nm->clk_data.clk_num = 0;
+}
+
+static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
+{
+	char clk_name[32], parent1[32], parent2[32], vco_name[32];
+	struct clk_init_data vco_init = {
+		.parent_names = (const char *[]){ "xo" },
+		.num_parents = 1,
+		.name = vco_name,
+		.ops = &clk_ops_dsi_pll_28nm_vco,
+	};
+	struct device *dev = &pll_28nm->pdev->dev;
+	struct clk **clks = pll_28nm->clks;
+	struct clk **provided_clks = pll_28nm->provided_clks;
+	int num = 0;
+	int ret;
+
+	DBG("%d", pll_28nm->id);
+
+	snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
+	pll_28nm->base.clk_hw.init = &vco_init;
+	clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);
+
+	snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
+	clks[num++] = clk_register_divider(dev, clk_name,
+			parent1, CLK_SET_RATE_PARENT,
+			pll_28nm->mmio +
+			REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
+			0, 4, 0, NULL);
+
+	snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
+	clks[num++] = clk_register_fixed_factor(dev, clk_name,
+			parent1, CLK_SET_RATE_PARENT,
+			1, 2);
+
+	snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
+	clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
+			clk_register_divider(dev, clk_name,
+				parent1, 0, pll_28nm->mmio +
+				REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
+				0, 8, 0, NULL);
+
+	snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
+	snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
+	clks[num++] = clk_register_mux(dev, clk_name,
+			(const char *[]){
+				parent1, parent2
+			}, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
+			REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);
+
+	snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
+	snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->id);
+	clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
+			clk_register_fixed_factor(dev, clk_name,
+				parent1, CLK_SET_RATE_PARENT, 1, 4);
+
+	pll_28nm->num_clks = num;
+
+	pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
+	pll_28nm->clk_data.clks = provided_clks;
+
+	ret = of_clk_add_provider(dev->of_node,
+			of_clk_src_onecell_get, &pll_28nm->clk_data);
+	if (ret) {
+		dev_err(dev, "failed to register clk provider: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
+					enum msm_dsi_phy_type type, int id)
+{
+	struct dsi_pll_28nm *pll_28nm;
+	struct msm_dsi_pll *pll;
+	int ret;
+
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
+	if (!pll_28nm)
+		return ERR_PTR(-ENOMEM);
+
+	pll_28nm->pdev = pdev;
+	pll_28nm->id = id;
+
+	pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+	if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
+		dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pll = &pll_28nm->base;
+	pll->min_rate = VCO_MIN_RATE;
+	pll->max_rate = VCO_MAX_RATE;
+	pll->get_provider = dsi_pll_28nm_get_provider;
+	pll->destroy = dsi_pll_28nm_destroy;
+	pll->disable_seq = dsi_pll_28nm_disable_seq;
+	pll->save_state = dsi_pll_28nm_save_state;
+	pll->restore_state = dsi_pll_28nm_restore_state;
+
+	if (type == MSM_DSI_PHY_28NM_HPM) {
+		pll_28nm->vco_delay = 1;
+
+		pll->en_seq_cnt = 3;
+		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_hpm;
+		pll->enable_seqs[1] = dsi_pll_28nm_enable_seq_hpm;
+		pll->enable_seqs[2] = dsi_pll_28nm_enable_seq_hpm;
+	} else if (type == MSM_DSI_PHY_28NM_LP) {
+		pll_28nm->vco_delay = 1000;
+
+		pll->en_seq_cnt = 1;
+		pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
+	} else {
+		dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = pll_28nm_register(pll_28nm);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	return pll;
+}
+
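In SDM (fractional) mode, dsi_pll_28nm_clk_recalc_rate() reconstructs the VCO rate as ref * (dc_offset + 1) + ref * seed / 2^16. A standalone sketch of that arithmetic with worked values (the harness is hypothetical; the formula is the one in the hunk above):

#include <stdio.h>
#include <stdint.h>

#define VCO_REF_CLK_RATE 19200000UL

/* Integer part from the DC offset, fractional part from the 16-bit seed */
static unsigned long sdm_vco_rate(uint32_t sdm_dc_off, uint32_t sdm_freq_seed)
{
	return VCO_REF_CLK_RATE * (sdm_dc_off + 1) +
	       (unsigned long)(((uint64_t)VCO_REF_CLK_RATE * sdm_freq_seed) >> 16);
}

int main(void)
{
	/* dc_off = 25, seed = 0: 26 * 19.2 MHz = 499200000 Hz */
	printf("%lu\n", sdm_vco_rate(25, 0));
	/* seed = 0x8000 adds half a ref cycle: +9600000 Hz */
	printf("%lu\n", sdm_vco_rate(25, 0x8000));
	return 0;
}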
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 50ff9851d73d..26f268e2dd3d 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -10,15 +10,15 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index a29f1df15143..f9c71dceb5e2 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -10,17 +10,17 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
 
-Copyright (C) 2013-2014 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
@@ -288,5 +288,92 @@ static inline uint32_t REG_EDP_PHY_LN_PD_CTL(uint32_t i0) { return 0x00000404 +
 
 #define REG_EDP_PHY_GLB_PHY_STATUS	0x00000598
 
+#define REG_EDP_28nm_PHY_PLL_REFCLK_CFG	0x00000000
+
+#define REG_EDP_28nm_PHY_PLL_POSTDIV1_CFG	0x00000004
+
+#define REG_EDP_28nm_PHY_PLL_CHGPUMP_CFG	0x00000008
+
+#define REG_EDP_28nm_PHY_PLL_VCOLPF_CFG	0x0000000c
+
+#define REG_EDP_28nm_PHY_PLL_VREG_CFG	0x00000010
+
+#define REG_EDP_28nm_PHY_PLL_PWRGEN_CFG	0x00000014
+
+#define REG_EDP_28nm_PHY_PLL_DMUX_CFG	0x00000018
+
+#define REG_EDP_28nm_PHY_PLL_AMUX_CFG	0x0000001c
+
+#define REG_EDP_28nm_PHY_PLL_GLB_CFG	0x00000020
+#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B	0x00000001
+#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B	0x00000002
+#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B	0x00000004
+#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE	0x00000008
+
+#define REG_EDP_28nm_PHY_PLL_POSTDIV2_CFG	0x00000024
+
+#define REG_EDP_28nm_PHY_PLL_POSTDIV3_CFG	0x00000028
+
+#define REG_EDP_28nm_PHY_PLL_LPFR_CFG	0x0000002c
+
+#define REG_EDP_28nm_PHY_PLL_LPFC1_CFG	0x00000030
+
+#define REG_EDP_28nm_PHY_PLL_LPFC2_CFG	0x00000034
+
+#define REG_EDP_28nm_PHY_PLL_SDM_CFG0	0x00000038
+
+#define REG_EDP_28nm_PHY_PLL_SDM_CFG1	0x0000003c
+
+#define REG_EDP_28nm_PHY_PLL_SDM_CFG2	0x00000040
+
+#define REG_EDP_28nm_PHY_PLL_SDM_CFG3	0x00000044
+
+#define REG_EDP_28nm_PHY_PLL_SDM_CFG4	0x00000048
+
+#define REG_EDP_28nm_PHY_PLL_SSC_CFG0	0x0000004c
+
+#define REG_EDP_28nm_PHY_PLL_SSC_CFG1	0x00000050
+
+#define REG_EDP_28nm_PHY_PLL_SSC_CFG2	0x00000054
+
+#define REG_EDP_28nm_PHY_PLL_SSC_CFG3	0x00000058
+
+#define REG_EDP_28nm_PHY_PLL_LKDET_CFG0	0x0000005c
+
+#define REG_EDP_28nm_PHY_PLL_LKDET_CFG1	0x00000060
+
+#define REG_EDP_28nm_PHY_PLL_LKDET_CFG2	0x00000064
+
+#define REG_EDP_28nm_PHY_PLL_TEST_CFG	0x00000068
+#define EDP_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET	0x00000001
+
+#define REG_EDP_28nm_PHY_PLL_CAL_CFG0	0x0000006c
+
+#define REG_EDP_28nm_PHY_PLL_CAL_CFG1	0x00000070
+
+#define REG_EDP_28nm_PHY_PLL_CAL_CFG2	0x00000074
+
+#define REG_EDP_28nm_PHY_PLL_CAL_CFG3	0x00000078
+
+#define REG_EDP_28nm_PHY_PLL_CAL_CFG4	0x0000007c
+
+#define REG_EDP_28nm_PHY_PLL_CAL_CFG5	0x00000080
+
+#define REG_EDP_28nm_PHY_PLL_CAL_CFG6	0x00000084
+
+#define REG_EDP_28nm_PHY_PLL_CAL_CFG7	0x00000088
+
+#define REG_EDP_28nm_PHY_PLL_CAL_CFG8	0x0000008c
+
+#define REG_EDP_28nm_PHY_PLL_CAL_CFG9	0x00000090
+
+#define REG_EDP_28nm_PHY_PLL_CAL_CFG10	0x00000094
+
+#define REG_EDP_28nm_PHY_PLL_CAL_CFG11	0x00000098
+
+#define REG_EDP_28nm_PHY_PLL_EFUSE_CFG	0x0000009c
+
+#define REG_EDP_28nm_PHY_PLL_DEBUG_BUS_SEL	0x000000a0
+
 
 #endif /* EDP_XML */
diff --git a/drivers/gpu/drm/msm/edp/edp_aux.c b/drivers/gpu/drm/msm/edp/edp_aux.c
index 208f9d47f82e..82789dd249ee 100644
--- a/drivers/gpu/drm/msm/edp/edp_aux.c
+++ b/drivers/gpu/drm/msm/edp/edp_aux.c
@@ -115,10 +115,12 @@ static int edp_msg_fifo_rx(struct edp_aux *aux, struct drm_dp_aux_msg *msg)
  * msm_edp_aux_ctrl() running concurrently in other threads, i.e.
  * start transaction only when AUX channel is fully enabled.
  */
-ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg)
+static ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux,
+		struct drm_dp_aux_msg *msg)
 {
 	struct edp_aux *aux = to_edp_aux(drm_aux);
 	ssize_t ret;
+	unsigned long time_left;
 	bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
 	bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
 
@@ -147,15 +149,17 @@ ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg)
 		goto unlock_exit;
 
 	DBG("wait_for_completion");
-	ret = wait_for_completion_timeout(&aux->msg_comp, 300);
-	if (ret <= 0) {
+	time_left = wait_for_completion_timeout(&aux->msg_comp,
+						msecs_to_jiffies(300));
+	if (!time_left) {
 		/*
 		 * Clear GO and reset AUX channel
 		 * to cancel the current transaction.
 		 */
 		edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
 		msm_edp_aux_ctrl(aux, 1);
-		pr_err("%s: aux timeout, %zd\n", __func__, ret);
+		pr_err("%s: aux timeout,\n", __func__);
+		ret = -ETIMEDOUT;
 		goto unlock_exit;
 	}
 	DBG("completion");
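Both edp fixes in this series correct the same misuse: wait_for_completion_timeout() returns an unsigned long count of jiffies remaining (0 on timeout) and never a negative errno, so the old "if (ret <= 0)" test both mis-typed the result and tested for an error that cannot occur. A kernel-context sketch of the corrected pattern (demo_wait() is hypothetical; the API calls are the ones used in the hunks):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int demo_wait(struct completion *comp)
{
	unsigned long time_left;

	/* 0 remaining jiffies means the wait timed out */
	time_left = wait_for_completion_timeout(comp, msecs_to_jiffies(300));
	if (!time_left)
		return -ETIMEDOUT;

	return 0;
}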
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 29e52d7c61c0..7991069dd492 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -1018,7 +1018,7 @@ static void edp_ctrl_off_worker(struct work_struct *work)
 {
 	struct edp_ctrl *ctrl = container_of(
 				work, struct edp_ctrl, off_work);
-	int ret;
+	unsigned long time_left;
 
 	mutex_lock(&ctrl->dev_mutex);
 
@@ -1030,11 +1030,10 @@ static void edp_ctrl_off_worker(struct work_struct *work)
 	reinit_completion(&ctrl->idle_comp);
 	edp_state_ctrl(ctrl, EDP_STATE_CTRL_PUSH_IDLE);
 
-	ret = wait_for_completion_timeout(&ctrl->idle_comp,
+	time_left = wait_for_completion_timeout(&ctrl->idle_comp,
 						msecs_to_jiffies(500));
-	if (ret <= 0)
-		DBG("%s: idle pattern timedout, %d\n",
-			__func__, ret);
+	if (!time_left)
+		DBG("%s: idle pattern timedout\n", __func__);
 
 	edp_state_ctrl(ctrl, 0);
 
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 350988740e9f..e6f034808371 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -10,15 +10,15 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -750,5 +750,92 @@ static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
 
 #define REG_HDMI_8x74_BIST_PATN3	0x00000048
 
+#define REG_HDMI_28nm_PHY_PLL_REFCLK_CFG	0x00000000
+
+#define REG_HDMI_28nm_PHY_PLL_POSTDIV1_CFG	0x00000004
+
+#define REG_HDMI_28nm_PHY_PLL_CHGPUMP_CFG	0x00000008
+
+#define REG_HDMI_28nm_PHY_PLL_VCOLPF_CFG	0x0000000c
+
+#define REG_HDMI_28nm_PHY_PLL_VREG_CFG	0x00000010
+
+#define REG_HDMI_28nm_PHY_PLL_PWRGEN_CFG	0x00000014
+
+#define REG_HDMI_28nm_PHY_PLL_DMUX_CFG	0x00000018
+
+#define REG_HDMI_28nm_PHY_PLL_AMUX_CFG	0x0000001c
+
+#define REG_HDMI_28nm_PHY_PLL_GLB_CFG	0x00000020
+#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B	0x00000001
+#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B	0x00000002
+#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B	0x00000004
+#define HDMI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE	0x00000008
+
+#define REG_HDMI_28nm_PHY_PLL_POSTDIV2_CFG	0x00000024
+
+#define REG_HDMI_28nm_PHY_PLL_POSTDIV3_CFG	0x00000028
+
+#define REG_HDMI_28nm_PHY_PLL_LPFR_CFG	0x0000002c
+
+#define REG_HDMI_28nm_PHY_PLL_LPFC1_CFG	0x00000030
+
+#define REG_HDMI_28nm_PHY_PLL_LPFC2_CFG	0x00000034
+
+#define REG_HDMI_28nm_PHY_PLL_SDM_CFG0	0x00000038
+
+#define REG_HDMI_28nm_PHY_PLL_SDM_CFG1	0x0000003c
+
+#define REG_HDMI_28nm_PHY_PLL_SDM_CFG2	0x00000040
+
+#define REG_HDMI_28nm_PHY_PLL_SDM_CFG3	0x00000044
+
+#define REG_HDMI_28nm_PHY_PLL_SDM_CFG4	0x00000048
+
+#define REG_HDMI_28nm_PHY_PLL_SSC_CFG0	0x0000004c
+
+#define REG_HDMI_28nm_PHY_PLL_SSC_CFG1	0x00000050
+
+#define REG_HDMI_28nm_PHY_PLL_SSC_CFG2	0x00000054
+
+#define REG_HDMI_28nm_PHY_PLL_SSC_CFG3	0x00000058
+
+#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG0	0x0000005c
+
+#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG1	0x00000060
+
+#define REG_HDMI_28nm_PHY_PLL_LKDET_CFG2	0x00000064
+
+#define REG_HDMI_28nm_PHY_PLL_TEST_CFG	0x00000068
+#define HDMI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET	0x00000001
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG0	0x0000006c
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG1	0x00000070
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG2	0x00000074
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG3	0x00000078
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG4	0x0000007c
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG5	0x00000080
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG6	0x00000084
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG7	0x00000088
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG8	0x0000008c
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG9	0x00000090
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG10	0x00000094
+
+#define REG_HDMI_28nm_PHY_PLL_CAL_CFG11	0x00000098
+
+#define REG_HDMI_28nm_PHY_PLL_EFUSE_CFG	0x0000009c
+
+#define REG_HDMI_28nm_PHY_PLL_DEBUG_BUS_SEL	0x000000a0
+
 
 #endif /* HDMI_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index b62cdb968614..54aa93ff5473 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
 
 #include "msm_kms.h"
 #include "hdmi.h"
@@ -29,14 +30,14 @@ struct hdmi_connector {
 
 static int gpio_config(struct hdmi *hdmi, bool on)
 {
-	struct drm_device *dev = hdmi->dev;
+	struct device *dev = &hdmi->pdev->dev;
 	const struct hdmi_platform_config *config = hdmi->config;
 	int ret;
 
 	if (on) {
 		ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
 		if (ret) {
-			dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+			dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
 				"HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
 			goto error1;
 		}
@@ -44,7 +45,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
 
 		ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
 		if (ret) {
-			dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+			dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
 				"HDMI_DDC_DATA", config->ddc_data_gpio, ret);
 			goto error2;
 		}
@@ -52,7 +53,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
 
 		ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
 		if (ret) {
-			dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+			dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
 				"HDMI_HPD", config->hpd_gpio, ret);
 			goto error3;
 		}
@@ -62,7 +63,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
 		if (config->mux_en_gpio != -1) {
 			ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
 			if (ret) {
-				dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+				dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
 					"HDMI_MUX_EN", config->mux_en_gpio, ret);
 				goto error4;
 			}
@@ -72,7 +73,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
72 if (config->mux_sel_gpio != -1) { 73 if (config->mux_sel_gpio != -1) {
73 ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL"); 74 ret = gpio_request(config->mux_sel_gpio, "HDMI_MUX_SEL");
74 if (ret) { 75 if (ret) {
75 dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n", 76 dev_err(dev, "'%s'(%d) gpio_request failed: %d\n",
76 "HDMI_MUX_SEL", config->mux_sel_gpio, ret); 77 "HDMI_MUX_SEL", config->mux_sel_gpio, ret);
77 goto error5; 78 goto error5;
78 } 79 }
@@ -83,7 +84,7 @@ static int gpio_config(struct hdmi *hdmi, bool on)
83 ret = gpio_request(config->mux_lpm_gpio, 84 ret = gpio_request(config->mux_lpm_gpio,
84 "HDMI_MUX_LPM"); 85 "HDMI_MUX_LPM");
85 if (ret) { 86 if (ret) {
86 dev_err(dev->dev, 87 dev_err(dev,
87 "'%s'(%d) gpio_request failed: %d\n", 88 "'%s'(%d) gpio_request failed: %d\n",
88 "HDMI_MUX_LPM", 89 "HDMI_MUX_LPM",
89 config->mux_lpm_gpio, ret); 90 config->mux_lpm_gpio, ret);
@@ -136,7 +137,7 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
136{ 137{
137 struct hdmi *hdmi = hdmi_connector->hdmi; 138 struct hdmi *hdmi = hdmi_connector->hdmi;
138 const struct hdmi_platform_config *config = hdmi->config; 139 const struct hdmi_platform_config *config = hdmi->config;
139 struct drm_device *dev = hdmi_connector->base.dev; 140 struct device *dev = &hdmi->pdev->dev;
140 struct hdmi_phy *phy = hdmi->phy; 141 struct hdmi_phy *phy = hdmi->phy;
141 uint32_t hpd_ctrl; 142 uint32_t hpd_ctrl;
142 int i, ret; 143 int i, ret;
@@ -144,15 +145,21 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
144 for (i = 0; i < config->hpd_reg_cnt; i++) { 145 for (i = 0; i < config->hpd_reg_cnt; i++) {
145 ret = regulator_enable(hdmi->hpd_regs[i]); 146 ret = regulator_enable(hdmi->hpd_regs[i]);
146 if (ret) { 147 if (ret) {
147 dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n", 148 dev_err(dev, "failed to enable hpd regulator: %s (%d)\n",
148 config->hpd_reg_names[i], ret); 149 config->hpd_reg_names[i], ret);
149 goto fail; 150 goto fail;
150 } 151 }
151 } 152 }
152 153
154 ret = pinctrl_pm_select_default_state(dev);
155 if (ret) {
156 dev_err(dev, "pinctrl state chg failed: %d\n", ret);
157 goto fail;
158 }
159
153 ret = gpio_config(hdmi, true); 160 ret = gpio_config(hdmi, true);
154 if (ret) { 161 if (ret) {
155 dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret); 162 dev_err(dev, "failed to configure GPIOs: %d\n", ret);
156 goto fail; 163 goto fail;
157 } 164 }
158 165
@@ -161,13 +168,13 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
161 ret = clk_set_rate(hdmi->hpd_clks[i], 168 ret = clk_set_rate(hdmi->hpd_clks[i],
162 config->hpd_freq[i]); 169 config->hpd_freq[i]);
163 if (ret) 170 if (ret)
164 dev_warn(dev->dev, "failed to set clk %s (%d)\n", 171 dev_warn(dev, "failed to set clk %s (%d)\n",
165 config->hpd_clk_names[i], ret); 172 config->hpd_clk_names[i], ret);
166 } 173 }
167 174
168 ret = clk_prepare_enable(hdmi->hpd_clks[i]); 175 ret = clk_prepare_enable(hdmi->hpd_clks[i]);
169 if (ret) { 176 if (ret) {
170 dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n", 177 dev_err(dev, "failed to enable hpd clk: %s (%d)\n",
171 config->hpd_clk_names[i], ret); 178 config->hpd_clk_names[i], ret);
172 goto fail; 179 goto fail;
173 } 180 }
@@ -204,7 +211,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
204{ 211{
205 struct hdmi *hdmi = hdmi_connector->hdmi; 212 struct hdmi *hdmi = hdmi_connector->hdmi;
206 const struct hdmi_platform_config *config = hdmi->config; 213 const struct hdmi_platform_config *config = hdmi->config;
207 struct drm_device *dev = hdmi_connector->base.dev; 214 struct device *dev = &hdmi->pdev->dev;
208 int i, ret = 0; 215 int i, ret = 0;
209 216
210 /* Disable HPD interrupt */ 217 /* Disable HPD interrupt */
@@ -217,12 +224,16 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
217 224
218 ret = gpio_config(hdmi, false); 225 ret = gpio_config(hdmi, false);
219 if (ret) 226 if (ret)
220 dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret); 227 dev_warn(dev, "failed to unconfigure GPIOs: %d\n", ret);
228
229 ret = pinctrl_pm_select_sleep_state(dev);
230 if (ret)
231 dev_warn(dev, "pinctrl state chg failed: %d\n", ret);
221 232
222 for (i = 0; i < config->hpd_reg_cnt; i++) { 233 for (i = 0; i < config->hpd_reg_cnt; i++) {
223 ret = regulator_disable(hdmi->hpd_regs[i]); 234 ret = regulator_disable(hdmi->hpd_regs[i]);
224 if (ret) 235 if (ret)
225 dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n", 236 dev_warn(dev, "failed to disable hpd regulator: %s (%d)\n",
226 config->hpd_reg_names[i], ret); 237 config->hpd_reg_names[i], ret);
227 } 238 }
228} 239}
@@ -433,7 +444,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
433 444
434 ret = hpd_enable(hdmi_connector); 445 ret = hpd_enable(hdmi_connector);
435 if (ret) { 446 if (ret) {
436 dev_err(hdmi->dev->dev, "failed to enable HPD: %d\n", ret); 447 dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
437 goto fail; 448 goto fail;
438 } 449 }
439 450
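
The hdmi_connector.c changes above do two things: error messages now go through the platform device (dev_err() on &hdmi->pdev->dev) instead of dereferencing the drm_device, and HPD power-up/down is bracketed with pinctrl_pm_select_default_state() / pinctrl_pm_select_sleep_state(). A minimal sketch of that bracketing pattern in a hypothetical platform driver (function and variable names here are illustrative, not from this patch):

    #include <linux/device.h>
    #include <linux/pinctrl/consumer.h>

    /* Sketch: move pins to the "default" pinctrl state before powering
     * the block up, and back to "sleep" on the way down. Both states
     * come from the device's device tree node.
     */
    static int example_hpd_power_up(struct device *dev)
    {
            int ret;

            ret = pinctrl_pm_select_default_state(dev);
            if (ret) {
                    dev_err(dev, "pinctrl state chg failed: %d\n", ret);
                    return ret;
            }
            /* ... request GPIOs, enable clocks/regulators ... */
            return 0;
    }

    static void example_hpd_power_down(struct device *dev)
    {
            /* ... release GPIOs, disable clocks/regulators ... */
            if (pinctrl_pm_select_sleep_state(dev))
                    dev_warn(dev, "pinctrl state chg failed\n");
    }
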
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index 43bb54a9afbf..978c3f70872a 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -10,15 +10,15 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
 
 Copyright (C) 2013 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 1d39174d91fb..153fc487d683 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -10,17 +10,17 @@ git clone https://github.com/freedreno/envytools.git
 The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
 - /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
 - /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
 
-Copyright (C) 2013-2014 by the following authors:
+Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
 
 Permission is hereby granted, free of charge, to any person obtaining
@@ -680,18 +680,18 @@ static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
         return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
 }
 
-static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
-#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
-#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16
-static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
+static inline uint32_t REG_MDP4_PIPE_SSTILE_FRAME_SIZE(enum mdp4_pipe i0) { return 0x00020048 + 0x10000*i0; }
+#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(uint32_t val)
 {
-        return ((val) << MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK;
+        return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT__MASK;
 }
-#define MDP4_PIPE_FRAME_SIZE_WIDTH__MASK 0x0000ffff
-#define MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT 0
-static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
+#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(uint32_t val)
 {
-        return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
+        return ((val) << MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH__MASK;
 }
 
 static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mdp4_pipe i0) { return 0x00020050 + 0x10000*i0; }
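
The regenerated header above renames the per-pipe frame-size register to REG_MDP4_PIPE_SSTILE_FRAME_SIZE and keeps the usual rnndb pattern of a MASK/SHIFT pair plus an inline packer per field. A short sketch of how the packers compose a register value (the 1920x1080 numbers are arbitrary example values, not from this patch):

    /* WIDTH occupies bits [15:0], HEIGHT bits [31:16]; OR the packed
     * fields together to form the register word, as mdp4_plane does.
     */
    uint32_t frame_size = MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(1920) |
                          MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(1080);
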
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 73afa21822b4..c4bb9d9c7667 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -51,6 +51,11 @@ struct mdp4_crtc {
         /* if there is a pending flip, these will be non-null: */
         struct drm_pending_vblank_event *event;
 
+        /* Bits have been flushed at the last commit,
+         * used to decide if a vsync has happened since last commit.
+         */
+        u32 flushed_mask;
+
 #define PENDING_CURSOR 0x1
 #define PENDING_FLIP 0x2
         atomic_t pending;
@@ -93,6 +98,8 @@ static void crtc_flush(struct drm_crtc *crtc)
 
         DBG("%s: flush=%08x", mdp4_crtc->name, flush);
 
+        mdp4_crtc->flushed_mask = flush;
+
         mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
 }
 
@@ -537,6 +544,29 @@ static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
         crtc_flush(crtc);
 }
 
+static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
+{
+        struct drm_device *dev = crtc->dev;
+        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+        struct mdp4_kms *mdp4_kms = get_kms(crtc);
+        int ret;
+
+        ret = drm_crtc_vblank_get(crtc);
+        if (ret)
+                return;
+
+        ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
+                !(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
+                        mdp4_crtc->flushed_mask),
+                msecs_to_jiffies(50));
+        if (ret <= 0)
+                dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id);
+
+        mdp4_crtc->flushed_mask = 0;
+
+        drm_crtc_vblank_put(crtc);
+}
+
 uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
 {
         struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
@@ -600,6 +630,15 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
         mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
 }
 
+void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
+{
+        /* wait_for_flush_done is the only case for now.
+         * Later we will have command mode CRTC to wait for
+         * other event.
+         */
+        mdp4_crtc_wait_for_flush_done(crtc);
+}
+
 static const char *dma_names[] = {
         "DMA_P", "DMA_S", "DMA_E",
 };
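
mdp4_crtc_wait_for_flush_done() above records the flush bits written at commit time and then sleeps on the vblank wait queue until the hardware clears them in REG_MDP4_OVERLAY_FLUSH, with a 50ms timeout. The same wait-until-consumed idiom reduced to a skeleton (hypothetical helper, not part of the patch):

    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    /* Sketch: wake on every vblank and re-test whether the flush bits
     * we set have self-cleared; wait_event_timeout() returns 0 if the
     * condition never became true before the timeout.
     */
    static void example_wait_flush_done(wait_queue_head_t *vblank_wq,
                    u32 (*read_flush)(void), u32 flushed_mask)
    {
            long ret;

            ret = wait_event_timeout(*vblank_wq,
                            !(read_flush() & flushed_mask),
                            msecs_to_jiffies(50));
            if (ret <= 0)
                    pr_warn("flush bits not consumed within 50ms\n");
    }
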
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 7896323b2631..89614c6a6c1b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -38,7 +38,7 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
         return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
-#ifdef CONFIG_MSM_BUS_SCALING
+#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 #include <mach/board.h>
 /* not ironically named at all.. no, really.. */
 static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index d847b9436194..531e4acc2a87 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -119,6 +119,8 @@ static int mdp4_hw_init(struct msm_kms *kms)
         if (mdp4_kms->rev > 1)
                 mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
 
+        dev->mode_config.allow_fb_modifiers = true;
+
 out:
         pm_runtime_put_sync(dev->dev);
 
@@ -157,6 +159,12 @@ static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
         mdp4_disable(mdp4_kms);
 }
 
+static void mdp4_wait_for_crtc_commit_done(struct msm_kms *kms,
+                struct drm_crtc *crtc)
+{
+        mdp4_crtc_wait_for_commit_done(crtc);
+}
+
 static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
                 struct drm_encoder *encoder)
 {
@@ -195,6 +203,7 @@ static const struct mdp_kms_funcs kms_funcs = {
         .disable_vblank = mdp4_disable_vblank,
         .prepare_commit = mdp4_prepare_commit,
         .complete_commit = mdp4_complete_commit,
+        .wait_for_crtc_commit_done = mdp4_wait_for_crtc_commit_done,
         .get_format = mdp_get_format,
         .round_pixclk = mdp4_round_pixclk,
         .preclose = mdp4_preclose,
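
Two things happen in mdp4_kms.c above: mdp4_hw_init() sets dev->mode_config.allow_fb_modifiers, the opt-in that lets userspace pass format modifiers (DRM_MODE_FB_MODIFIERS) through AddFB2, and the kms function table gains a thin .wait_for_crtc_commit_done forwarder. A sketch of the opt-in alone, in a hypothetical driver init path (only the flag itself is taken from this patch):

    /* Sketch: without this flag the drm core rejects framebuffer
     * creation requests that carry modifiers, so the Samsung tile
     * path in mdp4_plane.c below could never be reached.
     */
    static void example_init_mode_config(struct drm_device *dev)
    {
            dev->mode_config.allow_fb_modifiers = true;
    }
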
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 0a5c58bde7a9..c1ecb9d6bdef 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -206,6 +206,7 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
 void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
 void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
+void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc);
 struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
                 struct drm_plane *plane, int id, int ovlp_id,
                 enum mdp4_dma dma_id);
@@ -229,7 +230,7 @@ static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
 }
 #endif
 
-#ifdef CONFIG_MSM_BUS_SCALING
+#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 static inline int match_dev_name(struct device *dev, void *data)
 {
         return !strcmp(dev_name(dev), data);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
index 60ec8222c9f6..c04843376c54 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
@@ -38,7 +38,7 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
         return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
-#ifdef CONFIG_MSM_BUS_SCALING
+#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 #include <mach/board.h>
 static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
 {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index dbc068988377..0d1dbb737933 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -33,6 +33,21 @@ struct mdp4_plane {
 };
 #define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
 
+/* MDP format helper functions */
+static inline
+enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb)
+{
+        bool is_tile = false;
+
+        if (fb->modifier[1] == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)
+                is_tile = true;
+
+        if (fb->pixel_format == DRM_FORMAT_NV12 && is_tile)
+                return FRAME_TILE_YCBCR_420;
+
+        return FRAME_LINEAR;
+}
+
 static void mdp4_plane_set_scanout(struct drm_plane *plane,
                 struct drm_framebuffer *fb);
 static int mdp4_plane_mode_set(struct drm_plane *plane,
@@ -205,6 +220,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
         uint32_t op_mode = 0;
         uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
         uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
+        enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb);
 
         if (!(crtc && fb)) {
                 DBG("%s: disabled!", mdp4_plane->name);
@@ -304,6 +320,7 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
                         MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
                         MDP4_PIPE_SRC_FORMAT_FETCH_PLANES(format->fetch_type) |
                         MDP4_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample) |
+                        MDP4_PIPE_SRC_FORMAT_FRAME_FORMAT(frame_type) |
                         COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
 
         mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
@@ -324,6 +341,11 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
         mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
         mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
 
+        if (frame_type != FRAME_LINEAR)
+                mdp4_write(mdp4_kms, REG_MDP4_PIPE_SSTILE_FRAME_SIZE(pipe),
+                                MDP4_PIPE_SSTILE_FRAME_SIZE_WIDTH(src_w) |
+                                MDP4_PIPE_SSTILE_FRAME_SIZE_HEIGHT(src_h));
+
         return 0;
 }
 
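
mdp4_get_frame_format() above derives the fetch mode from the framebuffer itself: NV12 whose second (chroma) plane carries DRM_FORMAT_MOD_SAMSUNG_64_32_TILE is fetched as FRAME_TILE_YCBCR_420, anything else stays FRAME_LINEAR, and only tiled frames get the SSTILE_FRAME_SIZE register programmed. The same decision as a standalone predicate (a sketch mirroring the logic, with a plain bool instead of the mdp4 enum):

    #include <stdbool.h>
    #include <stdint.h>
    #include <drm/drm_fourcc.h>

    /* Sketch: true when a buffer should be scanned out as tiled
     * YCbCr 4:2:0 - the same test as mdp4_get_frame_format() above.
     */
    static bool is_tiled_nv12(uint32_t pixel_format, uint64_t chroma_mod)
    {
            return pixel_format == DRM_FORMAT_NV12 &&
                   chroma_mod == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE;
    }
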
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index b9a4ded6e400..50e17527e2e5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,9 +8,17 @@ http://github.com/freedreno/envytools/
 git clone https://github.com/freedreno/envytools.git
 
 The rules-ng-ng source files this header was generated from are:
-- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp5.xml ( 29312 bytes, from 2015-03-23 21:18:48)
-- /local/mnt2/workspace2/sviau/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2014-06-02 18:31:15)
-- /local/mnt2/workspace2/sviau/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2015-03-23 20:38:49)
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
+- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
 
 Copyright (C) 2013-2015 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -120,6 +128,21 @@ enum mdp5_data_format {
         DATA_FORMAT_YUV = 1,
 };
 
+enum mdp5_block_size {
+        BLOCK_SIZE_64 = 0,
+        BLOCK_SIZE_128 = 1,
+};
+
+enum mdp5_rotate_mode {
+        ROTATE_0 = 0,
+        ROTATE_90 = 1,
+};
+
+enum mdp5_chroma_downsample_method {
+        DS_MTHD_NO_PIXEL_DROP = 0,
+        DS_MTHD_PIXEL_DROP = 1,
+};
+
 #define MDP5_IRQ_WB_0_DONE 0x00000001
 #define MDP5_IRQ_WB_1_DONE 0x00000002
 #define MDP5_IRQ_WB_2_DONE 0x00000010
@@ -314,19 +337,19 @@ static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val)
 #define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
 #define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
 
-#define REG_MDP5_SPLIT_DPL_EN 0x000003f4
+static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_EN(uint32_t i0) { return 0x000002f4 + __offset_MDP(i0); }
 
-#define REG_MDP5_SPLIT_DPL_UPPER 0x000003f8
-#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
-#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
-#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
-#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
+static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_UPPER(uint32_t i0) { return 0x000002f8 + __offset_MDP(i0); }
+#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
+#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
+#define MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
 
-#define REG_MDP5_SPLIT_DPL_LOWER 0x000004f0
-#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
-#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
-#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
-#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
+static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_LOWER(uint32_t i0) { return 0x000003f0 + __offset_MDP(i0); }
+#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
+#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
+#define MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
+#define MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
 
 static inline uint32_t __offset_CTL(uint32_t idx)
 {
@@ -782,7 +805,7 @@ static inline uint32_t MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
 #define MDP5_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
 #define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK 0x00180000
 #define MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT 19
-static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_sspp_fetch_type val)
+static inline uint32_t MDP5_PIPE_SRC_FORMAT_NUM_PLANES(enum mdp_fetch_type val)
 {
         return ((val) << MDP5_PIPE_SRC_FORMAT_NUM_PLANES__SHIFT) & MDP5_PIPE_SRC_FORMAT_NUM_PLANES__MASK;
 }
@@ -1234,6 +1257,351 @@ static inline uint32_t REG_MDP5_PP_FBC_BUDGET_CTL(uint32_t i0) { return 0x000000
 
 static inline uint32_t REG_MDP5_PP_FBC_LOSSY_MODE(uint32_t i0) { return 0x0000003c + __offset_PP(i0); }
 
+static inline uint32_t __offset_WB(uint32_t idx)
+{
+        switch (idx) {
+                default: return INVALID_IDX(idx);
+        }
+}
+static inline uint32_t REG_MDP5_WB(uint32_t i0) { return 0x00000000 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST_FORMAT(uint32_t i0) { return 0x00000000 + __offset_WB(i0); }
+#define MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK 0x00000003
+#define MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT 0
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC0_OUT(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_FORMAT_DSTC0_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC0_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK 0x0000000c
+#define MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT 2
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC1_OUT(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_FORMAT_DSTC1_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC1_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK 0x00000030
+#define MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT 4
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC2_OUT(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_FORMAT_DSTC2_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC2_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK 0x000000c0
+#define MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT 6
+static inline uint32_t MDP5_WB_DST_FORMAT_DSTC3_OUT(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_FORMAT_DSTC3_OUT__SHIFT) & MDP5_WB_DST_FORMAT_DSTC3_OUT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DSTC3_EN 0x00000100
+#define MDP5_WB_DST_FORMAT_DST_BPP__MASK 0x00000600
+#define MDP5_WB_DST_FORMAT_DST_BPP__SHIFT 9
+static inline uint32_t MDP5_WB_DST_FORMAT_DST_BPP(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_FORMAT_DST_BPP__SHIFT) & MDP5_WB_DST_FORMAT_DST_BPP__MASK;
+}
+#define MDP5_WB_DST_FORMAT_PACK_COUNT__MASK 0x00003000
+#define MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT 12
+static inline uint32_t MDP5_WB_DST_FORMAT_PACK_COUNT(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_FORMAT_PACK_COUNT__SHIFT) & MDP5_WB_DST_FORMAT_PACK_COUNT__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DST_ALPHA_X 0x00004000
+#define MDP5_WB_DST_FORMAT_PACK_TIGHT 0x00020000
+#define MDP5_WB_DST_FORMAT_PACK_ALIGN_MSB 0x00040000
+#define MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK 0x00180000
+#define MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT 19
+static inline uint32_t MDP5_WB_DST_FORMAT_WRITE_PLANES(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_FORMAT_WRITE_PLANES__SHIFT) & MDP5_WB_DST_FORMAT_WRITE_PLANES__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DST_DITHER_EN 0x00400000
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK 0x03800000
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT 23
+static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SAMP__MASK;
+}
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK 0x3c000000
+#define MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT 26
+static inline uint32_t MDP5_WB_DST_FORMAT_DST_CHROMA_SITE(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__SHIFT) & MDP5_WB_DST_FORMAT_DST_CHROMA_SITE__MASK;
+}
+#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK 0xc0000000
+#define MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT 30
+static inline uint32_t MDP5_WB_DST_FORMAT_FRAME_FORMAT(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_FORMAT_FRAME_FORMAT__SHIFT) & MDP5_WB_DST_FORMAT_FRAME_FORMAT__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_OP_MODE(uint32_t i0) { return 0x00000004 + __offset_WB(i0); }
+#define MDP5_WB_DST_OP_MODE_BWC_ENC_EN 0x00000001
+#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK 0x00000006
+#define MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT 1
+static inline uint32_t MDP5_WB_DST_OP_MODE_BWC_ENC_OP(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_OP_MODE_BWC_ENC_OP__SHIFT) & MDP5_WB_DST_OP_MODE_BWC_ENC_OP__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK 0x00000010
+#define MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT 4
+static inline uint32_t MDP5_WB_DST_OP_MODE_BLOCK_SIZE(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_OP_MODE_BLOCK_SIZE__SHIFT) & MDP5_WB_DST_OP_MODE_BLOCK_SIZE__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_ROT_MODE__MASK 0x00000020
+#define MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT 5
+static inline uint32_t MDP5_WB_DST_OP_MODE_ROT_MODE(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_OP_MODE_ROT_MODE__SHIFT) & MDP5_WB_DST_OP_MODE_ROT_MODE__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_ROT_EN 0x00000040
+#define MDP5_WB_DST_OP_MODE_CSC_EN 0x00000100
+#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK 0x00000200
+#define MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT 9
+static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_SRC_DATA_FORMAT__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK 0x00000400
+#define MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT 10
+static inline uint32_t MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CSC_DST_DATA_FORMAT__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_EN 0x00000800
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK 0x00001000
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT 12
+static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_FORMAT__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK 0x00002000
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT 13
+static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_H_MTHD__MASK;
+}
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK 0x00004000
+#define MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT 14
+static inline uint32_t MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__SHIFT) & MDP5_WB_DST_OP_MODE_CHROMA_DWN_SAMPLE_V_MTHD__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_PACK_PATTERN(uint32_t i0) { return 0x00000008 + __offset_WB(i0); }
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK 0x00000003
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT 0
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT0(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT0__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT0__MASK;
+}
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK 0x00000300
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT 8
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT1(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT1__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT1__MASK;
+}
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK 0x00030000
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT 16
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT2(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT2__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT2__MASK;
+}
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK 0x03000000
+#define MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT 24
+static inline uint32_t MDP5_WB_DST_PACK_PATTERN_ELEMENT3(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_PACK_PATTERN_ELEMENT3__SHIFT) & MDP5_WB_DST_PACK_PATTERN_ELEMENT3__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST0_ADDR(uint32_t i0) { return 0x0000000c + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST1_ADDR(uint32_t i0) { return 0x00000010 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST2_ADDR(uint32_t i0) { return 0x00000014 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST3_ADDR(uint32_t i0) { return 0x00000018 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST_YSTRIDE0(uint32_t i0) { return 0x0000001c + __offset_WB(i0); }
+#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK 0x0000ffff
+#define MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT 0
+static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST0_YSTRIDE__MASK;
+}
+#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK 0xffff0000
+#define MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT 16
+static inline uint32_t MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE0_DST1_YSTRIDE__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_YSTRIDE1(uint32_t i0) { return 0x00000020 + __offset_WB(i0); }
+#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK 0x0000ffff
+#define MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT 0
+static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST2_YSTRIDE__MASK;
+}
+#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK 0xffff0000
+#define MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT 16
+static inline uint32_t MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE(uint32_t val)
+{
+        return ((val) << MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__SHIFT) & MDP5_WB_DST_YSTRIDE1_DST3_YSTRIDE__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_DST_DITHER_BITDEPTH(uint32_t i0) { return 0x00000024 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW0(uint32_t i0) { return 0x00000030 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW1(uint32_t i0) { return 0x00000034 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW2(uint32_t i0) { return 0x00000038 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DITHER_MATRIX_ROW3(uint32_t i0) { return 0x0000003c + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_DST_WRITE_CONFIG(uint32_t i0) { return 0x00000048 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_ROTATION_DNSCALER(uint32_t i0) { return 0x00000050 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_0_3(uint32_t i0) { return 0x00000060 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_X_1_2(uint32_t i0) { return 0x00000064 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_0_3(uint32_t i0) { return 0x00000068 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_N16_INIT_PHASE_Y_1_2(uint32_t i0) { return 0x0000006c + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_OUT_SIZE(uint32_t i0) { return 0x00000074 + __offset_WB(i0); }
+#define MDP5_WB_OUT_SIZE_DST_W__MASK 0x0000ffff
+#define MDP5_WB_OUT_SIZE_DST_W__SHIFT 0
+static inline uint32_t MDP5_WB_OUT_SIZE_DST_W(uint32_t val)
+{
+        return ((val) << MDP5_WB_OUT_SIZE_DST_W__SHIFT) & MDP5_WB_OUT_SIZE_DST_W__MASK;
+}
+#define MDP5_WB_OUT_SIZE_DST_H__MASK 0xffff0000
+#define MDP5_WB_OUT_SIZE_DST_H__SHIFT 16
+static inline uint32_t MDP5_WB_OUT_SIZE_DST_H(uint32_t val)
+{
+        return ((val) << MDP5_WB_OUT_SIZE_DST_H__SHIFT) & MDP5_WB_OUT_SIZE_DST_H__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_ALPHA_X_VALUE(uint32_t i0) { return 0x00000078 + __offset_WB(i0); }
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_0(uint32_t i0) { return 0x00000260 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_11__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_0_COEFF_12__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_1(uint32_t i0) { return 0x00000264 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_13__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_1_COEFF_21__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_2(uint32_t i0) { return 0x00000268 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_22__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_2_COEFF_23__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_3(uint32_t i0) { return 0x0000026c + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_31__MASK;
+}
+#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK 0x1fff0000
+#define MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT 16
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_3_COEFF_32__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_MATRIX_COEFF_4(uint32_t i0) { return 0x00000270 + __offset_WB(i0); }
+#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK 0x00001fff
+#define MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__SHIFT) & MDP5_WB_CSC_MATRIX_COEFF_4_COEFF_33__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_PRECLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000274 + __offset_WB(i0) + 0x4*i1; }
+#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_HIGH__MASK;
+}
+#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_PRECLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTCLAMP_REG(uint32_t i0, uint32_t i1) { return 0x00000280 + __offset_WB(i0) + 0x4*i1; }
+#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK 0x000000ff
+#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_HIGH__MASK;
+}
+#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK 0x0000ff00
+#define MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT 8
+static inline uint32_t MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__SHIFT) & MDP5_WB_CSC_COMP_POSTCLAMP_REG_LOW__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_PREBIAS_REG(uint32_t i0, uint32_t i1) { return 0x0000028c + __offset_WB(i0) + 0x4*i1; }
+#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_PREBIAS_REG_VALUE__MASK;
+}
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP5_WB_CSC_COMP_POSTBIAS_REG(uint32_t i0, uint32_t i1) { return 0x00000298 + __offset_WB(i0) + 0x4*i1; }
+#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK 0x000001ff
+#define MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT 0
+static inline uint32_t MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE(uint32_t val)
+{
+        return ((val) << MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__SHIFT) & MDP5_WB_CSC_COMP_POSTBIAS_REG_VALUE__MASK;
+}
+
 static inline uint32_t __offset_INTF(uint32_t idx)
 {
         switch (idx) {
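
The new writeback (WB) block above follows the standard rnndb layout: a per-instance __offset_WB() lookup, REG_MDP5_WB_* accessors that add the instance offset to a fixed register offset, and MASK/SHIFT packers per field. Note that __offset_WB() currently returns INVALID_IDX() for every index, so these accessors only become usable once a hardware config fills in real instance offsets. A sketch of how such accessors would compose into a register write (instance number and sizes are hypothetical):

    /* Sketch: program the writeback output size on WB instance 0,
     * assuming a hardware config where __offset_WB(0) is valid.
     */
    uint32_t val = MDP5_WB_OUT_SIZE_DST_W(1920) |
                   MDP5_WB_OUT_SIZE_DST_H(1080);
    mdp5_write(mdp5_kms, REG_MDP5_WB_OUT_SIZE(0), val);
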
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index e4e89567f51d..ee31b16fe7ea 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -30,7 +30,7 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
         return to_mdp5_kms(to_mdp_kms(priv->kms));
 }
 
-#ifdef CONFIG_MSM_BUS_SCALING
+#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
 #include <mach/board.h>
 #include <linux/msm-bus.h>
 #include <linux/msm-bus-board.h>
@@ -216,16 +216,12 @@ static void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
 static void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
 {
         struct mdp5_cmd_encoder *mdp5_cmd_enc = to_mdp5_cmd_encoder(encoder);
-        struct mdp5_kms *mdp5_kms = get_kms(encoder);
         struct mdp5_ctl *ctl = mdp5_crtc_get_ctl(encoder->crtc);
         struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
-        int lm = mdp5_crtc_get_lm(encoder->crtc);
 
         if (WARN_ON(!mdp5_cmd_enc->enabled))
                 return;
 
-        /* Wait for the last frame done */
-        mdp_irq_wait(&mdp5_kms->base, lm2ppdone(lm));
         pingpong_tearcheck_disable(encoder);
 
         mdp5_ctl_set_encoder_state(ctl, false);
@@ -281,22 +277,22 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
          * start signal for the slave encoder
          */
         if (intf_num == 1)
-                data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
+                data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
         else if (intf_num == 2)
-                data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
+                data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
         else
                 return -EINVAL;
 
         /* Smart Panel, Sync mode */
-        data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
+        data |= MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL;
 
         /* Make sure clocks are on when connectors calling this function. */
         mdp5_enable(mdp5_kms);
-        mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
+        mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), data);
 
-        mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
-                        MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
-        mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
+        mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0),
+                        MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL);
+        mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
         mdp5_disable(mdp5_kms);
 
         return 0;
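
With the split-display registers regenerated as per-MDP-instance accessors, mdp5_cmd_encoder_set_split_display() above now passes an explicit instance (0) and uses the MDP5_MDP_-prefixed field names; the encoder-disable path also stops waiting on pingpong-done itself, since that wait moves into the CRTC (see mdp5_crtc.c below). The register sequence in isolation (a sketch of the call sites above; 'data' is assumed to hold the trigger-mux bit chosen per interface):

    /* Sketch: master/slave sync ("smart panel") split display on MDP
     * instance 0, mirroring the writes in the hunk above.
     */
    mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0),
                    data | MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL);
    mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0),
                    MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL);
    mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
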
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index c1530772187d..dea3d2e559b1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved. 2 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
5 * 5 *
@@ -46,6 +46,11 @@ struct mdp5_crtc {
46 /* if there is a pending flip, these will be non-null: */ 46 /* if there is a pending flip, these will be non-null: */
47 struct drm_pending_vblank_event *event; 47 struct drm_pending_vblank_event *event;
48 48
49 /* Bits have been flushed at the last commit,
50 * used to decide if a vsync has happened since last commit.
51 */
52 u32 flushed_mask;
53
49#define PENDING_CURSOR 0x1 54#define PENDING_CURSOR 0x1
50#define PENDING_FLIP 0x2 55#define PENDING_FLIP 0x2
51 atomic_t pending; 56 atomic_t pending;
@@ -55,6 +60,11 @@ struct mdp5_crtc {
55 60
56 struct mdp_irq vblank; 61 struct mdp_irq vblank;
57 struct mdp_irq err; 62 struct mdp_irq err;
63 struct mdp_irq pp_done;
64
65 struct completion pp_completion;
66
67 bool cmd_mode;
58 68
59 struct { 69 struct {
60 /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/ 70 /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
@@ -82,12 +92,18 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
82 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); 92 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
83} 93}
84 94
85static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask) 95static void request_pp_done_pending(struct drm_crtc *crtc)
96{
97 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
98 reinit_completion(&mdp5_crtc->pp_completion);
99}
100
101static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
86{ 102{
87 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 103 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
88 104
89 DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask); 105 DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
90 mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask); 106 return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
91} 107}
92 108
93/* 109/*
@@ -95,7 +111,7 @@ static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
95 * so that we can safely queue unref to current fb (ie. next 111 * so that we can safely queue unref to current fb (ie. next
96 * vblank we know hw is done w/ previous scanout_fb). 112 * vblank we know hw is done w/ previous scanout_fb).
97 */ 113 */
98static void crtc_flush_all(struct drm_crtc *crtc) 114static u32 crtc_flush_all(struct drm_crtc *crtc)
99{ 115{
100 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 116 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
101 struct drm_plane *plane; 117 struct drm_plane *plane;
@@ -103,7 +119,7 @@ static void crtc_flush_all(struct drm_crtc *crtc)
103 119
104 /* this should not happen: */ 120 /* this should not happen: */
105 if (WARN_ON(!mdp5_crtc->ctl)) 121 if (WARN_ON(!mdp5_crtc->ctl))
106 return; 122 return 0;
107 123
108 drm_atomic_crtc_for_each_plane(plane, crtc) { 124 drm_atomic_crtc_for_each_plane(plane, crtc) {
109 flush_mask |= mdp5_plane_get_flush(plane); 125 flush_mask |= mdp5_plane_get_flush(plane);
@@ -111,7 +127,7 @@ static void crtc_flush_all(struct drm_crtc *crtc)
111 127
112 flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm); 128 flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
113 129
114 crtc_flush(crtc, flush_mask); 130 return crtc_flush(crtc, flush_mask);
115} 131}
116 132
117/* if file!=NULL, this is preclose potential cancel-flip path */ 133/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -143,6 +159,8 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
143 } 159 }
144 160
145 if (mdp5_crtc->ctl && !crtc->state->enable) { 161 if (mdp5_crtc->ctl && !crtc->state->enable) {
162 /* set STAGE_UNUSED for all layers */
163 mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
146 mdp5_ctl_release(mdp5_crtc->ctl); 164 mdp5_ctl_release(mdp5_crtc->ctl);
147 mdp5_crtc->ctl = NULL; 165 mdp5_crtc->ctl = NULL;
148 } 166 }
@@ -274,8 +292,8 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
274 if (WARN_ON(!mdp5_crtc->enabled)) 292 if (WARN_ON(!mdp5_crtc->enabled))
275 return; 293 return;
276 294
277 /* set STAGE_UNUSED for all layers */ 295 if (mdp5_crtc->cmd_mode)
278 mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000); 296 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
279 297
280 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); 298 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
281 mdp5_disable(mdp5_kms); 299 mdp5_disable(mdp5_kms);
@@ -296,6 +314,9 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
296 mdp5_enable(mdp5_kms); 314 mdp5_enable(mdp5_kms);
297 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); 315 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
298 316
317 if (mdp5_crtc->cmd_mode)
318 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);
319
299 mdp5_crtc->enabled = true; 320 mdp5_crtc->enabled = true;
300} 321}
301 322
@@ -396,7 +417,18 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
396 return; 417 return;
397 418
398 blend_setup(crtc); 419 blend_setup(crtc);
399 crtc_flush_all(crtc); 420
 421	/* PP_DONE irq is only used by command mode for now.
 422	 * It is better to request pending before the FLUSH and START
 423	 * triggers, to make sure no pp_done irq is missed.
 424	 * This is safe because no pp_done will happen before the SW
 425	 * trigger in command mode.
 426	 */
427 if (mdp5_crtc->cmd_mode)
428 request_pp_done_pending(crtc);
429
430 mdp5_crtc->flushed_mask = crtc_flush_all(crtc);
431
400 request_pending(crtc, PENDING_FLIP); 432 request_pending(crtc, PENDING_FLIP);
401} 433}
402 434
@@ -601,6 +633,52 @@ static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
601 DBG("%s: error: %08x", mdp5_crtc->name, irqstatus); 633 DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
602} 634}
603 635
636static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
637{
638 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
639 pp_done);
640
641 complete(&mdp5_crtc->pp_completion);
642}
643
644static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
645{
646 struct drm_device *dev = crtc->dev;
647 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
648 int ret;
649
650 ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
651 msecs_to_jiffies(50));
652 if (ret == 0)
 653		dev_warn(dev->dev, "pp done timed out, lm=%d\n", mdp5_crtc->lm);
654}
655
656static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
657{
658 struct drm_device *dev = crtc->dev;
659 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
660 int ret;
661
662 /* Should not call this function if crtc is disabled. */
663 if (!mdp5_crtc->ctl)
664 return;
665
666 ret = drm_crtc_vblank_get(crtc);
667 if (ret)
668 return;
669
670 ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
671 ((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) &
672 mdp5_crtc->flushed_mask) == 0),
673 msecs_to_jiffies(50));
674 if (ret <= 0)
675 dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);
676
677 mdp5_crtc->flushed_mask = 0;
678
679 drm_crtc_vblank_put(crtc);
680}
681
604uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc) 682uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
605{ 683{
606 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 684 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
@@ -622,16 +700,19 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf)
622 700
623 /* now that we know what irq's we want: */ 701 /* now that we know what irq's we want: */
624 mdp5_crtc->err.irqmask = intf2err(intf->num); 702 mdp5_crtc->err.irqmask = intf2err(intf->num);
703 mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
704
705 if ((intf->type == INTF_DSI) &&
706 (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
707 mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
708 mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
709 mdp5_crtc->cmd_mode = true;
710 } else {
711 mdp5_crtc->pp_done.irqmask = 0;
712 mdp5_crtc->pp_done.irq = NULL;
713 mdp5_crtc->cmd_mode = false;
714 }
625 715
626 /* Register command mode Pingpong done as vblank for now,
 627	 * so that atomic commit waits for it to finish.				
628 * Ideally, in the future, we should take rd_ptr done as vblank,
 629	 * and let atomic commit wait for pingpong done in command mode.		
630 */
631 if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
632 mdp5_crtc->vblank.irqmask = lm2ppdone(lm);
633 else
634 mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
635 mdp_irq_update(&mdp5_kms->base); 716 mdp_irq_update(&mdp5_kms->base);
636 717
637 mdp5_ctl_set_intf(mdp5_crtc->ctl, intf); 718 mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
@@ -649,6 +730,16 @@ struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
649 return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl; 730 return WARN_ON(!crtc) ? NULL : mdp5_crtc->ctl;
650} 731}
651 732
733void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
734{
735 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
736
737 if (mdp5_crtc->cmd_mode)
738 mdp5_crtc_wait_for_pp_done(crtc);
739 else
740 mdp5_crtc_wait_for_flush_done(crtc);
741}
742
652/* initialize crtc */ 743/* initialize crtc */
653struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, 744struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
654 struct drm_plane *plane, int id) 745 struct drm_plane *plane, int id)
@@ -667,6 +758,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
667 758
668 spin_lock_init(&mdp5_crtc->lm_lock); 759 spin_lock_init(&mdp5_crtc->lm_lock);
669 spin_lock_init(&mdp5_crtc->cursor.lock); 760 spin_lock_init(&mdp5_crtc->cursor.lock);
761 init_completion(&mdp5_crtc->pp_completion);
670 762
671 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; 763 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
672 mdp5_crtc->err.irq = mdp5_crtc_err_irq; 764 mdp5_crtc->err.irq = mdp5_crtc_err_irq;
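The command-mode wait introduced above is an arm-before-trigger handshake: request_pp_done_pending() re-arms the completion before the FLUSH/START write, and the PP_DONE irq fires complete(). A minimal user-space sketch of that ordering, with a pthread condition variable standing in for struct completion (all names below are illustrative, not kernel API):

/* compile with: cc pp_done.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int pp_done;

static void reinit_completion_model(void)
{
	pthread_mutex_lock(&lock);
	pp_done = 0;			/* arm *before* kicking the HW */
	pthread_mutex_unlock(&lock);
}

static void *irq_thread(void *arg)
{
	usleep(1000);			/* pretend the panel latched a frame */
	pthread_mutex_lock(&lock);
	pp_done = 1;			/* complete(&pp_completion) analogue */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	reinit_completion_model();	/* request_pp_done_pending() */
	pthread_create(&t, NULL, irq_thread, NULL);	/* FLUSH + START */

	pthread_mutex_lock(&lock);
	while (!pp_done)
		pthread_cond_wait(&cond, &lock);	/* wait_for_pp_done() */
	pthread_mutex_unlock(&lock);

	printf("pp_done received\n");
	pthread_join(t, NULL);
	return 0;
}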
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 5488b687c8d1..f2530f224a76 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -392,8 +392,10 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
392 * CTL registers need to be flushed in some circumstances; if that is the 392 * CTL registers need to be flushed in some circumstances; if that is the
393 * case, some trigger bits will be present in both flush mask and 393 * case, some trigger bits will be present in both flush mask and
394 * ctl->pending_ctl_trigger. 394 * ctl->pending_ctl_trigger.
395 *
396 * Return H/W flushed bit mask.
395 */ 397 */
396int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask) 398u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
397{ 399{
398 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; 400 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
399 struct op_mode *pipeline = &ctl->pipeline; 401 struct op_mode *pipeline = &ctl->pipeline;
@@ -424,7 +426,12 @@ int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
424 refill_start_mask(ctl); 426 refill_start_mask(ctl);
425 } 427 }
426 428
427 return 0; 429 return flush_mask;
430}
431
432u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
433{
434 return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
428} 435}
429 436
430void mdp5_ctl_release(struct mdp5_ctl *ctl) 437void mdp5_ctl_release(struct mdp5_ctl *ctl)
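mdp5_ctl_commit() now hands back the exact mask it wrote to CTL_FLUSH, and mdp5_ctl_get_commit_status() re-reads that register; the hardware clears each bit as it consumes the flush, so a commit has landed once the intersection with the saved mask is empty. A self-contained sketch of that handshake, where read_flush_status() is a made-up stand-in for the register read:

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_flush_reg = 0x0c;	/* pretend bits 2,3 are still flushing */

static uint32_t read_flush_status(void)
{
	/* model: the hardware clears one pending bit per poll */
	uint32_t bit = hw_flush_reg & -hw_flush_reg;	/* lowest set bit */

	hw_flush_reg &= ~bit;
	return hw_flush_reg;
}

static int wait_for_flush_done(uint32_t flushed_mask, int max_polls)
{
	while (max_polls--) {
		if ((read_flush_status() & flushed_mask) == 0)
			return 0;	/* commit reached the hardware */
	}
	return -1;			/* mirror the dev_warn timeout path */
}

int main(void)
{
	uint32_t flushed_mask = 0x0c;	/* what ctl_commit() returned */

	if (wait_for_flush_done(flushed_mask, 50))
		fprintf(stderr, "flush timed out\n");
	else
		printf("flush done\n");
	return 0;
}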
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index 7a62000994a1..4678228c4f14 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -88,7 +88,8 @@ u32 mdp_ctl_flush_mask_cursor(int cursor_id);
88u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf); 88u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
89 89
90/* @flush_mask: see CTL flush masks definitions below */ 90/* @flush_mask: see CTL flush masks definitions below */
91int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask); 91u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
92u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
92 93
93void mdp5_ctl_release(struct mdp5_ctl *ctl); 94void mdp5_ctl_release(struct mdp5_ctl *ctl);
94 95
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 1188f4bf1e60..de97c08f3f1f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -36,7 +36,7 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
36 return to_mdp5_kms(to_mdp_kms(priv->kms)); 36 return to_mdp5_kms(to_mdp_kms(priv->kms));
37} 37}
38 38
39#ifdef CONFIG_MSM_BUS_SCALING 39#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
40#include <mach/board.h> 40#include <mach/board.h>
41#include <mach/msm_bus.h> 41#include <mach/msm_bus.h>
42#include <mach/msm_bus_board.h> 42#include <mach/msm_bus_board.h>
@@ -144,10 +144,14 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
144 mode->type, mode->flags); 144 mode->type, mode->flags);
145 145
146 ctrl_pol = 0; 146 ctrl_pol = 0;
147 if (mode->flags & DRM_MODE_FLAG_NHSYNC) 147
148 ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW; 148 /* DSI controller cannot handle active-low sync signals. */
149 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 149 if (mdp5_encoder->intf.type != INTF_DSI) {
150 ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW; 150 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
151 ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
152 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
153 ctrl_pol |= MDP5_INTF_POLARITY_CTL_VSYNC_LOW;
154 }
151 /* probably need to get DATA_EN polarity from panel.. */ 155 /* probably need to get DATA_EN polarity from panel.. */
152 156
153 dtv_hsync_skew = 0; /* get this from panel? */ 157 dtv_hsync_skew = 0; /* get this from panel? */
@@ -304,9 +308,9 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
304 * to use the master's enable signal for the slave encoder. 308 * to use the master's enable signal for the slave encoder.
305 */ 309 */
306 if (intf_num == 1) 310 if (intf_num == 1)
307 data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC; 311 data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
308 else if (intf_num == 2) 312 else if (intf_num == 2)
309 data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC; 313 data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
310 else 314 else
311 return -EINVAL; 315 return -EINVAL;
312 316
@@ -315,9 +319,9 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
315 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 319 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0),
316 MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); 320 MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
317 /* Dumb Panel, Sync mode */ 321 /* Dumb Panel, Sync mode */
318 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0); 322 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0);
319 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data); 323 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data);
320 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1); 324 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1);
321 mdp5_disable(mdp5_kms); 325 mdp5_disable(mdp5_kms);
322 326
323 return 0; 327 return 0;
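The polarity hunk encodes a hardware constraint rather than a policy choice: DSI cannot drive active-low syncs, so the NHSYNC/NVSYNC mode flags are honoured only on other interface types. The same guard in isolation (flag values and names here are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for the DRM mode flags and MDP5 polarity bits */
#define MODE_FLAG_NHSYNC	(1 << 0)
#define MODE_FLAG_NVSYNC	(1 << 1)
#define POL_CTL_HSYNC_LOW	(1 << 0)
#define POL_CTL_VSYNC_LOW	(1 << 1)

enum intf_type { INTF_DSI, INTF_HDMI };

static uint32_t build_ctrl_pol(enum intf_type type, uint32_t mode_flags)
{
	uint32_t ctrl_pol = 0;

	/* DSI cannot handle active-low syncs: ignore the mode flags there */
	if (type != INTF_DSI) {
		if (mode_flags & MODE_FLAG_NHSYNC)
			ctrl_pol |= POL_CTL_HSYNC_LOW;
		if (mode_flags & MODE_FLAG_NVSYNC)
			ctrl_pol |= POL_CTL_VSYNC_LOW;
	}
	return ctrl_pol;
}

int main(void)
{
	printf("HDMI: %#x\n", build_ctrl_pol(INTF_HDMI, MODE_FLAG_NHSYNC));
	printf("DSI:  %#x\n", build_ctrl_pol(INTF_DSI, MODE_FLAG_NHSYNC));
	return 0;
}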
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index bbacf9d2b738..206f758f7d64 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -80,6 +80,12 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
80 mdp5_disable(mdp5_kms); 80 mdp5_disable(mdp5_kms);
81} 81}
82 82
83static void mdp5_wait_for_crtc_commit_done(struct msm_kms *kms,
84 struct drm_crtc *crtc)
85{
86 mdp5_crtc_wait_for_commit_done(crtc);
87}
88
83static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate, 89static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
84 struct drm_encoder *encoder) 90 struct drm_encoder *encoder)
85{ 91{
@@ -141,6 +147,7 @@ static const struct mdp_kms_funcs kms_funcs = {
141 .disable_vblank = mdp5_disable_vblank, 147 .disable_vblank = mdp5_disable_vblank,
142 .prepare_commit = mdp5_prepare_commit, 148 .prepare_commit = mdp5_prepare_commit,
143 .complete_commit = mdp5_complete_commit, 149 .complete_commit = mdp5_complete_commit,
150 .wait_for_crtc_commit_done = mdp5_wait_for_crtc_commit_done,
144 .get_format = mdp_get_format, 151 .get_format = mdp_get_format,
145 .round_pixclk = mdp5_round_pixclk, 152 .round_pixclk = mdp5_round_pixclk,
146 .set_split_display = mdp5_set_split_display, 153 .set_split_display = mdp5_set_split_display,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 2c0de174cc09..e0eb24587c84 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -237,6 +237,7 @@ int mdp5_crtc_get_lm(struct drm_crtc *crtc);
237struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc); 237struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
238void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); 238void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
239void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf); 239void mdp5_crtc_set_intf(struct drm_crtc *crtc, struct mdp5_interface *intf);
240void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
240struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, 241struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
241 struct drm_plane *plane, int id); 242 struct drm_plane *plane, int id);
242 243
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index a1d35f162c7f..641d036c5bcb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -10,17 +10,17 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2014-12-05 15:34:49)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20908 bytes, from 2014-12-08 16:13:00) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-03-24 22:05:22)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2357 bytes, from 2014-12-08 16:13:00) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2352 bytes, from 2015-04-12 15:02:42)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 27208 bytes, from 2015-01-13 23:56:11) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 35083 bytes, from 2015-04-12 15:04:03)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 22094 bytes, from 2015-05-12 12:45:23)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 26848 bytes, from 2015-01-13 23:55:57) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29012 bytes, from 2015-05-12 12:45:23)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 8253 bytes, from 2014-12-08 16:13:00) 21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-12 12:45:23)
22 22
23Copyright (C) 2013-2014 by the following authors: 23Copyright (C) 2013-2015 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 24- Rob Clark <robdclark@gmail.com> (robclark)
25 25
26Permission is hereby granted, free of charge, to any person obtaining 26Permission is hereby granted, free of charge, to any person obtaining
@@ -52,7 +52,7 @@ enum mdp_chroma_samp_type {
52 CHROMA_420 = 3, 52 CHROMA_420 = 3,
53}; 53};
54 54
55enum mdp_sspp_fetch_type { 55enum mdp_fetch_type {
56 MDP_PLANE_INTERLEAVED = 0, 56 MDP_PLANE_INTERLEAVED = 0,
57 MDP_PLANE_PLANAR = 1, 57 MDP_PLANE_PLANAR = 1,
58 MDP_PLANE_PSEUDO_PLANAR = 2, 58 MDP_PLANE_PSEUDO_PLANAR = 2,
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index f683433b6727..7b0524dc1872 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -96,6 +96,12 @@ static const struct mdp_format formats[] = {
96 /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */ 96 /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt ... */
97 FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4, 97 FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4,
98 MDP_PLANE_INTERLEAVED, CHROMA_RGB), 98 MDP_PLANE_INTERLEAVED, CHROMA_RGB),
99 FMT(ABGR8888, 8, 8, 8, 8, 2, 0, 1, 3, true, true, 4, 4,
100 MDP_PLANE_INTERLEAVED, CHROMA_RGB),
101 FMT(RGBA8888, 8, 8, 8, 8, 3, 1, 0, 2, true, true, 4, 4,
102 MDP_PLANE_INTERLEAVED, CHROMA_RGB),
103 FMT(BGRA8888, 8, 8, 8, 8, 3, 2, 0, 1, true, true, 4, 4,
104 MDP_PLANE_INTERLEAVED, CHROMA_RGB),
99 FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4, 105 FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4,
100 MDP_PLANE_INTERLEAVED, CHROMA_RGB), 106 MDP_PLANE_INTERLEAVED, CHROMA_RGB),
101 FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3, 107 FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3,
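Format support in mdp_format.c is purely table-driven: each new pixel format is one FMT() row, and lookup is a linear scan keyed on the fourcc. A simplified standalone sketch of that pattern (structures and fields reduced, not the driver's actual layout):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fmt {
	const char *name;	/* stands in for the DRM fourcc */
	uint8_t cpp;		/* bytes per pixel */
	uint8_t alpha_enable;
};

static const struct fmt formats[] = {
	{ "ARGB8888", 4, 1 },
	{ "ABGR8888", 4, 1 },	/* the new rows: same cpp, new unpack order */
	{ "RGBA8888", 4, 1 },
	{ "BGRA8888", 4, 1 },
	{ "XRGB8888", 4, 0 },
	{ "RGB888",   3, 0 },
};

static const struct fmt *get_format(const char *name)
{
	size_t i;

	for (i = 0; i < sizeof(formats) / sizeof(formats[0]); i++)
		if (!strcmp(formats[i].name, name))
			return &formats[i];
	return NULL;		/* unsupported format */
}

int main(void)
{
	const struct fmt *f = get_format("BGRA8888");

	if (f)
		printf("%s: %u bytes/pixel, alpha=%u\n",
		       f->name, f->cpp, f->alpha_enable);
	return 0;
}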
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 5ae4039d68e4..2d3428cb74d0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -88,7 +88,7 @@ struct mdp_format {
88 uint8_t unpack[4]; 88 uint8_t unpack[4];
89 bool alpha_enable, unpack_tight; 89 bool alpha_enable, unpack_tight;
90 uint8_t cpp, unpack_count; 90 uint8_t cpp, unpack_count;
91 enum mdp_sspp_fetch_type fetch_type; 91 enum mdp_fetch_type fetch_type;
92 enum mdp_chroma_samp_type chroma_sample; 92 enum mdp_chroma_samp_type chroma_sample;
93}; 93};
94#define to_mdp_format(x) container_of(x, struct mdp_format, base) 94#define to_mdp_format(x) container_of(x, struct mdp_format, base)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 5b192128cda2..1b22d8bfe142 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -84,6 +84,33 @@ static void commit_destroy(struct msm_commit *c)
84 kfree(c); 84 kfree(c);
85} 85}
86 86
87static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
88 struct drm_atomic_state *old_state)
89{
90 struct drm_crtc *crtc;
91 struct msm_drm_private *priv = old_state->dev->dev_private;
92 struct msm_kms *kms = priv->kms;
93 int ncrtcs = old_state->dev->mode_config.num_crtc;
94 int i;
95
96 for (i = 0; i < ncrtcs; i++) {
97 crtc = old_state->crtcs[i];
98
99 if (!crtc)
100 continue;
101
102 if (!crtc->state->enable)
103 continue;
104
105 /* Legacy cursor ioctls are completely unsynced, and userspace
106 * relies on that (by doing tons of cursor updates). */
107 if (old_state->legacy_cursor_update)
108 continue;
109
110 kms->funcs->wait_for_crtc_commit_done(kms, crtc);
111 }
112}
113
87/* The (potentially) asynchronous part of the commit. At this point 114/* The (potentially) asynchronous part of the commit. At this point
88 * nothing can fail short of armageddon. 115 * nothing can fail short of armageddon.
89 */ 116 */
@@ -115,7 +142,7 @@ static void complete_commit(struct msm_commit *c)
115 * not be critical path) 142 * not be critical path)
116 */ 143 */
117 144
118 drm_atomic_helper_wait_for_vblanks(dev, state); 145 msm_atomic_wait_for_commit_done(dev, state);
119 146
120 drm_atomic_helper_cleanup_planes(dev, state); 147 drm_atomic_helper_cleanup_planes(dev, state);
121 148
@@ -139,7 +166,6 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
139 c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ)); 166 c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
140} 167}
141 168
142
143int msm_atomic_check(struct drm_device *dev, 169int msm_atomic_check(struct drm_device *dev,
144 struct drm_atomic_state *state) 170 struct drm_atomic_state *state)
145{ 171{
@@ -178,7 +204,7 @@ int msm_atomic_commit(struct drm_device *dev,
178{ 204{
179 int nplanes = dev->mode_config.num_total_plane; 205 int nplanes = dev->mode_config.num_total_plane;
180 int ncrtcs = dev->mode_config.num_crtc; 206 int ncrtcs = dev->mode_config.num_crtc;
181 struct timespec timeout; 207 ktime_t timeout;
182 struct msm_commit *c; 208 struct msm_commit *c;
183 int i, ret; 209 int i, ret;
184 210
@@ -187,8 +213,10 @@ int msm_atomic_commit(struct drm_device *dev,
187 return ret; 213 return ret;
188 214
189 c = commit_init(state); 215 c = commit_init(state);
190 if (!c) 216 if (!c) {
191 return -ENOMEM; 217 ret = -ENOMEM;
218 goto error;
219 }
192 220
193 /* 221 /*
194 * Figure out what crtcs we have: 222 * Figure out what crtcs we have:
@@ -221,7 +249,7 @@ int msm_atomic_commit(struct drm_device *dev,
221 ret = start_atomic(dev->dev_private, c->crtc_mask); 249 ret = start_atomic(dev->dev_private, c->crtc_mask);
222 if (ret) { 250 if (ret) {
223 kfree(c); 251 kfree(c);
224 return ret; 252 goto error;
225 } 253 }
226 254
227 /* 255 /*
@@ -253,7 +281,7 @@ int msm_atomic_commit(struct drm_device *dev,
253 return 0; 281 return 0;
254 } 282 }
255 283
256 jiffies_to_timespec(jiffies + msecs_to_jiffies(1000), &timeout); 284 timeout = ktime_add_ms(ktime_get(), 1000);
257 285
258 ret = msm_wait_fence_interruptable(dev, c->fence, &timeout); 286 ret = msm_wait_fence_interruptable(dev, c->fence, &timeout);
259 if (ret) { 287 if (ret) {
@@ -265,4 +293,8 @@ int msm_atomic_commit(struct drm_device *dev,
265 complete_commit(c); 293 complete_commit(c);
266 294
267 return 0; 295 return 0;
296
297error:
298 drm_atomic_helper_cleanup_planes(dev, state);
299 return ret;
268} 300}
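msm_atomic_wait_for_commit_done() swaps the generic vblank wait for a per-CRTC callback, skipping CRTCs that are absent or disabled and skipping legacy cursor updates entirely. A condensed model of that loop, with the msm_kms_funcs indirection reduced to a bare function pointer (all types below are simplified stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct crtc {
	bool enabled;
	int id;
};

struct kms_funcs {
	void (*wait_for_crtc_commit_done)(struct crtc *crtc);
};

static void wait_done(struct crtc *crtc)
{
	printf("waiting for commit on crtc %d\n", crtc->id);
}

static const struct kms_funcs funcs = {
	.wait_for_crtc_commit_done = wait_done,
};

static void wait_for_commit_done(struct crtc **crtcs, int ncrtcs,
				 bool legacy_cursor_update)
{
	int i;

	for (i = 0; i < ncrtcs; i++) {
		if (!crtcs[i] || !crtcs[i]->enabled)
			continue;
		/* legacy cursor ioctls are unsynchronized by design */
		if (legacy_cursor_update)
			continue;
		funcs.wait_for_crtc_commit_done(crtcs[i]);
	}
}

int main(void)
{
	struct crtc c0 = { true, 0 }, c1 = { false, 1 };
	struct crtc *crtcs[] = { &c0, &c1, NULL };

	wait_for_commit_done(crtcs, 3, false);
	return 0;
}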
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c80a6bee2b18..b7ef56ed8d1c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -638,7 +638,7 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
638 */ 638 */
639 639
640int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 640int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
641 struct timespec *timeout) 641 ktime_t *timeout)
642{ 642{
643 struct msm_drm_private *priv = dev->dev_private; 643 struct msm_drm_private *priv = dev->dev_private;
644 int ret; 644 int ret;
@@ -656,14 +656,16 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
656 /* no-wait: */ 656 /* no-wait: */
657 ret = fence_completed(dev, fence) ? 0 : -EBUSY; 657 ret = fence_completed(dev, fence) ? 0 : -EBUSY;
658 } else { 658 } else {
659 unsigned long timeout_jiffies = timespec_to_jiffies(timeout); 659 ktime_t now = ktime_get();
660 unsigned long start_jiffies = jiffies;
661 unsigned long remaining_jiffies; 660 unsigned long remaining_jiffies;
662 661
663 if (time_after(start_jiffies, timeout_jiffies)) 662 if (ktime_compare(*timeout, now) < 0) {
664 remaining_jiffies = 0; 663 remaining_jiffies = 0;
665 else 664 } else {
666 remaining_jiffies = timeout_jiffies - start_jiffies; 665 ktime_t rem = ktime_sub(*timeout, now);
666 struct timespec ts = ktime_to_timespec(rem);
667 remaining_jiffies = timespec_to_jiffies(&ts);
668 }
667 669
668 ret = wait_event_interruptible_timeout(priv->fence_event, 670 ret = wait_event_interruptible_timeout(priv->fence_event,
669 fence_completed(dev, fence), 671 fence_completed(dev, fence),
@@ -772,13 +774,17 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
772 args->flags, &args->handle); 774 args->flags, &args->handle);
773} 775}
774 776
775#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec }) 777static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
778{
779 return ktime_set(timeout.tv_sec, timeout.tv_nsec);
780}
776 781
777static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data, 782static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
778 struct drm_file *file) 783 struct drm_file *file)
779{ 784{
780 struct drm_msm_gem_cpu_prep *args = data; 785 struct drm_msm_gem_cpu_prep *args = data;
781 struct drm_gem_object *obj; 786 struct drm_gem_object *obj;
787 ktime_t timeout = to_ktime(args->timeout);
782 int ret; 788 int ret;
783 789
784 if (args->op & ~MSM_PREP_FLAGS) { 790 if (args->op & ~MSM_PREP_FLAGS) {
@@ -790,7 +796,7 @@ static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
790 if (!obj) 796 if (!obj)
791 return -ENOENT; 797 return -ENOENT;
792 798
793 ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout)); 799 ret = msm_gem_cpu_prep(obj, args->op, &timeout);
794 800
795 drm_gem_object_unreference_unlocked(obj); 801 drm_gem_object_unreference_unlocked(obj);
796 802
@@ -840,14 +846,14 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
840 struct drm_file *file) 846 struct drm_file *file)
841{ 847{
842 struct drm_msm_wait_fence *args = data; 848 struct drm_msm_wait_fence *args = data;
849 ktime_t timeout = to_ktime(args->timeout);
843 850
844 if (args->pad) { 851 if (args->pad) {
845 DRM_ERROR("invalid pad: %08x\n", args->pad); 852 DRM_ERROR("invalid pad: %08x\n", args->pad);
846 return -EINVAL; 853 return -EINVAL;
847 } 854 }
848 855
849 return msm_wait_fence_interruptable(dev, args->fence, 856 return msm_wait_fence_interruptable(dev, args->fence, &timeout);
850 &TS(args->timeout));
851} 857}
852 858
853static const struct drm_ioctl_desc msm_ioctls[] = { 859static const struct drm_ioctl_desc msm_ioctls[] = {
@@ -885,6 +891,7 @@ static struct drm_driver msm_driver = {
885 DRIVER_GEM | 891 DRIVER_GEM |
886 DRIVER_PRIME | 892 DRIVER_PRIME |
887 DRIVER_RENDER | 893 DRIVER_RENDER |
894 DRIVER_ATOMIC |
888 DRIVER_MODESET, 895 DRIVER_MODESET,
889 .load = msm_load, 896 .load = msm_load,
890 .unload = msm_unload, 897 .unload = msm_unload,
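The timeout rework above replaces relative struct timespec values with absolute ktime_t deadlines, so a wait restarted after a signal cannot stretch the total budget; the remaining wait is simply deadline minus now, clamped at zero. The same arithmetic standalone, with CLOCK_MONOTONIC standing in for ktime_get() and milliseconds standing in for jiffies:

#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	/* ktime_add_ms(ktime_get(), 1000) analogue: absolute deadline */
	long long deadline = now_ns() + 1000 * 1000000LL;

	/* ... the wait may be interrupted and restarted here ... */

	long long rem = deadline - now_ns();
	long remaining_ms = rem < 0 ? 0 : (long)(rem / 1000000LL);

	printf("remaining: %ld ms\n", remaining_ms);
	return 0;
}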
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 04db4bd1b5b6..e7c5ea125d45 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -165,7 +165,7 @@ int msm_atomic_commit(struct drm_device *dev,
165int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); 165int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
166 166
167int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 167int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
168 struct timespec *timeout); 168 ktime_t *timeout);
169int msm_queue_fence_cb(struct drm_device *dev, 169int msm_queue_fence_cb(struct drm_device *dev,
170 struct msm_fence_cb *cb, uint32_t fence); 170 struct msm_fence_cb *cb, uint32_t fence);
171void msm_update_fence(struct drm_device *dev, uint32_t fence); 171void msm_update_fence(struct drm_device *dev, uint32_t fence);
@@ -205,7 +205,7 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
205 struct msm_gpu *gpu, bool write, uint32_t fence); 205 struct msm_gpu *gpu, bool write, uint32_t fence);
206void msm_gem_move_to_inactive(struct drm_gem_object *obj); 206void msm_gem_move_to_inactive(struct drm_gem_object *obj);
207int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, 207int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
208 struct timespec *timeout); 208 ktime_t *timeout);
209int msm_gem_cpu_fini(struct drm_gem_object *obj); 209int msm_gem_cpu_fini(struct drm_gem_object *obj);
210void msm_gem_free_object(struct drm_gem_object *obj); 210void msm_gem_free_object(struct drm_gem_object *obj);
211int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, 211int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 52839769eb6c..f211b80e3a1e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -448,8 +448,7 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
448 list_add_tail(&msm_obj->mm_list, &priv->inactive_list); 448 list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
449} 449}
450 450
451int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, 451int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
452 struct timespec *timeout)
453{ 452{
454 struct drm_device *dev = obj->dev; 453 struct drm_device *dev = obj->dev;
455 struct msm_gem_object *msm_obj = to_msm_bo(obj); 454 struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -540,6 +539,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
540 if (msm_obj->pages) 539 if (msm_obj->pages)
541 drm_free_large(msm_obj->pages); 540 drm_free_large(msm_obj->pages);
542 541
542 drm_prime_gem_destroy(obj, msm_obj->sgt);
543 } else { 543 } else {
544 vunmap(msm_obj->vaddr); 544 vunmap(msm_obj->vaddr);
545 put_pages(obj); 545 put_pages(obj);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 85d481e29276..6fc59bfeedeb 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -96,6 +96,7 @@ static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
96struct msm_gem_submit { 96struct msm_gem_submit {
97 struct drm_device *dev; 97 struct drm_device *dev;
98 struct msm_gpu *gpu; 98 struct msm_gpu *gpu;
99 struct list_head node; /* node in gpu submit_list */
99 struct list_head bo_list; 100 struct list_head bo_list;
100 struct ww_acquire_ctx ticket; 101 struct ww_acquire_ctx ticket;
101 uint32_t fence; 102 uint32_t fence;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index cd0554f68316..6d7cd3fe21e7 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -314,7 +314,6 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
314 } 314 }
315 315
316 ww_acquire_fini(&submit->ticket); 316 ww_acquire_fini(&submit->ticket);
317 kfree(submit);
318} 317}
319 318
320int msm_ioctl_gem_submit(struct drm_device *dev, void *data, 319int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4a0dce587745..8f70d9248ac5 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -24,7 +24,7 @@
24 * Power Management: 24 * Power Management:
25 */ 25 */
26 26
27#ifdef CONFIG_MSM_BUS_SCALING 27#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
28#include <mach/board.h> 28#include <mach/board.h>
29static void bs_init(struct msm_gpu *gpu) 29static void bs_init(struct msm_gpu *gpu)
30{ 30{
@@ -265,6 +265,8 @@ static void inactive_start(struct msm_gpu *gpu)
265 * Hangcheck detection for locked gpu: 265 * Hangcheck detection for locked gpu:
266 */ 266 */
267 267
268static void retire_submits(struct msm_gpu *gpu, uint32_t fence);
269
268static void recover_worker(struct work_struct *work) 270static void recover_worker(struct work_struct *work)
269{ 271{
270 struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work); 272 struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
@@ -274,8 +276,19 @@ static void recover_worker(struct work_struct *work)
274 276
275 mutex_lock(&dev->struct_mutex); 277 mutex_lock(&dev->struct_mutex);
276 if (msm_gpu_active(gpu)) { 278 if (msm_gpu_active(gpu)) {
279 struct msm_gem_submit *submit;
280 uint32_t fence = gpu->funcs->last_fence(gpu);
281
282 /* retire completed submits, plus the one that hung: */
283 retire_submits(gpu, fence + 1);
284
277 inactive_cancel(gpu); 285 inactive_cancel(gpu);
278 gpu->funcs->recover(gpu); 286 gpu->funcs->recover(gpu);
287
288 /* replay the remaining submits after the one that hung: */
289 list_for_each_entry(submit, &gpu->submit_list, node) {
290 gpu->funcs->submit(gpu, submit, NULL);
291 }
279 } 292 }
280 mutex_unlock(&dev->struct_mutex); 293 mutex_unlock(&dev->struct_mutex);
281 294
@@ -418,6 +431,27 @@ out:
418 * Cmdstream submission/retirement: 431 * Cmdstream submission/retirement:
419 */ 432 */
420 433
434static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
435{
436 struct drm_device *dev = gpu->dev;
437
438 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
439
440 while (!list_empty(&gpu->submit_list)) {
441 struct msm_gem_submit *submit;
442
443 submit = list_first_entry(&gpu->submit_list,
444 struct msm_gem_submit, node);
445
446 if (submit->fence <= fence) {
447 list_del(&submit->node);
448 kfree(submit);
449 } else {
450 break;
451 }
452 }
453}
454
421static void retire_worker(struct work_struct *work) 455static void retire_worker(struct work_struct *work)
422{ 456{
423 struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work); 457 struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
@@ -428,6 +462,8 @@ static void retire_worker(struct work_struct *work)
428 462
429 mutex_lock(&dev->struct_mutex); 463 mutex_lock(&dev->struct_mutex);
430 464
465 retire_submits(gpu, fence);
466
431 while (!list_empty(&gpu->active_list)) { 467 while (!list_empty(&gpu->active_list)) {
432 struct msm_gem_object *obj; 468 struct msm_gem_object *obj;
433 469
@@ -467,21 +503,22 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
467 struct msm_drm_private *priv = dev->dev_private; 503 struct msm_drm_private *priv = dev->dev_private;
468 int i, ret; 504 int i, ret;
469 505
506 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
507
470 submit->fence = ++priv->next_fence; 508 submit->fence = ++priv->next_fence;
471 509
472 gpu->submitted_fence = submit->fence; 510 gpu->submitted_fence = submit->fence;
473 511
474 inactive_cancel(gpu); 512 inactive_cancel(gpu);
475 513
514 list_add_tail(&submit->node, &gpu->submit_list);
515
476 msm_rd_dump_submit(submit); 516 msm_rd_dump_submit(submit);
477 517
478 gpu->submitted_fence = submit->fence; 518 gpu->submitted_fence = submit->fence;
479 519
480 update_sw_cntrs(gpu); 520 update_sw_cntrs(gpu);
481 521
482 ret = gpu->funcs->submit(gpu, submit, ctx);
483 priv->lastctx = ctx;
484
485 for (i = 0; i < submit->nr_bos; i++) { 522 for (i = 0; i < submit->nr_bos; i++) {
486 struct msm_gem_object *msm_obj = submit->bos[i].obj; 523 struct msm_gem_object *msm_obj = submit->bos[i].obj;
487 524
@@ -505,6 +542,10 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
505 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) 542 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
506 msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence); 543 msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
507 } 544 }
545
546 ret = gpu->funcs->submit(gpu, submit, ctx);
547 priv->lastctx = ctx;
548
508 hangcheck_timer_reset(gpu); 549 hangcheck_timer_reset(gpu);
509 550
510 return ret; 551 return ret;
@@ -522,6 +563,7 @@ static irqreturn_t irq_handler(int irq, void *data)
522 563
523static const char *clk_names[] = { 564static const char *clk_names[] = {
524 "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk", 565 "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
566 "alt_mem_iface_clk",
525}; 567};
526 568
527int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, 569int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
@@ -544,6 +586,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
544 INIT_WORK(&gpu->inactive_work, inactive_worker); 586 INIT_WORK(&gpu->inactive_work, inactive_worker);
545 INIT_WORK(&gpu->recover_work, recover_worker); 587 INIT_WORK(&gpu->recover_work, recover_worker);
546 588
589 INIT_LIST_HEAD(&gpu->submit_list);
590
547 setup_timer(&gpu->inactive_timer, inactive_handler, 591 setup_timer(&gpu->inactive_timer, inactive_handler,
548 (unsigned long)gpu); 592 (unsigned long)gpu);
549 setup_timer(&gpu->hangcheck_timer, hangcheck_handler, 593 setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
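The new submit_list gives both retirement and recovery a precise view of in-flight work: retire everything whose fence has signaled, and on a hang also retire the hung submit itself (fence + 1) before replaying the survivors in order. A self-contained model of retire_submits() over a singly linked list (fence bookkeeping simplified relative to the driver):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct submit {
	uint32_t fence;
	struct submit *next;
};

static struct submit *submit_list;

static void queue_submit(uint32_t fence)
{
	struct submit *s = malloc(sizeof(*s)), **p = &submit_list;

	s->fence = fence;
	s->next = NULL;
	while (*p)
		p = &(*p)->next;	/* append, oldest first */
	*p = s;
}

/* free every submit whose fence has signaled (fence <= completed) */
static void retire_submits(uint32_t completed)
{
	while (submit_list && submit_list->fence <= completed) {
		struct submit *s = submit_list;

		submit_list = s->next;
		free(s);
	}
}

int main(void)
{
	struct submit *s;
	uint32_t last_fence = 1;	/* last fence the GPU completed */

	queue_submit(1);
	queue_submit(2);		/* this one hung */
	queue_submit(3);
	queue_submit(4);

	/* recovery: drop completed work plus the submit that hung ... */
	retire_submits(last_fence + 1);
	/* ... then replay the remainder in submission order */
	for (s = submit_list; s; s = s->next)
		printf("replaying fence %u\n", s->fence);
	return 0;
}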
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index fd1e4b4a6d40..2bbe85a3d6f6 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -100,10 +100,10 @@ struct msm_gpu {
100 100
101 /* Power Control: */ 101 /* Power Control: */
102 struct regulator *gpu_reg, *gpu_cx; 102 struct regulator *gpu_reg, *gpu_cx;
103 struct clk *ebi1_clk, *grp_clks[5]; 103 struct clk *ebi1_clk, *grp_clks[6];
104 uint32_t fast_rate, slow_rate, bus_freq; 104 uint32_t fast_rate, slow_rate, bus_freq;
105 105
106#ifdef CONFIG_MSM_BUS_SCALING 106#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
107 struct msm_bus_scale_pdata *bus_scale_table; 107 struct msm_bus_scale_pdata *bus_scale_table;
108 uint32_t bsc; 108 uint32_t bsc;
109#endif 109#endif
@@ -119,6 +119,8 @@ struct msm_gpu {
119 struct timer_list hangcheck_timer; 119 struct timer_list hangcheck_timer;
120 uint32_t hangcheck_fence; 120 uint32_t hangcheck_fence;
121 struct work_struct recover_work; 121 struct work_struct recover_work;
122
123 struct list_head submit_list;
122}; 124};
123 125
124static inline bool msm_gpu_active(struct msm_gpu *gpu) 126static inline bool msm_gpu_active(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index a9f17bdb4530..9bcabaada179 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -43,6 +43,9 @@ struct msm_kms_funcs {
43 /* modeset, bracketing atomic_commit(): */ 43 /* modeset, bracketing atomic_commit(): */
44 void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state); 44 void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
45 void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state); 45 void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
 46	/* function to wait for an atomic commit to complete on each CRTC */
47 void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
48 struct drm_crtc *crtc);
46 /* misc: */ 49 /* misc: */
47 const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format); 50 const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
48 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate, 51 long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 89049335b738..649024d4daf1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/console.h> 25#include <linux/console.h>
26#include <linux/delay.h>
26#include <linux/module.h> 27#include <linux/module.h>
27#include <linux/pci.h> 28#include <linux/pci.h>
28#include <linux/pm_runtime.h> 29#include <linux/pm_runtime.h>
@@ -666,6 +667,7 @@ nouveau_pmops_suspend(struct device *dev)
666 pci_save_state(pdev); 667 pci_save_state(pdev);
667 pci_disable_device(pdev); 668 pci_disable_device(pdev);
668 pci_set_power_state(pdev, PCI_D3hot); 669 pci_set_power_state(pdev, PCI_D3hot);
670 udelay(200);
669 return 0; 671 return 0;
670} 672}
671 673
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0e690bf19fc9..af1ee517f372 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -555,10 +555,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
555static inline void 555static inline void
556u_free(void *addr) 556u_free(void *addr)
557{ 557{
558 if (!is_vmalloc_addr(addr)) 558 kvfree(addr);
559 kfree(addr);
560 else
561 vfree(addr);
562} 559}
563 560
564static inline void * 561static inline void *
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
index 08cf2c588c3d..9c27bded4c09 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -367,18 +367,18 @@ static int ld9040_remove(struct spi_device *spi)
367 return 0; 367 return 0;
368} 368}
369 369
370static struct of_device_id ld9040_of_match[] = { 370static const struct of_device_id ld9040_of_match[] = {
371 { .compatible = "samsung,ld9040" }, 371 { .compatible = "samsung,ld9040" },
372 { } 372 { }
373}; 373};
374MODULE_DEVICE_TABLE(of, ld9040_of_match); 374MODULE_DEVICE_TABLE(of, ld9040_of_match);
375 375
376static struct spi_driver ld9040_driver = { 376static struct spi_driver ld9040_driver = {
377 .probe = ld9040_probe, 377 .probe = ld9040_probe,
378 .remove = ld9040_remove, 378 .remove = ld9040_remove,
379 .driver = { 379 .driver = {
380 .name = "ld9040", 380 .name = "ld9040",
381 .owner = THIS_MODULE, 381 .owner = THIS_MODULE,
382 .of_match_table = ld9040_of_match, 382 .of_match_table = ld9040_of_match,
383 }, 383 },
384}; 384};
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
index 144b2733e3d7..30051108eec4 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -1041,7 +1041,7 @@ static int s6e8aa0_remove(struct mipi_dsi_device *dsi)
1041 return 0; 1041 return 0;
1042} 1042}
1043 1043
1044static struct of_device_id s6e8aa0_of_match[] = { 1044static const struct of_device_id s6e8aa0_of_match[] = {
1045 { .compatible = "samsung,s6e8aa0" }, 1045 { .compatible = "samsung,s6e8aa0" },
1046 { } 1046 { }
1047}; 1047};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 30904a9b2a4c..f94201b6e882 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -731,6 +731,30 @@ static const struct panel_desc hannstar_hsd070pww1 = {
731 }, 731 },
732}; 732};
733 733
734static const struct display_timing hannstar_hsd100pxn1_timing = {
735 .pixelclock = { 55000000, 65000000, 75000000 },
736 .hactive = { 1024, 1024, 1024 },
737 .hfront_porch = { 40, 40, 40 },
738 .hback_porch = { 220, 220, 220 },
739 .hsync_len = { 20, 60, 100 },
740 .vactive = { 768, 768, 768 },
741 .vfront_porch = { 7, 7, 7 },
742 .vback_porch = { 21, 21, 21 },
743 .vsync_len = { 10, 10, 10 },
744 .flags = DISPLAY_FLAGS_DE_HIGH,
745};
746
747static const struct panel_desc hannstar_hsd100pxn1 = {
748 .timings = &hannstar_hsd100pxn1_timing,
749 .num_timings = 1,
750 .bpc = 6,
751 .size = {
752 .width = 203,
753 .height = 152,
754 },
755 .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
756};
757
734static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = { 758static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
735 .clock = 33333, 759 .clock = 33333,
736 .hdisplay = 800, 760 .hdisplay = 800,
@@ -872,6 +896,30 @@ static const struct panel_desc innolux_zj070na_01p = {
872 }, 896 },
873}; 897};
874 898
899static const struct drm_display_mode lg_lb070wv8_mode = {
900 .clock = 33246,
901 .hdisplay = 800,
902 .hsync_start = 800 + 88,
903 .hsync_end = 800 + 88 + 80,
904 .htotal = 800 + 88 + 80 + 88,
905 .vdisplay = 480,
906 .vsync_start = 480 + 10,
907 .vsync_end = 480 + 10 + 25,
908 .vtotal = 480 + 10 + 25 + 10,
909 .vrefresh = 60,
910};
911
912static const struct panel_desc lg_lb070wv8 = {
913 .modes = &lg_lb070wv8_mode,
914 .num_modes = 1,
 915	.bpc = 8,
916 .size = {
917 .width = 151,
918 .height = 91,
919 },
920 .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
921};
922
875static const struct drm_display_mode lg_lp129qe_mode = { 923static const struct drm_display_mode lg_lp129qe_mode = {
876 .clock = 285250, 924 .clock = 285250,
877 .hdisplay = 2560, 925 .hdisplay = 2560,
@@ -1038,6 +1086,9 @@ static const struct of_device_id platform_of_match[] = {
1038 .compatible = "hannstar,hsd070pww1", 1086 .compatible = "hannstar,hsd070pww1",
1039 .data = &hannstar_hsd070pww1, 1087 .data = &hannstar_hsd070pww1,
1040 }, { 1088 }, {
1089 .compatible = "hannstar,hsd100pxn1",
1090 .data = &hannstar_hsd100pxn1,
1091 }, {
1041 .compatible = "hit,tx23d38vm0caa", 1092 .compatible = "hit,tx23d38vm0caa",
1042 .data = &hitachi_tx23d38vm0caa 1093 .data = &hitachi_tx23d38vm0caa
1043 }, { 1094 }, {
@@ -1056,6 +1107,9 @@ static const struct of_device_id platform_of_match[] = {
1056 .compatible = "innolux,zj070na-01p", 1107 .compatible = "innolux,zj070na-01p",
1057 .data = &innolux_zj070na_01p, 1108 .data = &innolux_zj070na_01p,
1058 }, { 1109 }, {
1110 .compatible = "lg,lb070wv8",
1111 .data = &lg_lb070wv8,
1112 }, {
1059 .compatible = "lg,lp129qe", 1113 .compatible = "lg,lp129qe",
1060 .data = &lg_lp129qe, 1114 .data = &lg_lp129qe,
1061 }, { 1115 }, {
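The new lg_lb070wv8_mode entry can be sanity-checked arithmetically: vrefresh = clock / (htotal * vtotal), and with htotal = 800 + 88 + 80 + 88 = 1056 and vtotal = 480 + 10 + 25 + 10 = 525, a 33246 kHz pixel clock gives about 59.97 Hz, consistent with the declared 60. As a standalone check:

#include <stdio.h>

int main(void)
{
	long clock_khz = 33246;
	long htotal = 800 + 88 + 80 + 88;	/* 1056 */
	long vtotal = 480 + 10 + 25 + 10;	/* 525 */
	double vrefresh = clock_khz * 1000.0 / (htotal * vtotal);

	printf("vrefresh = %.2f Hz\n", vrefresh);	/* ~59.97 */
	return 0;
}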
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 92be50c39ffd..ab89eed9ddd9 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -7944,8 +7944,8 @@ typedef struct {
7944typedef struct { 7944typedef struct {
7945 AMD_ACPI_DESCRIPTION_HEADER SHeader; 7945 AMD_ACPI_DESCRIPTION_HEADER SHeader;
7946 UCHAR TableUUID[16]; //0x24 7946 UCHAR TableUUID[16]; //0x24
7947 ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture. 7947 ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
7948 ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture. 7948 ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
7949 ULONG Reserved[4]; //0x3C 7949 ULONG Reserved[4]; //0x3C
7950}UEFI_ACPI_VFCT; 7950}UEFI_ACPI_VFCT;
7951 7951
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index e597ffc26563..dac78ad24b31 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
580 else 580 else
581 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 581 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
582 582
583 /* if there is no audio, set MINM_OVER_MAXP */
584 if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
585 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
586 if (rdev->family < CHIP_RV770) 583 if (rdev->family < CHIP_RV770)
587 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; 584 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
588 /* use frac fb div on APUs */ 585 /* use frac fb div on APUs */
@@ -1798,9 +1795,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
1798 if ((crtc->mode.clock == test_crtc->mode.clock) && 1795 if ((crtc->mode.clock == test_crtc->mode.clock) &&
1799 (adjusted_clock == test_adjusted_clock) && 1796 (adjusted_clock == test_adjusted_clock) &&
1800 (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && 1797 (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
1801 (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) && 1798 (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
1802 (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
1803 drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
1804 return test_radeon_crtc->pll_id; 1799 return test_radeon_crtc->pll_id;
1805 } 1800 }
1806 } 1801 }
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b0688b0c8908..248953d2fdb7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4604,6 +4604,31 @@ void cik_compute_set_wptr(struct radeon_device *rdev,
4604 WDOORBELL32(ring->doorbell_index, ring->wptr); 4604 WDOORBELL32(ring->doorbell_index, ring->wptr);
4605} 4605}
4606 4606
4607static void cik_compute_stop(struct radeon_device *rdev,
4608 struct radeon_ring *ring)
4609{
4610 u32 j, tmp;
4611
4612 cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
4613 /* Disable wptr polling. */
4614 tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
4615 tmp &= ~WPTR_POLL_EN;
4616 WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
4617 /* Disable HQD. */
4618 if (RREG32(CP_HQD_ACTIVE) & 1) {
4619 WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
4620 for (j = 0; j < rdev->usec_timeout; j++) {
4621 if (!(RREG32(CP_HQD_ACTIVE) & 1))
4622 break;
4623 udelay(1);
4624 }
4625 WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
4626 WREG32(CP_HQD_PQ_RPTR, 0);
4627 WREG32(CP_HQD_PQ_WPTR, 0);
4628 }
4629 cik_srbm_select(rdev, 0, 0, 0, 0);
4630}
4631
4607/** 4632/**
4608 * cik_cp_compute_enable - enable/disable the compute CP MEs 4633 * cik_cp_compute_enable - enable/disable the compute CP MEs
4609 * 4634 *
@@ -4617,6 +4642,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
4617 if (enable) 4642 if (enable)
4618 WREG32(CP_MEC_CNTL, 0); 4643 WREG32(CP_MEC_CNTL, 0);
4619 else { 4644 else {
4645 /*
 4646		 * To make hibernation reliable, we need to clear the compute
 4647		 * ring configuration before halting the compute rings.
4648 */
4649 mutex_lock(&rdev->srbm_mutex);
 4650		cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
 4651		cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
4652 mutex_unlock(&rdev->srbm_mutex);
4653
4620 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); 4654 WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
4621 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; 4655 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
4622 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; 4656 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
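cik_compute_stop() follows the usual deactivate-and-poll shape: request a dequeue, busy-wait on the ACTIVE bit for at most usec_timeout iterations, then clear the queue pointers. The bounded-poll idiom in isolation (the register is faked so the sketch runs standalone):

#include <stdio.h>

static unsigned int hqd_active = 1;	/* fake CP_HQD_ACTIVE register */

static unsigned int reg_read(void)
{
	/* model: the queue drains after a few polls */
	static int polls;

	if (++polls > 3)
		hqd_active = 0;
	return hqd_active;
}

static int stop_queue(unsigned int usec_timeout)
{
	unsigned int j;

	/* dequeue was requested; poll ACTIVE with a bounded wait */
	for (j = 0; j < usec_timeout; j++) {
		if (!(reg_read() & 1))
			return 0;	/* queue idle, safe to reset pointers */
		/* udelay(1) in the driver */
	}
	return -1;			/* still active: give up, don't hang */
}

int main(void)
{
	printf("stop_queue: %d\n", stop_queue(100));
	return 0;
}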
@@ -7930,23 +7964,27 @@ restart_ih:
7930 case 1: /* D1 vblank/vline */ 7964 case 1: /* D1 vblank/vline */
7931 switch (src_data) { 7965 switch (src_data) {
7932 case 0: /* D1 vblank */ 7966 case 0: /* D1 vblank */
7933 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) { 7967 if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
7934 if (rdev->irq.crtc_vblank_int[0]) { 7968 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
7935 drm_handle_vblank(rdev->ddev, 0); 7969
7936 rdev->pm.vblank_sync = true; 7970 if (rdev->irq.crtc_vblank_int[0]) {
7937 wake_up(&rdev->irq.vblank_queue); 7971 drm_handle_vblank(rdev->ddev, 0);
7938 } 7972 rdev->pm.vblank_sync = true;
7939 if (atomic_read(&rdev->irq.pflip[0])) 7973 wake_up(&rdev->irq.vblank_queue);
7940 radeon_crtc_handle_vblank(rdev, 0);
7941 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
7942 DRM_DEBUG("IH: D1 vblank\n");
7943 } 7974 }
7975 if (atomic_read(&rdev->irq.pflip[0]))
7976 radeon_crtc_handle_vblank(rdev, 0);
7977 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
7978 DRM_DEBUG("IH: D1 vblank\n");
7979
7944 break; 7980 break;
7945 case 1: /* D1 vline */ 7981 case 1: /* D1 vline */
7946 if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) { 7982 if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
7947 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT; 7983 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
7948 DRM_DEBUG("IH: D1 vline\n"); 7984
7949 } 7985 rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
7986 DRM_DEBUG("IH: D1 vline\n");
7987
7950 break; 7988 break;
7951 default: 7989 default:
7952 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 7990 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
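Every IH hunk that follows applies the same mechanical transformation: the handler body is hoisted out of the status-bit check, and a missing bit now logs a DRM_DEBUG note instead of silently skipping the work; the D2-D6 and HPD hunks below repeat it per pipe. The shape of the change, reduced to control flow:

#include <stdio.h>

#define VBLANK_INTERRUPT	(1 << 0)

/* before: handler body nested inside the status check */
static void handle_nested(unsigned int disp_int)
{
	if (disp_int & VBLANK_INTERRUPT) {
		/* ...vblank handling... */
		printf("nested: vblank handled\n");
	}
	/* a spurious event is silently ignored */
}

/* after: early debug note, then a flat handler body */
static void handle_flat(unsigned int disp_int)
{
	if (!(disp_int & VBLANK_INTERRUPT))
		printf("flat: IH event w/o asserted irq bit?\n");

	/* ...vblank handling runs at a single indent level... */
	printf("flat: vblank handled\n");
}

int main(void)
{
	handle_nested(VBLANK_INTERRUPT);
	handle_flat(0);
	return 0;
}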
@@ -7956,23 +7994,27 @@ restart_ih:
7956 case 2: /* D2 vblank/vline */ 7994 case 2: /* D2 vblank/vline */
7957 switch (src_data) { 7995 switch (src_data) {
7958 case 0: /* D2 vblank */ 7996 case 0: /* D2 vblank */
7959 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { 7997 if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
7960 if (rdev->irq.crtc_vblank_int[1]) { 7998 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
7961 drm_handle_vblank(rdev->ddev, 1); 7999
7962 rdev->pm.vblank_sync = true; 8000 if (rdev->irq.crtc_vblank_int[1]) {
7963 wake_up(&rdev->irq.vblank_queue); 8001 drm_handle_vblank(rdev->ddev, 1);
7964 } 8002 rdev->pm.vblank_sync = true;
7965 if (atomic_read(&rdev->irq.pflip[1])) 8003 wake_up(&rdev->irq.vblank_queue);
7966 radeon_crtc_handle_vblank(rdev, 1);
7967 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
7968 DRM_DEBUG("IH: D2 vblank\n");
7969 } 8004 }
8005 if (atomic_read(&rdev->irq.pflip[1]))
8006 radeon_crtc_handle_vblank(rdev, 1);
8007 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
8008 DRM_DEBUG("IH: D2 vblank\n");
8009
7970 break; 8010 break;
7971 case 1: /* D2 vline */ 8011 case 1: /* D2 vline */
7972 if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) { 8012 if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
7973 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; 8013 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
7974 DRM_DEBUG("IH: D2 vline\n"); 8014
7975 } 8015 rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
8016 DRM_DEBUG("IH: D2 vline\n");
8017
7976 break; 8018 break;
7977 default: 8019 default:
7978 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8020 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7982,23 +8024,27 @@ restart_ih:
7982 case 3: /* D3 vblank/vline */ 8024 case 3: /* D3 vblank/vline */
7983 switch (src_data) { 8025 switch (src_data) {
7984 case 0: /* D3 vblank */ 8026 case 0: /* D3 vblank */
7985 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { 8027 if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
7986 if (rdev->irq.crtc_vblank_int[2]) { 8028 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
7987 drm_handle_vblank(rdev->ddev, 2); 8029
7988 rdev->pm.vblank_sync = true; 8030 if (rdev->irq.crtc_vblank_int[2]) {
7989 wake_up(&rdev->irq.vblank_queue); 8031 drm_handle_vblank(rdev->ddev, 2);
7990 } 8032 rdev->pm.vblank_sync = true;
7991 if (atomic_read(&rdev->irq.pflip[2])) 8033 wake_up(&rdev->irq.vblank_queue);
7992 radeon_crtc_handle_vblank(rdev, 2);
7993 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
7994 DRM_DEBUG("IH: D3 vblank\n");
7995 } 8034 }
8035 if (atomic_read(&rdev->irq.pflip[2]))
8036 radeon_crtc_handle_vblank(rdev, 2);
8037 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
8038 DRM_DEBUG("IH: D3 vblank\n");
8039
7996 break; 8040 break;
7997 case 1: /* D3 vline */ 8041 case 1: /* D3 vline */
7998 if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { 8042 if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
7999 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; 8043 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8000 DRM_DEBUG("IH: D3 vline\n"); 8044
8001 } 8045 rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
8046 DRM_DEBUG("IH: D3 vline\n");
8047
8002 break; 8048 break;
8003 default: 8049 default:
8004 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8050 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8008,23 +8054,27 @@ restart_ih:
8008 case 4: /* D4 vblank/vline */ 8054 case 4: /* D4 vblank/vline */
8009 switch (src_data) { 8055 switch (src_data) {
8010 case 0: /* D4 vblank */ 8056 case 0: /* D4 vblank */
8011 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { 8057 if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
8012 if (rdev->irq.crtc_vblank_int[3]) { 8058 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8013 drm_handle_vblank(rdev->ddev, 3); 8059
8014 rdev->pm.vblank_sync = true; 8060 if (rdev->irq.crtc_vblank_int[3]) {
8015 wake_up(&rdev->irq.vblank_queue); 8061 drm_handle_vblank(rdev->ddev, 3);
8016 } 8062 rdev->pm.vblank_sync = true;
8017 if (atomic_read(&rdev->irq.pflip[3])) 8063 wake_up(&rdev->irq.vblank_queue);
8018 radeon_crtc_handle_vblank(rdev, 3);
8019 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
8020 DRM_DEBUG("IH: D4 vblank\n");
8021 } 8064 }
8065 if (atomic_read(&rdev->irq.pflip[3]))
8066 radeon_crtc_handle_vblank(rdev, 3);
8067 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
8068 DRM_DEBUG("IH: D4 vblank\n");
8069
8022 break; 8070 break;
8023 case 1: /* D4 vline */ 8071 case 1: /* D4 vline */
8024 if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { 8072 if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
8025 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; 8073 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
8026 DRM_DEBUG("IH: D4 vline\n"); 8074
8027 } 8075 rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
8076 DRM_DEBUG("IH: D4 vline\n");
8077
8028 break; 8078 break;
8029 default: 8079 default:
8030 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 8080 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8034,23 +8084,27 @@ restart_ih:
 	case 5: /* D5 vblank/vline */
 		switch (src_data) {
 		case 0: /* D5 vblank */
-			if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-				if (rdev->irq.crtc_vblank_int[4]) {
-					drm_handle_vblank(rdev->ddev, 4);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-				}
-				if (atomic_read(&rdev->irq.pflip[4]))
-					radeon_crtc_handle_vblank(rdev, 4);
-				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-				DRM_DEBUG("IH: D5 vblank\n");
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			if (rdev->irq.crtc_vblank_int[4]) {
+				drm_handle_vblank(rdev->ddev, 4);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
 			}
+			if (atomic_read(&rdev->irq.pflip[4]))
+				radeon_crtc_handle_vblank(rdev, 4);
+			rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+			DRM_DEBUG("IH: D5 vblank\n");
+
 			break;
 		case 1: /* D5 vline */
-			if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-				DRM_DEBUG("IH: D5 vline\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D5 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8060,23 +8114,27 @@ restart_ih:
 	case 6: /* D6 vblank/vline */
 		switch (src_data) {
 		case 0: /* D6 vblank */
-			if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-				if (rdev->irq.crtc_vblank_int[5]) {
-					drm_handle_vblank(rdev->ddev, 5);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-				}
-				if (atomic_read(&rdev->irq.pflip[5]))
-					radeon_crtc_handle_vblank(rdev, 5);
-				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-				DRM_DEBUG("IH: D6 vblank\n");
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			if (rdev->irq.crtc_vblank_int[5]) {
+				drm_handle_vblank(rdev->ddev, 5);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
 			}
+			if (atomic_read(&rdev->irq.pflip[5]))
+				radeon_crtc_handle_vblank(rdev, 5);
+			rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+			DRM_DEBUG("IH: D6 vblank\n");
+
 			break;
 		case 1: /* D6 vline */
-			if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-				DRM_DEBUG("IH: D6 vline\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D6 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8096,88 +8154,112 @@ restart_ih:
 	case 42: /* HPD hotplug */
 		switch (src_data) {
 		case 0:
-			if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD1\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD1\n");
+
 			break;
 		case 1:
-			if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD2\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD2\n");
+
 			break;
 		case 2:
-			if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD3\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD3\n");
+
 			break;
 		case 3:
-			if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD4\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD4\n");
+
 			break;
 		case 4:
-			if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD5\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD5\n");
+
 			break;
 		case 5:
-			if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD6\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD6\n");
+
 			break;
 		case 6:
-			if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-				queue_dp = true;
-				DRM_DEBUG("IH: HPD_RX 1\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 1\n");
+
 			break;
 		case 7:
-			if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-				queue_dp = true;
-				DRM_DEBUG("IH: HPD_RX 2\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 2\n");
+
 			break;
 		case 8:
-			if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-				queue_dp = true;
-				DRM_DEBUG("IH: HPD_RX 3\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 3\n");
+
 			break;
 		case 9:
-			if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-				queue_dp = true;
-				DRM_DEBUG("IH: HPD_RX 4\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 4\n");
+
 			break;
 		case 10:
-			if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-				queue_dp = true;
-				DRM_DEBUG("IH: HPD_RX 5\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 5\n");
+
 			break;
 		case 11:
-			if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-				queue_dp = true;
-				DRM_DEBUG("IH: HPD_RX 6\n");
-			}
+			if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 6\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
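
Every vblank, vline, HPD, and HPD_RX case above applies the same mechanical transformation: the status-register test no longer gates the handler, it only emits a diagnostic, and the acknowledge/dispatch path runs unconditionally for any event that arrived on the IH ring. A minimal standalone sketch of that before/after shape — STATUS_BIT, the status word, and the handler are hypothetical stand-ins for the per-source details, not driver API:

#include <stdio.h>

#define STATUS_BIT (1u << 0)	/* hypothetical per-source status bit */

/* Old shape: an IH ring event whose cached status bit was already clear
 * was silently dropped, so the source could be left unacknowledged.
 */
static void ack_event_old(unsigned int *status, void (*handle)(void))
{
	if (*status & STATUS_BIT) {
		*status &= ~STATUS_BIT;
		handle();
	}
}

/* New shape: trust the IH ring. Warn when the cached bit is missing,
 * then clear and dispatch unconditionally.
 */
static void ack_event_new(unsigned int *status, void (*handle)(void))
{
	if (!(*status & STATUS_BIT))
		fprintf(stderr, "IH: IH event w/o asserted irq bit?\n");

	*status &= ~STATUS_BIT;
	handle();
}

static void handler(void) { puts("event handled"); }

int main(void)
{
	unsigned int status = 0;	/* bit already clear */

	ack_event_old(&status, handler);	/* prints nothing: event lost */
	ack_event_new(&status, handler);	/* warns, then handles anyway */
	return 0;
}
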
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index f86eb54e7763..d16f2eebd95e 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -268,6 +268,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
 	}
 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
 	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+
+	/* FIXME: use something subtler than this big hammer, but after a few
+	 * days we could not find a good combination, so reset the SDMA
+	 * blocks, since it seems we do not shut them down properly. This
+	 * fixes hibernation and does not affect suspend to RAM.
+	 */
+	WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
+	(void)RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+	(void)RREG32(SRBM_SOFT_RESET);
 }
 
 /**
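
The added reset sequence follows a common MMIO soft-reset idiom: write the reset bits, read the register back so the posted write actually lands, wait for the reset to propagate, then clear and read back again. A compilable sketch of the same idiom outside the kernel — the mmio array, register offset, and bit positions are assumptions for illustration, not verified CIK values:

#include <stdint.h>

static volatile uint32_t mmio[0x1000];	/* stands in for the mapped BAR */

#define SRBM_SOFT_RESET   0xe60		/* assumed offset */
#define SOFT_RESET_SDMA   (1u << 20)	/* assumed bit positions */
#define SOFT_RESET_SDMA1  (1u << 6)

static inline void wreg32(uint32_t off, uint32_t v) { mmio[off / 4] = v; }
static inline uint32_t rreg32(uint32_t off) { return mmio[off / 4]; }

static void sdma_soft_reset(void (*delay_us)(unsigned long))
{
	/* assert reset on both SDMA engines */
	wreg32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
	(void)rreg32(SRBM_SOFT_RESET);	/* read back so the write posts */
	delay_us(50);			/* give the reset time to settle */
	wreg32(SRBM_SOFT_RESET, 0);	/* de-assert */
	(void)rreg32(SRBM_SOFT_RESET);
}
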
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index f04205170b8a..cfa3a84a2af0 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
-	WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+	WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset,
 		HDMI0_ACR_SOURCE |		/* select SW CTS value */
 		HDMI0_ACR_AUTO_SEND);		/* allow hw to send ACR packets when required */
 
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3a6d483a2c36..0acde1949c18 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4924,7 +4924,7 @@ restart_ih:
 		return IRQ_NONE;
 
 	rptr = rdev->ih.rptr;
-	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
 	/* Order reading of wptr vs. reading of IH ring data */
 	rmb();
@@ -4942,23 +4942,27 @@ restart_ih:
 	case 1: /* D1 vblank/vline */
 		switch (src_data) {
 		case 0: /* D1 vblank */
-			if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
-				if (rdev->irq.crtc_vblank_int[0]) {
-					drm_handle_vblank(rdev->ddev, 0);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-				}
-				if (atomic_read(&rdev->irq.pflip[0]))
-					radeon_crtc_handle_vblank(rdev, 0);
-				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-				DRM_DEBUG("IH: D1 vblank\n");
+			if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+				DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
 			}
+			if (atomic_read(&rdev->irq.pflip[0]))
+				radeon_crtc_handle_vblank(rdev, 0);
+			rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+			DRM_DEBUG("IH: D1 vblank\n");
+
 			break;
 		case 1: /* D1 vline */
-			if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-				DRM_DEBUG("IH: D1 vline\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D1 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4968,23 +4972,27 @@ restart_ih:
 	case 2: /* D2 vblank/vline */
 		switch (src_data) {
 		case 0: /* D2 vblank */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-				if (rdev->irq.crtc_vblank_int[1]) {
-					drm_handle_vblank(rdev->ddev, 1);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-				}
-				if (atomic_read(&rdev->irq.pflip[1]))
-					radeon_crtc_handle_vblank(rdev, 1);
-				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
-				DRM_DEBUG("IH: D2 vblank\n");
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+				DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
 			}
+			if (atomic_read(&rdev->irq.pflip[1]))
+				radeon_crtc_handle_vblank(rdev, 1);
+			rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+			DRM_DEBUG("IH: D2 vblank\n");
+
 			break;
 		case 1: /* D2 vline */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
-				DRM_DEBUG("IH: D2 vline\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D2 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4994,23 +5002,27 @@ restart_ih:
 	case 3: /* D3 vblank/vline */
 		switch (src_data) {
 		case 0: /* D3 vblank */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-				if (rdev->irq.crtc_vblank_int[2]) {
-					drm_handle_vblank(rdev->ddev, 2);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-				}
-				if (atomic_read(&rdev->irq.pflip[2]))
-					radeon_crtc_handle_vblank(rdev, 2);
-				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
-				DRM_DEBUG("IH: D3 vblank\n");
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+				DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
+
+			if (rdev->irq.crtc_vblank_int[2]) {
+				drm_handle_vblank(rdev->ddev, 2);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
 			}
+			if (atomic_read(&rdev->irq.pflip[2]))
+				radeon_crtc_handle_vblank(rdev, 2);
+			rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+			DRM_DEBUG("IH: D3 vblank\n");
+
 			break;
 		case 1: /* D3 vline */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
-				DRM_DEBUG("IH: D3 vline\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D3 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5020,23 +5032,27 @@ restart_ih:
 	case 4: /* D4 vblank/vline */
 		switch (src_data) {
 		case 0: /* D4 vblank */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-				if (rdev->irq.crtc_vblank_int[3]) {
-					drm_handle_vblank(rdev->ddev, 3);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-				}
-				if (atomic_read(&rdev->irq.pflip[3]))
-					radeon_crtc_handle_vblank(rdev, 3);
-				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
-				DRM_DEBUG("IH: D4 vblank\n");
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+				DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
+
+			if (rdev->irq.crtc_vblank_int[3]) {
+				drm_handle_vblank(rdev->ddev, 3);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
 			}
+			if (atomic_read(&rdev->irq.pflip[3]))
+				radeon_crtc_handle_vblank(rdev, 3);
+			rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+			DRM_DEBUG("IH: D4 vblank\n");
+
 			break;
 		case 1: /* D4 vline */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
-				DRM_DEBUG("IH: D4 vline\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D4 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5046,23 +5062,27 @@ restart_ih:
 	case 5: /* D5 vblank/vline */
 		switch (src_data) {
 		case 0: /* D5 vblank */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-				if (rdev->irq.crtc_vblank_int[4]) {
-					drm_handle_vblank(rdev->ddev, 4);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-				}
-				if (atomic_read(&rdev->irq.pflip[4]))
-					radeon_crtc_handle_vblank(rdev, 4);
-				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-				DRM_DEBUG("IH: D5 vblank\n");
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+				DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
+
+			if (rdev->irq.crtc_vblank_int[4]) {
+				drm_handle_vblank(rdev->ddev, 4);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
 			}
+			if (atomic_read(&rdev->irq.pflip[4]))
+				radeon_crtc_handle_vblank(rdev, 4);
+			rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+			DRM_DEBUG("IH: D5 vblank\n");
+
 			break;
 		case 1: /* D5 vline */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-				DRM_DEBUG("IH: D5 vline\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D5 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5072,23 +5092,27 @@ restart_ih:
 	case 6: /* D6 vblank/vline */
 		switch (src_data) {
 		case 0: /* D6 vblank */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-				if (rdev->irq.crtc_vblank_int[5]) {
-					drm_handle_vblank(rdev->ddev, 5);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-				}
-				if (atomic_read(&rdev->irq.pflip[5]))
-					radeon_crtc_handle_vblank(rdev, 5);
-				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-				DRM_DEBUG("IH: D6 vblank\n");
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+				DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
+
+			if (rdev->irq.crtc_vblank_int[5]) {
+				drm_handle_vblank(rdev->ddev, 5);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
 			}
+			if (atomic_read(&rdev->irq.pflip[5]))
+				radeon_crtc_handle_vblank(rdev, 5);
+			rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+			DRM_DEBUG("IH: D6 vblank\n");
+
 			break;
 		case 1: /* D6 vline */
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-				DRM_DEBUG("IH: D6 vline\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D6 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5108,88 +5132,100 @@ restart_ih:
 	case 42: /* HPD hotplug */
 		switch (src_data) {
 		case 0:
-			if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD1\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD1\n");
 			break;
 		case 1:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD2\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD2\n");
 			break;
 		case 2:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD3\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD3\n");
 			break;
 		case 3:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD4\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD4\n");
 			break;
 		case 4:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD5\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD5\n");
 			break;
 		case 5:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD6\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD6\n");
 			break;
 		case 6:
-			if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-				queue_dp = true;
-				DRM_DEBUG("IH: HPD_RX 1\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 1\n");
 			break;
 		case 7:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-				queue_dp = true;
-				DRM_DEBUG("IH: HPD_RX 2\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 2\n");
 			break;
 		case 8:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-				queue_dp = true;
-				DRM_DEBUG("IH: HPD_RX 3\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 3\n");
 			break;
 		case 9:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-				queue_dp = true;
-				DRM_DEBUG("IH: HPD_RX 4\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 4\n");
 			break;
 		case 10:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-				queue_dp = true;
-				DRM_DEBUG("IH: HPD_RX 5\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 5\n");
 			break;
 		case 11:
-			if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-				queue_dp = true;
-				DRM_DEBUG("IH: HPD_RX 6\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+			queue_dp = true;
+			DRM_DEBUG("IH: HPD_RX 6\n");
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5199,46 +5235,52 @@ restart_ih:
 	case 44: /* hdmi */
 		switch (src_data) {
 		case 0:
-			if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI0\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI0\n");
 			break;
 		case 1:
-			if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI1\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI1\n");
 			break;
 		case 2:
-			if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI2\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI2\n");
 			break;
 		case 3:
-			if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI3\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI3\n");
 			break;
 		case 4:
-			if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI4\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI4\n");
 			break;
 		case 5:
-			if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI5\n");
-			}
+			if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI5\n");
 			break;
 		default:
 			DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8e5aeeb058a5..158872eb78e4 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2162,18 +2162,20 @@ static int cayman_startup(struct radeon_device *rdev)
 			DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
 	}
 
-	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
-	if (ring->ring_size)
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+	if (rdev->family == CHIP_ARUBA) {
+		ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+		if (ring->ring_size)
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
 
-	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
-	if (ring->ring_size)
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+		ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+		if (ring->ring_size)
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
 
-	if (!r)
-		r = vce_v1_0_init(rdev);
-	else if (r != -ENOENT)
-		DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+		if (!r)
+			r = vce_v1_0_init(rdev);
+		if (r)
+			DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+	}
 
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
@@ -2396,7 +2398,8 @@ void cayman_fini(struct radeon_device *rdev)
 	radeon_irq_kms_fini(rdev);
 	uvd_v1_0_fini(rdev);
 	radeon_uvd_fini(rdev);
-	radeon_vce_fini(rdev);
+	if (rdev->family == CHIP_ARUBA)
+		radeon_vce_fini(rdev);
 	cayman_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
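
The ni.c change makes VCE setup and teardown symmetric: both are gated on the same family check (only Aruba parts carry VCE in this file), so cayman_fini() can never tear down an engine that cayman_startup() never brought up. A small compilable sketch of that init/fini symmetry, with illustrative names rather than driver API:

#include <stdbool.h>
#include <stdio.h>

enum chip { CHIP_CAYMAN, CHIP_ARUBA };	/* illustrative subset */

struct dev {
	enum chip family;
	bool vce_up;
};

static bool has_vce(const struct dev *d)
{
	return d->family == CHIP_ARUBA;	/* only Aruba carries VCE here */
}

static void vce_init(struct dev *d)
{
	if (!has_vce(d))
		return;		/* silently skip parts without the block */
	d->vce_up = true;
}

static void vce_fini(struct dev *d)
{
	if (!has_vce(d))
		return;		/* same guard, so fini mirrors init */
	d->vce_up = false;
}

int main(void)
{
	struct dev cayman = { CHIP_CAYMAN, false };

	vce_init(&cayman);
	vce_fini(&cayman);	/* never touches the absent engine */
	printf("vce_up=%d\n", cayman.vce_up);
	return 0;
}
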
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 35dafd77a639..4ea5b10ff5f4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4086,23 +4086,27 @@ restart_ih:
 	case 1: /* D1 vblank/vline */
 		switch (src_data) {
 		case 0: /* D1 vblank */
-			if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
-				if (rdev->irq.crtc_vblank_int[0]) {
-					drm_handle_vblank(rdev->ddev, 0);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-				}
-				if (atomic_read(&rdev->irq.pflip[0]))
-					radeon_crtc_handle_vblank(rdev, 0);
-				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-				DRM_DEBUG("IH: D1 vblank\n");
+			if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
+				DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
 			}
+			if (atomic_read(&rdev->irq.pflip[0]))
+				radeon_crtc_handle_vblank(rdev, 0);
+			rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+			DRM_DEBUG("IH: D1 vblank\n");
+
 			break;
 		case 1: /* D1 vline */
-			if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
-				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-				DRM_DEBUG("IH: D1 vline\n");
-			}
+			if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D1 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4112,23 +4116,27 @@ restart_ih:
 	case 5: /* D2 vblank/vline */
 		switch (src_data) {
 		case 0: /* D2 vblank */
-			if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
-				if (rdev->irq.crtc_vblank_int[1]) {
-					drm_handle_vblank(rdev->ddev, 1);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-				}
-				if (atomic_read(&rdev->irq.pflip[1]))
-					radeon_crtc_handle_vblank(rdev, 1);
-				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
-				DRM_DEBUG("IH: D2 vblank\n");
+			if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
+				DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
 			}
+			if (atomic_read(&rdev->irq.pflip[1]))
+				radeon_crtc_handle_vblank(rdev, 1);
+			rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+			DRM_DEBUG("IH: D2 vblank\n");
+
 			break;
 		case 1: /* D2 vline */
-			if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
-				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
-				DRM_DEBUG("IH: D2 vline\n");
-			}
+			if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
+				DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+			DRM_DEBUG("IH: D2 vline\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4148,46 +4156,53 @@ restart_ih:
 	case 19: /* HPD/DAC hotplug */
 		switch (src_data) {
 		case 0:
-			if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
-				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD1\n");
-			}
+			if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
+				DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD1\n");
 			break;
 		case 1:
-			if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
-				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD2\n");
-			}
+			if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
+				DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD2\n");
 			break;
 		case 4:
-			if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
-				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD3\n");
-			}
+			if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
+				DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD3\n");
 			break;
 		case 5:
-			if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
-				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD4\n");
-			}
+			if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
+				DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD4\n");
 			break;
 		case 10:
-			if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
-				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD5\n");
-			}
+			if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
+				DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD5\n");
 			break;
 		case 12:
-			if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
-				queue_hotplug = true;
-				DRM_DEBUG("IH: HPD6\n");
-			}
+			if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
+				DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+			queue_hotplug = true;
+			DRM_DEBUG("IH: HPD6\n");
+
 			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4197,18 +4212,22 @@ restart_ih:
 	case 21: /* hdmi */
 		switch (src_data) {
 		case 4:
-			if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI0\n");
-			}
+			if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI0\n");
+
 			break;
 		case 5:
-			if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
-				rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
-				queue_hdmi = true;
-				DRM_DEBUG("IH: HDMI1\n");
-			}
+			if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
+				DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
+
+			rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+			queue_hdmi = true;
+			DRM_DEBUG("IH: HDMI1\n");
+
 			break;
 		default:
 			DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 09e3f39925fa..98f9adaccc3d 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2483,7 +2483,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
 	struct drm_buf *buf;
 	u32 *buffer;
 	const u8 __user *data;
-	int size, pass_size;
+	unsigned int size, pass_size;
 	u64 src_offset, dst_offset;
 
 	if (!radeon_check_offset(dev_priv, tex->offset)) {
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 4d2d0579fd49..f03b7eb15233 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2458,7 +2458,6 @@ struct radeon_device {
 
 	/* amdkfd interface */
 	struct kfd_dev		*kfd;
-	struct radeon_sa_manager	kfd_bo;
 
 	struct mutex	mn_lock;
 	DECLARE_HASHTABLE(mn_hash, 7);
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index c89215275053..fa719c53449b 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -469,22 +469,22 @@ void radeon_audio_detect(struct drm_connector *connector,
 	dig = radeon_encoder->enc_priv;
 
 	if (status == connector_status_connected) {
-		struct radeon_connector *radeon_connector;
-		int sink_type;
-
 		if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
 			radeon_encoder->audio = NULL;
 			return;
 		}
 
-		radeon_connector = to_radeon_connector(connector);
-		sink_type = radeon_dp_getsinktype(radeon_connector);
+		if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
-		if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
-		    sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
-			radeon_encoder->audio = rdev->audio.dp_funcs;
-		else
+			if (radeon_dp_getsinktype(radeon_connector) ==
+			    CONNECTOR_OBJECT_ID_DISPLAYPORT)
+				radeon_encoder->audio = rdev->audio.dp_funcs;
+			else
+				radeon_encoder->audio = rdev->audio.hdmi_funcs;
+		} else {
 			radeon_encoder->audio = rdev->audio.hdmi_funcs;
+		}
 
 		dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
 		radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
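
After this rework the audio function table is chosen in two steps: connector type first, then DP sink type, so a DP connector driving a passive dongle falls back to the HDMI programming path. A sketch of that selection order, with stand-in enums and struct names for the radeon types:

enum conn_type { CONN_HDMI, CONN_DP };
enum sink_type { SINK_DP, SINK_PASSIVE_DONGLE };	/* illustrative values */

struct audio_funcs { const char *name; };

static const struct audio_funcs dp_funcs   = { "dp" };
static const struct audio_funcs hdmi_funcs = { "hdmi" };

static const struct audio_funcs *pick_audio_funcs(enum conn_type conn,
						  enum sink_type sink)
{
	if (conn == CONN_DP) {
		/* only a true DP sink takes the DP path; passive dongles
		 * fall back to HDMI-style programming */
		if (sink == SINK_DP)
			return &dp_funcs;
		return &hdmi_funcs;
	}
	return &hdmi_funcs;	/* non-DP connectors are always HDMI-style */
}
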
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 45e54060ee97..afaf346bd50e 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -91,15 +91,34 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
 	struct radeon_device *rdev = crtc->dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev)) {
+		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+		       upper_32_bits(radeon_crtc->cursor_addr));
+		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+		       lower_32_bits(radeon_crtc->cursor_addr));
 		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
 		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
 		       EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
 		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
 	} else if (ASIC_IS_AVIVO(rdev)) {
+		if (rdev->family >= CHIP_RV770) {
+			if (radeon_crtc->crtc_id)
+				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(radeon_crtc->cursor_addr));
+			else
+				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(radeon_crtc->cursor_addr));
+		}
+
+		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+		       lower_32_bits(radeon_crtc->cursor_addr));
 		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
 		WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
 		       (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
 	} else {
+		/* offset is from DISP(2)_BASE_ADDRESS */
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+		       radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
+
 		switch (radeon_crtc->crtc_id) {
 		case 0:
 			WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
@@ -205,8 +224,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
 					      | (x << 16)
 					      | y));
 		/* offset is from DISP(2)_BASE_ADDRESS */
-		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
-								      (yorigin * 256)));
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+		       radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
+		       yorigin * 256);
 	}
 
 	radeon_crtc->cursor_x = x;
@@ -227,53 +247,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 	return ret;
 }
 
-static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
-{
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	struct radeon_device *rdev = crtc->dev->dev_private;
-	struct radeon_bo *robj = gem_to_radeon_bo(obj);
-	uint64_t gpu_addr;
-	int ret;
-
-	ret = radeon_bo_reserve(robj, false);
-	if (unlikely(ret != 0))
-		goto fail;
-	/* Only 27 bit offset for legacy cursor */
-	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
-				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
-				       &gpu_addr);
-	radeon_bo_unreserve(robj);
-	if (ret)
-		goto fail;
-
-	if (ASIC_IS_DCE4(rdev)) {
-		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
-		       upper_32_bits(gpu_addr));
-		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-		       gpu_addr & 0xffffffff);
-	} else if (ASIC_IS_AVIVO(rdev)) {
-		if (rdev->family >= CHIP_RV770) {
-			if (radeon_crtc->crtc_id)
-				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
-			else
-				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
-		}
-		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-		       gpu_addr & 0xffffffff);
-	} else {
-		radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
-		/* offset is from DISP(2)_BASE_ADDRESS */
-		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
-	}
-
-	return 0;
-
-fail:
-	drm_gem_object_unreference_unlocked(obj);
-
-	return ret;
-}
-
 int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 			    struct drm_file *file_priv,
 			    uint32_t handle,
@@ -283,7 +256,9 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 			    int32_t hot_y)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
+	struct radeon_bo *robj;
 	int ret;
 
 	if (!handle) {
@@ -305,6 +280,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 		return -ENOENT;
 	}
 
+	robj = gem_to_radeon_bo(obj);
+	ret = radeon_bo_reserve(robj, false);
+	if (ret != 0) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+	/* Only 27 bit offset for legacy cursor */
+	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+				       &radeon_crtc->cursor_addr);
+	radeon_bo_unreserve(robj);
+	if (ret) {
+		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
 	radeon_crtc->cursor_width = width;
 	radeon_crtc->cursor_height = height;
 
@@ -323,13 +315,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 		radeon_crtc->cursor_hot_y = hot_y;
 	}
 
-	ret = radeon_set_cursor(crtc, obj);
-
-	if (ret)
-		DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
-			  ret);
-	else
-		radeon_show_cursor(crtc);
+	radeon_show_cursor(crtc);
 
 	radeon_lock_cursor(crtc, false);
 
@@ -341,8 +327,7 @@ unpin:
 			radeon_bo_unpin(robj);
 			radeon_bo_unreserve(robj);
 		}
-		if (radeon_crtc->cursor_bo != obj)
-			drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+		drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
 	}
 
 	radeon_crtc->cursor_bo = obj;
@@ -360,7 +345,6 @@ unpin:
 void radeon_cursor_reset(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	int ret;
 
 	if (radeon_crtc->cursor_bo) {
 		radeon_lock_cursor(crtc, true);
@@ -368,12 +352,7 @@ void radeon_cursor_reset(struct drm_crtc *crtc)
 		radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
 					  radeon_crtc->cursor_y);
 
-		ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
-		if (ret)
-			DRM_ERROR("radeon_set_cursor returned %d, not showing "
-				  "cursor\n", ret);
-		else
-			radeon_show_cursor(crtc);
+		radeon_show_cursor(crtc);
 
 		radeon_lock_cursor(crtc, false);
 	}
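
The cursor rework moves all fallible work (reserving and pinning the BO) into radeon_crtc_cursor_set2() and caches the pinned GPU address in radeon_crtc->cursor_addr; radeon_show_cursor() then only reprograms registers from that cache, which is why radeon_set_cursor() and its error paths disappear. A compilable sketch of the pin-once-and-cache pattern — the bo type and pin helper are placeholders standing in for radeon_bo and radeon_bo_pin_restricted():

#include <stdint.h>

struct bo { uint64_t size; };	/* placeholder buffer object */

static int bo_pin(struct bo *bo, uint64_t max_offset, uint64_t *gpu_addr)
{
	(void)bo; (void)max_offset;
	*gpu_addr = 0x100000;	/* pretend VRAM placement */
	return 0;
}

struct crtc_state {
	struct bo *cursor_bo;
	uint64_t   cursor_addr;	/* cached when the BO is pinned */
};

/* All fallible work happens once, when the cursor BO changes. */
static int cursor_set(struct crtc_state *c, struct bo *bo, int legacy)
{
	/* legacy parts can only address a 27-bit cursor offset */
	uint64_t limit = legacy ? (1ull << 27) : 0;
	int ret = bo_pin(bo, limit, &c->cursor_addr);

	if (ret)
		return ret;
	c->cursor_bo = bo;
	return 0;
}

/* Showing the cursor can no longer fail: just reprogram from the cache,
 * e.g. write cursor_addr high/low into the CUR_SURFACE_ADDRESS pair.
 */
static void cursor_show(const struct crtc_state *c)
{
	(void)c->cursor_addr;
}
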
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 13e207e0dff0..d8319dae8358 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1080,6 +1080,22 @@ static bool radeon_check_pot_argument(int arg)
 }
 
 /**
+ * radeon_gart_size_auto - determine a sensible default GART size
+ * according to ASIC family.
+ *
+ * @family: ASIC family name
+ */
+static int radeon_gart_size_auto(enum radeon_family family)
+{
+	/* default to a larger gart size on newer asics */
+	if (family >= CHIP_TAHITI)
+		return 2048;
+	else if (family >= CHIP_RV770)
+		return 1024;
+	else
+		return 512;
+}
+
+/**
  * radeon_check_arguments - validate module params
  *
  * @rdev: radeon_device pointer
@@ -1097,27 +1113,17 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1097 } 1113 }
1098 1114
1099 if (radeon_gart_size == -1) { 1115 if (radeon_gart_size == -1) {
1100 /* default to a larger gart size on newer asics */ 1116 radeon_gart_size = radeon_gart_size_auto(rdev->family);
1101 if (rdev->family >= CHIP_RV770)
1102 radeon_gart_size = 1024;
1103 else
1104 radeon_gart_size = 512;
1105 } 1117 }
1106 /* gtt size must be power of two and greater or equal to 32M */ 1118 /* gtt size must be power of two and greater or equal to 32M */
1107 if (radeon_gart_size < 32) { 1119 if (radeon_gart_size < 32) {
1108 dev_warn(rdev->dev, "gart size (%d) too small\n", 1120 dev_warn(rdev->dev, "gart size (%d) too small\n",
1109 radeon_gart_size); 1121 radeon_gart_size);
1110 if (rdev->family >= CHIP_RV770) 1122 radeon_gart_size = radeon_gart_size_auto(rdev->family);
1111 radeon_gart_size = 1024;
1112 else
1113 radeon_gart_size = 512;
1114 } else if (!radeon_check_pot_argument(radeon_gart_size)) { 1123 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
1115 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", 1124 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1116 radeon_gart_size); 1125 radeon_gart_size);
1117 if (rdev->family >= CHIP_RV770) 1126 radeon_gart_size = radeon_gart_size_auto(rdev->family);
1118 radeon_gart_size = 1024;
1119 else
1120 radeon_gart_size = 512;
1121 } 1127 }
1122 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20; 1128 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1123 1129
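
The refactor above folds three duplicated family checks into radeon_gart_size_auto(), so every invalid module parameter now falls back through the same helper. A self-contained userspace sketch of the combined default-and-validate logic (the CHIP_* values below are illustrative stand-ins, not the kernel's enum values):

#include <stdbool.h>
#include <stdio.h>

enum family { CHIP_R100 = 0, CHIP_RV770 = 50, CHIP_TAHITI = 70 };

static bool is_pot(int v)
{
	return v >= 1 && (v & (v - 1)) == 0;
}

static int gart_size_auto(enum family f)
{
	if (f >= CHIP_TAHITI)
		return 2048;	/* newer asics default to a larger GART */
	else if (f >= CHIP_RV770)
		return 1024;
	else
		return 512;
}

static int check_gart_size(int requested, enum family f)
{
	/* -1 means "auto"; anything below 32M or not a power of two is rejected */
	if (requested == -1 || requested < 32 || !is_pot(requested))
		return gart_size_auto(f);
	return requested;
}

int main(void)
{
	printf("%d\n", check_gart_size(-1, CHIP_TAHITI));  /* 2048 */
	printf("%d\n", check_gart_size(100, CHIP_RV770));  /* 1024: 100 is not a power of two */
	printf("%d\n", check_gart_size(256, CHIP_R100));   /* 256 */
	return 0;
}
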
@@ -1463,6 +1469,21 @@ int radeon_device_init(struct radeon_device *rdev,
1463 if (r) 1469 if (r)
1464 DRM_ERROR("ib ring test failed (%d).\n", r); 1470 DRM_ERROR("ib ring test failed (%d).\n", r);
1465 1471
1472 /*
 1473 * Turks/Thames GPUs will freeze the whole laptop if DPM is not restarted
 1474 * after the CP ring has chewed through at least one packet. Hence we stop
 1475 * and restart DPM here, after the radeon_ib_ring_tests().
1476 */
1477 if (rdev->pm.dpm_enabled &&
1478 (rdev->pm.pm_method == PM_METHOD_DPM) &&
1479 (rdev->family == CHIP_TURKS) &&
1480 (rdev->flags & RADEON_IS_MOBILITY)) {
1481 mutex_lock(&rdev->pm.mutex);
1482 radeon_dpm_disable(rdev);
1483 radeon_dpm_enable(rdev);
1484 mutex_unlock(&rdev->pm.mutex);
1485 }
1486
1466 if ((radeon_testing & 1)) { 1487 if ((radeon_testing & 1)) {
1467 if (rdev->accel_working) 1488 if (rdev->accel_working)
1468 radeon_test_moves(rdev); 1489 radeon_test_moves(rdev);
@@ -1557,11 +1578,21 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
1557 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 1578 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1558 } 1579 }
1559 1580
1560 /* unpin the front buffers */ 1581 /* unpin the front buffers and cursors */
1561 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1582 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1583 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1562 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb); 1584 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
1563 struct radeon_bo *robj; 1585 struct radeon_bo *robj;
1564 1586
1587 if (radeon_crtc->cursor_bo) {
1588 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1589 r = radeon_bo_reserve(robj, false);
1590 if (r == 0) {
1591 radeon_bo_unpin(robj);
1592 radeon_bo_unreserve(robj);
1593 }
1594 }
1595
1565 if (rfb == NULL || rfb->obj == NULL) { 1596 if (rfb == NULL || rfb->obj == NULL) {
1566 continue; 1597 continue;
1567 } 1598 }
@@ -1624,6 +1655,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1624{ 1655{
1625 struct drm_connector *connector; 1656 struct drm_connector *connector;
1626 struct radeon_device *rdev = dev->dev_private; 1657 struct radeon_device *rdev = dev->dev_private;
1658 struct drm_crtc *crtc;
1627 int r; 1659 int r;
1628 1660
1629 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1661 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1663,6 +1695,27 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1663 1695
1664 radeon_restore_bios_scratch_regs(rdev); 1696 radeon_restore_bios_scratch_regs(rdev);
1665 1697
1698 /* pin cursors */
1699 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1700 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1701
1702 if (radeon_crtc->cursor_bo) {
1703 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1704 r = radeon_bo_reserve(robj, false);
1705 if (r == 0) {
 1706 /* Only a 27-bit offset is available for legacy cursors */
1707 r = radeon_bo_pin_restricted(robj,
1708 RADEON_GEM_DOMAIN_VRAM,
1709 ASIC_IS_AVIVO(rdev) ?
1710 0 : 1 << 27,
1711 &radeon_crtc->cursor_addr);
1712 if (r != 0)
1713 DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
1714 radeon_bo_unreserve(robj);
1715 }
1716 }
1717 }
1718
1666 /* init dig PHYs, disp eng pll */ 1719 /* init dig PHYs, disp eng pll */
1667 if (rdev->is_atom_bios) { 1720 if (rdev->is_atom_bios) {
1668 radeon_atom_encoder_init(rdev); 1721 radeon_atom_encoder_init(rdev);
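
Suspend above drops the cursor pins; resume takes them again with a placement cap, because pre-AVIVO cursor registers carry only 27 offset bits, which is what the ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27 argument expresses. A toy model of just that bounds check (pin_restricted() is a hypothetical stand-in; the real radeon_bo_pin_restricted() also chooses the placement and returns the GPU address):

#include <stdint.h>
#include <stdio.h>

/* max_offset == 0 means "no restriction" (AVIVO and newer). */
static int pin_restricted(uint64_t offset, uint64_t max_offset)
{
	if (max_offset && offset >= max_offset)
		return -1;	/* not addressable by the legacy cursor registers */
	return 0;
}

int main(void)
{
	printf("%d\n", pin_restricted(1ull << 26, 1ull << 27)); /* 0: fits below 128M */
	printf("%d\n", pin_restricted(1ull << 28, 1ull << 27)); /* -1: above the 27-bit cap */
	printf("%d\n", pin_restricted(1ull << 28, 0));          /* 0: unrestricted */
	return 0;
}
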
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 2b98ed3e684d..257b10be5cda 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -663,12 +663,17 @@ int
663radeon_dp_mst_probe(struct radeon_connector *radeon_connector) 663radeon_dp_mst_probe(struct radeon_connector *radeon_connector)
664{ 664{
665 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 665 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
666 struct drm_device *dev = radeon_connector->base.dev;
667 struct radeon_device *rdev = dev->dev_private;
666 int ret; 668 int ret;
667 u8 msg[1]; 669 u8 msg[1];
668 670
669 if (!radeon_mst) 671 if (!radeon_mst)
670 return 0; 672 return 0;
671 673
674 if (!ASIC_IS_DCE5(rdev))
675 return 0;
676
672 if (dig_connector->dpcd[DP_DPCD_REV] < 0x12) 677 if (dig_connector->dpcd[DP_DPCD_REV] < 0x12)
673 return 0; 678 return 0;
674 679
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ac3c1310b953..013ec7106e55 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -428,7 +428,6 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
428int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 428int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
429 struct drm_file *filp) 429 struct drm_file *filp)
430{ 430{
431 struct radeon_device *rdev = dev->dev_private;
432 struct drm_radeon_gem_busy *args = data; 431 struct drm_radeon_gem_busy *args = data;
433 struct drm_gem_object *gobj; 432 struct drm_gem_object *gobj;
434 struct radeon_bo *robj; 433 struct radeon_bo *robj;
@@ -440,10 +439,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
440 return -ENOENT; 439 return -ENOENT;
441 } 440 }
442 robj = gem_to_radeon_bo(gobj); 441 robj = gem_to_radeon_bo(gobj);
443 r = radeon_bo_wait(robj, &cur_placement, true); 442
443 r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
444 if (r == 0)
445 r = -EBUSY;
446 else
447 r = 0;
448
449 cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
444 args->domain = radeon_mem_type_to_domain(cur_placement); 450 args->domain = radeon_mem_type_to_domain(cur_placement);
445 drm_gem_object_unreference_unlocked(gobj); 451 drm_gem_object_unreference_unlocked(gobj);
446 r = radeon_gem_handle_lockup(rdev, r);
447 return r; 452 return r;
448} 453}
449 454
@@ -471,6 +476,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
471 r = ret; 476 r = ret;
472 477
473 /* Flush HDP cache via MMIO if necessary */ 478 /* Flush HDP cache via MMIO if necessary */
479 cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
474 if (rdev->asic->mmio_hdp_flush && 480 if (rdev->asic->mmio_hdp_flush &&
475 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) 481 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
476 robj->rdev->asic->mmio_hdp_flush(rdev); 482 robj->rdev->asic->mmio_hdp_flush(rdev);
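
The busy ioctl above no longer blocks in radeon_bo_wait(); it peeks at the reservation object's fences and maps "still pending" to -EBUSY, which is also why the radeon_gem_handle_lockup() call could go. Reduced to a sketch (the boolean parameter stands in for reservation_object_test_signaled_rcu(), which returns true once all relevant fences have signaled):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int busy_ioctl_result(bool all_fences_signaled)
{
	return all_fences_signaled ? 0 : -EBUSY;
}

int main(void)
{
	printf("%d\n", busy_ioctl_result(true));  /* 0: idle */
	printf("%d\n", busy_ioctl_result(false)); /* -EBUSY: GPU still using the BO */
	return 0;
}
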
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 9632e886ddc3..4a119c255ba9 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -576,6 +576,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
576 if (radeon_get_allowed_info_register(rdev, *value, value)) 576 if (radeon_get_allowed_info_register(rdev, *value, value))
577 return -EINVAL; 577 return -EINVAL;
578 break; 578 break;
579 case RADEON_INFO_VA_UNMAP_WORKING:
580 *value = true;
581 break;
579 case RADEON_INFO_GPU_RESET_COUNTER: 582 case RADEON_INFO_GPU_RESET_COUNTER:
580 *value = atomic_read(&rdev->gpu_reset_counter); 583 *value = atomic_read(&rdev->gpu_reset_counter);
581 break; 584 break;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6de5459316b5..07909d817381 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -343,7 +343,6 @@ struct radeon_crtc {
343 int max_cursor_width; 343 int max_cursor_width;
344 int max_cursor_height; 344 int max_cursor_height;
345 uint32_t legacy_display_base_addr; 345 uint32_t legacy_display_base_addr;
346 uint32_t legacy_cursor_offset;
347 enum radeon_rmx_type rmx_type; 346 enum radeon_rmx_type rmx_type;
348 u8 h_border; 347 u8 h_border;
349 u8 v_border; 348 u8 v_border;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index edafd3c2b170..06ac59fe332a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -719,7 +719,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
719 return 0; 719 return 0;
720 720
721 if (gtt && gtt->userptr) { 721 if (gtt && gtt->userptr) {
722 ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL); 722 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
723 if (!ttm->sg) 723 if (!ttm->sg)
724 return -ENOMEM; 724 return -ENOMEM;
725 725
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 9739ded91b7a..48d97c040f49 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -457,14 +457,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 457 /* make sure object fits at this offset */ 457 /* make sure object fits at this offset */
458 eoffset = soffset + size; 458 eoffset = soffset + size;
459 if (soffset >= eoffset) { 459 if (soffset >= eoffset) {
460 return -EINVAL; 460 r = -EINVAL;
461 goto error_unreserve;
461 } 462 }
462 463
463 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE; 464 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
464 if (last_pfn > rdev->vm_manager.max_pfn) { 465 if (last_pfn > rdev->vm_manager.max_pfn) {
465 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n", 466 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
466 last_pfn, rdev->vm_manager.max_pfn); 467 last_pfn, rdev->vm_manager.max_pfn);
467 return -EINVAL; 468 r = -EINVAL;
469 goto error_unreserve;
468 } 470 }
469 471
470 } else { 472 } else {
@@ -485,42 +487,41 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
485 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo, 487 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
486 soffset, tmp->bo, tmp->it.start, tmp->it.last); 488 soffset, tmp->bo, tmp->it.start, tmp->it.last);
487 mutex_unlock(&vm->mutex); 489 mutex_unlock(&vm->mutex);
488 return -EINVAL; 490 r = -EINVAL;
491 goto error_unreserve;
489 } 492 }
490 } 493 }
491 494
492 if (bo_va->it.start || bo_va->it.last) { 495 if (bo_va->it.start || bo_va->it.last) {
493 spin_lock(&vm->status_lock); 496 /* add a clone of the bo_va to clear the old address */
494 if (list_empty(&bo_va->vm_status)) { 497 struct radeon_bo_va *tmp;
495 /* add a clone of the bo_va to clear the old address */ 498 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
496 struct radeon_bo_va *tmp; 499 if (!tmp) {
497 spin_unlock(&vm->status_lock); 500 mutex_unlock(&vm->mutex);
498 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); 501 r = -ENOMEM;
499 if (!tmp) { 502 goto error_unreserve;
500 mutex_unlock(&vm->mutex);
501 return -ENOMEM;
502 }
503 tmp->it.start = bo_va->it.start;
504 tmp->it.last = bo_va->it.last;
505 tmp->vm = vm;
506 tmp->bo = radeon_bo_ref(bo_va->bo);
507 spin_lock(&vm->status_lock);
508 list_add(&tmp->vm_status, &vm->freed);
509 } 503 }
510 spin_unlock(&vm->status_lock); 504 tmp->it.start = bo_va->it.start;
505 tmp->it.last = bo_va->it.last;
506 tmp->vm = vm;
507 tmp->bo = radeon_bo_ref(bo_va->bo);
511 508
512 interval_tree_remove(&bo_va->it, &vm->va); 509 interval_tree_remove(&bo_va->it, &vm->va);
510 spin_lock(&vm->status_lock);
513 bo_va->it.start = 0; 511 bo_va->it.start = 0;
514 bo_va->it.last = 0; 512 bo_va->it.last = 0;
513 list_del_init(&bo_va->vm_status);
514 list_add(&tmp->vm_status, &vm->freed);
515 spin_unlock(&vm->status_lock);
515 } 516 }
516 517
517 if (soffset || eoffset) { 518 if (soffset || eoffset) {
519 spin_lock(&vm->status_lock);
518 bo_va->it.start = soffset; 520 bo_va->it.start = soffset;
519 bo_va->it.last = eoffset - 1; 521 bo_va->it.last = eoffset - 1;
520 interval_tree_insert(&bo_va->it, &vm->va);
521 spin_lock(&vm->status_lock);
522 list_add(&bo_va->vm_status, &vm->cleared); 522 list_add(&bo_va->vm_status, &vm->cleared);
523 spin_unlock(&vm->status_lock); 523 spin_unlock(&vm->status_lock);
524 interval_tree_insert(&bo_va->it, &vm->va);
524 } 525 }
525 526
526 bo_va->flags = flags; 527 bo_va->flags = flags;
@@ -555,7 +556,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
555 r = radeon_vm_clear_bo(rdev, pt); 556 r = radeon_vm_clear_bo(rdev, pt);
556 if (r) { 557 if (r) {
557 radeon_bo_unref(&pt); 558 radeon_bo_unref(&pt);
558 radeon_bo_reserve(bo_va->bo, false);
559 return r; 559 return r;
560 } 560 }
561 561
@@ -575,6 +575,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
575 575
576 mutex_unlock(&vm->mutex); 576 mutex_unlock(&vm->mutex);
577 return 0; 577 return 0;
578
579error_unreserve:
580 radeon_bo_unreserve(bo_va->bo);
581 return r;
578} 582}
579 583
580/** 584/**
@@ -1122,12 +1126,12 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
1122 interval_tree_remove(&bo_va->it, &vm->va); 1126 interval_tree_remove(&bo_va->it, &vm->va);
1123 1127
1124 spin_lock(&vm->status_lock); 1128 spin_lock(&vm->status_lock);
1125 if (list_empty(&bo_va->vm_status)) { 1129 list_del(&bo_va->vm_status);
1130 if (bo_va->it.start || bo_va->it.last) {
1126 bo_va->bo = radeon_bo_ref(bo_va->bo); 1131 bo_va->bo = radeon_bo_ref(bo_va->bo);
1127 list_add(&bo_va->vm_status, &vm->freed); 1132 list_add(&bo_va->vm_status, &vm->freed);
1128 } else { 1133 } else {
1129 radeon_fence_unref(&bo_va->last_pt_update); 1134 radeon_fence_unref(&bo_va->last_pt_update);
1130 list_del(&bo_va->vm_status);
1131 kfree(bo_va); 1135 kfree(bo_va);
1132 } 1136 }
1133 spin_unlock(&vm->status_lock); 1137 spin_unlock(&vm->status_lock);
@@ -1151,7 +1155,8 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1151 1155
1152 list_for_each_entry(bo_va, &bo->va, bo_list) { 1156 list_for_each_entry(bo_va, &bo->va, bo_list) {
1153 spin_lock(&bo_va->vm->status_lock); 1157 spin_lock(&bo_va->vm->status_lock);
1154 if (list_empty(&bo_va->vm_status)) 1158 if (list_empty(&bo_va->vm_status) &&
1159 (bo_va->it.start || bo_va->it.last))
1155 list_add(&bo_va->vm_status, &bo_va->vm->invalidated); 1160 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1156 spin_unlock(&bo_va->vm->status_lock); 1161 spin_unlock(&bo_va->vm->status_lock);
1157 } 1162 }
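
The radeon_vm.c changes above route every early exit in radeon_vm_bo_set_addr() through one error_unreserve label, so the reservation held on bo_va->bo is released consistently on failure. The shape of that pattern in a standalone sketch (a pthread mutex stands in for the TTM reservation):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bo_lock = PTHREAD_MUTEX_INITIALIZER;	/* "reservation" */

static int set_addr(unsigned long soffset, unsigned long size, unsigned long max)
{
	int r = 0;

	pthread_mutex_lock(&bo_lock);

	if (size == 0 || soffset + size < soffset) {	/* empty or wrapped range */
		r = -EINVAL;
		goto error_unreserve;
	}
	if (soffset + size > max) {	/* above the VA limit */
		r = -EINVAL;
		goto error_unreserve;
	}
	/* ...interval tree and page table updates would go here... */

error_unreserve:
	pthread_mutex_unlock(&bo_lock);	/* released on every path */
	return r;
}

int main(void)
{
	printf("%d\n", set_addr(0x1000, 0x1000, 0x100000));	/* 0 */
	printf("%d\n", set_addr(0x1000, 0, 0x100000));		/* -EINVAL */
	return 0;
}
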
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 26388b5dd6ed..07037e32dea3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6466,23 +6466,27 @@ restart_ih:
6466 case 1: /* D1 vblank/vline */ 6466 case 1: /* D1 vblank/vline */
6467 switch (src_data) { 6467 switch (src_data) {
6468 case 0: /* D1 vblank */ 6468 case 0: /* D1 vblank */
6469 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) { 6469 if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
6470 if (rdev->irq.crtc_vblank_int[0]) { 6470 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6471 drm_handle_vblank(rdev->ddev, 0); 6471
6472 rdev->pm.vblank_sync = true; 6472 if (rdev->irq.crtc_vblank_int[0]) {
6473 wake_up(&rdev->irq.vblank_queue); 6473 drm_handle_vblank(rdev->ddev, 0);
6474 } 6474 rdev->pm.vblank_sync = true;
6475 if (atomic_read(&rdev->irq.pflip[0])) 6475 wake_up(&rdev->irq.vblank_queue);
6476 radeon_crtc_handle_vblank(rdev, 0);
6477 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
6478 DRM_DEBUG("IH: D1 vblank\n");
6479 } 6476 }
6477 if (atomic_read(&rdev->irq.pflip[0]))
6478 radeon_crtc_handle_vblank(rdev, 0);
6479 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
6480 DRM_DEBUG("IH: D1 vblank\n");
6481
6480 break; 6482 break;
6481 case 1: /* D1 vline */ 6483 case 1: /* D1 vline */
6482 if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) { 6484 if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
6483 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT; 6485 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6484 DRM_DEBUG("IH: D1 vline\n"); 6486
6485 } 6487 rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
6488 DRM_DEBUG("IH: D1 vline\n");
6489
6486 break; 6490 break;
6487 default: 6491 default:
6488 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6492 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6492,23 +6496,27 @@ restart_ih:
6492 case 2: /* D2 vblank/vline */ 6496 case 2: /* D2 vblank/vline */
6493 switch (src_data) { 6497 switch (src_data) {
6494 case 0: /* D2 vblank */ 6498 case 0: /* D2 vblank */
6495 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) { 6499 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
6496 if (rdev->irq.crtc_vblank_int[1]) { 6500 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6497 drm_handle_vblank(rdev->ddev, 1); 6501
6498 rdev->pm.vblank_sync = true; 6502 if (rdev->irq.crtc_vblank_int[1]) {
6499 wake_up(&rdev->irq.vblank_queue); 6503 drm_handle_vblank(rdev->ddev, 1);
6500 } 6504 rdev->pm.vblank_sync = true;
6501 if (atomic_read(&rdev->irq.pflip[1])) 6505 wake_up(&rdev->irq.vblank_queue);
6502 radeon_crtc_handle_vblank(rdev, 1);
6503 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
6504 DRM_DEBUG("IH: D2 vblank\n");
6505 } 6506 }
6507 if (atomic_read(&rdev->irq.pflip[1]))
6508 radeon_crtc_handle_vblank(rdev, 1);
6509 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
6510 DRM_DEBUG("IH: D2 vblank\n");
6511
6506 break; 6512 break;
6507 case 1: /* D2 vline */ 6513 case 1: /* D2 vline */
6508 if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) { 6514 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
6509 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT; 6515 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6510 DRM_DEBUG("IH: D2 vline\n"); 6516
6511 } 6517 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
6518 DRM_DEBUG("IH: D2 vline\n");
6519
6512 break; 6520 break;
6513 default: 6521 default:
6514 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6522 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6518,23 +6526,27 @@ restart_ih:
6518 case 3: /* D3 vblank/vline */ 6526 case 3: /* D3 vblank/vline */
6519 switch (src_data) { 6527 switch (src_data) {
6520 case 0: /* D3 vblank */ 6528 case 0: /* D3 vblank */
6521 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { 6529 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
6522 if (rdev->irq.crtc_vblank_int[2]) { 6530 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6523 drm_handle_vblank(rdev->ddev, 2); 6531
6524 rdev->pm.vblank_sync = true; 6532 if (rdev->irq.crtc_vblank_int[2]) {
6525 wake_up(&rdev->irq.vblank_queue); 6533 drm_handle_vblank(rdev->ddev, 2);
6526 } 6534 rdev->pm.vblank_sync = true;
6527 if (atomic_read(&rdev->irq.pflip[2])) 6535 wake_up(&rdev->irq.vblank_queue);
6528 radeon_crtc_handle_vblank(rdev, 2);
6529 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
6530 DRM_DEBUG("IH: D3 vblank\n");
6531 } 6536 }
6537 if (atomic_read(&rdev->irq.pflip[2]))
6538 radeon_crtc_handle_vblank(rdev, 2);
6539 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
6540 DRM_DEBUG("IH: D3 vblank\n");
6541
6532 break; 6542 break;
6533 case 1: /* D3 vline */ 6543 case 1: /* D3 vline */
6534 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) { 6544 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
6535 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT; 6545 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6536 DRM_DEBUG("IH: D3 vline\n"); 6546
6537 } 6547 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
6548 DRM_DEBUG("IH: D3 vline\n");
6549
6538 break; 6550 break;
6539 default: 6551 default:
6540 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6552 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6544,23 +6556,27 @@ restart_ih:
6544 case 4: /* D4 vblank/vline */ 6556 case 4: /* D4 vblank/vline */
6545 switch (src_data) { 6557 switch (src_data) {
6546 case 0: /* D4 vblank */ 6558 case 0: /* D4 vblank */
6547 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { 6559 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
6548 if (rdev->irq.crtc_vblank_int[3]) { 6560 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6549 drm_handle_vblank(rdev->ddev, 3); 6561
6550 rdev->pm.vblank_sync = true; 6562 if (rdev->irq.crtc_vblank_int[3]) {
6551 wake_up(&rdev->irq.vblank_queue); 6563 drm_handle_vblank(rdev->ddev, 3);
6552 } 6564 rdev->pm.vblank_sync = true;
6553 if (atomic_read(&rdev->irq.pflip[3])) 6565 wake_up(&rdev->irq.vblank_queue);
6554 radeon_crtc_handle_vblank(rdev, 3);
6555 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
6556 DRM_DEBUG("IH: D4 vblank\n");
6557 } 6566 }
6567 if (atomic_read(&rdev->irq.pflip[3]))
6568 radeon_crtc_handle_vblank(rdev, 3);
6569 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
6570 DRM_DEBUG("IH: D4 vblank\n");
6571
6558 break; 6572 break;
6559 case 1: /* D4 vline */ 6573 case 1: /* D4 vline */
6560 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) { 6574 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
6561 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT; 6575 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6562 DRM_DEBUG("IH: D4 vline\n"); 6576
6563 } 6577 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
6578 DRM_DEBUG("IH: D4 vline\n");
6579
6564 break; 6580 break;
6565 default: 6581 default:
6566 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6582 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6570,23 +6586,27 @@ restart_ih:
6570 case 5: /* D5 vblank/vline */ 6586 case 5: /* D5 vblank/vline */
6571 switch (src_data) { 6587 switch (src_data) {
6572 case 0: /* D5 vblank */ 6588 case 0: /* D5 vblank */
6573 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { 6589 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
6574 if (rdev->irq.crtc_vblank_int[4]) { 6590 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6575 drm_handle_vblank(rdev->ddev, 4); 6591
6576 rdev->pm.vblank_sync = true; 6592 if (rdev->irq.crtc_vblank_int[4]) {
6577 wake_up(&rdev->irq.vblank_queue); 6593 drm_handle_vblank(rdev->ddev, 4);
6578 } 6594 rdev->pm.vblank_sync = true;
6579 if (atomic_read(&rdev->irq.pflip[4])) 6595 wake_up(&rdev->irq.vblank_queue);
6580 radeon_crtc_handle_vblank(rdev, 4);
6581 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
6582 DRM_DEBUG("IH: D5 vblank\n");
6583 } 6596 }
6597 if (atomic_read(&rdev->irq.pflip[4]))
6598 radeon_crtc_handle_vblank(rdev, 4);
6599 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
6600 DRM_DEBUG("IH: D5 vblank\n");
6601
6584 break; 6602 break;
6585 case 1: /* D5 vline */ 6603 case 1: /* D5 vline */
6586 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) { 6604 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
6587 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT; 6605 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6588 DRM_DEBUG("IH: D5 vline\n"); 6606
6589 } 6607 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
6608 DRM_DEBUG("IH: D5 vline\n");
6609
6590 break; 6610 break;
6591 default: 6611 default:
6592 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6612 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6596,23 +6616,27 @@ restart_ih:
6596 case 6: /* D6 vblank/vline */ 6616 case 6: /* D6 vblank/vline */
6597 switch (src_data) { 6617 switch (src_data) {
6598 case 0: /* D6 vblank */ 6618 case 0: /* D6 vblank */
6599 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { 6619 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
6600 if (rdev->irq.crtc_vblank_int[5]) { 6620 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6601 drm_handle_vblank(rdev->ddev, 5); 6621
6602 rdev->pm.vblank_sync = true; 6622 if (rdev->irq.crtc_vblank_int[5]) {
6603 wake_up(&rdev->irq.vblank_queue); 6623 drm_handle_vblank(rdev->ddev, 5);
6604 } 6624 rdev->pm.vblank_sync = true;
6605 if (atomic_read(&rdev->irq.pflip[5])) 6625 wake_up(&rdev->irq.vblank_queue);
6606 radeon_crtc_handle_vblank(rdev, 5);
6607 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
6608 DRM_DEBUG("IH: D6 vblank\n");
6609 } 6626 }
6627 if (atomic_read(&rdev->irq.pflip[5]))
6628 radeon_crtc_handle_vblank(rdev, 5);
6629 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
6630 DRM_DEBUG("IH: D6 vblank\n");
6631
6610 break; 6632 break;
6611 case 1: /* D6 vline */ 6633 case 1: /* D6 vline */
6612 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) { 6634 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
6613 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT; 6635 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6614 DRM_DEBUG("IH: D6 vline\n"); 6636
6615 } 6637 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
6638 DRM_DEBUG("IH: D6 vline\n");
6639
6616 break; 6640 break;
6617 default: 6641 default:
6618 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6642 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6632,88 +6656,112 @@ restart_ih:
6632 case 42: /* HPD hotplug */ 6656 case 42: /* HPD hotplug */
6633 switch (src_data) { 6657 switch (src_data) {
6634 case 0: 6658 case 0:
6635 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) { 6659 if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
6636 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT; 6660 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6637 queue_hotplug = true; 6661
6638 DRM_DEBUG("IH: HPD1\n"); 6662 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
6639 } 6663 queue_hotplug = true;
6664 DRM_DEBUG("IH: HPD1\n");
6665
6640 break; 6666 break;
6641 case 1: 6667 case 1:
6642 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) { 6668 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
6643 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT; 6669 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6644 queue_hotplug = true; 6670
6645 DRM_DEBUG("IH: HPD2\n"); 6671 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
6646 } 6672 queue_hotplug = true;
6673 DRM_DEBUG("IH: HPD2\n");
6674
6647 break; 6675 break;
6648 case 2: 6676 case 2:
6649 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) { 6677 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
6650 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT; 6678 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6651 queue_hotplug = true; 6679
6652 DRM_DEBUG("IH: HPD3\n"); 6680 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
6653 } 6681 queue_hotplug = true;
6682 DRM_DEBUG("IH: HPD3\n");
6683
6654 break; 6684 break;
6655 case 3: 6685 case 3:
6656 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) { 6686 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
6657 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT; 6687 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6658 queue_hotplug = true; 6688
6659 DRM_DEBUG("IH: HPD4\n"); 6689 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
6660 } 6690 queue_hotplug = true;
6691 DRM_DEBUG("IH: HPD4\n");
6692
6661 break; 6693 break;
6662 case 4: 6694 case 4:
6663 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) { 6695 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
6664 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT; 6696 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6665 queue_hotplug = true; 6697
6666 DRM_DEBUG("IH: HPD5\n"); 6698 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
6667 } 6699 queue_hotplug = true;
6700 DRM_DEBUG("IH: HPD5\n");
6701
6668 break; 6702 break;
6669 case 5: 6703 case 5:
6670 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) { 6704 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
6671 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT; 6705 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6672 queue_hotplug = true; 6706
6673 DRM_DEBUG("IH: HPD6\n"); 6707 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
6674 } 6708 queue_hotplug = true;
6709 DRM_DEBUG("IH: HPD6\n");
6710
6675 break; 6711 break;
6676 case 6: 6712 case 6:
6677 if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) { 6713 if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
6678 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT; 6714 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6679 queue_dp = true; 6715
6680 DRM_DEBUG("IH: HPD_RX 1\n"); 6716 rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
6681 } 6717 queue_dp = true;
6718 DRM_DEBUG("IH: HPD_RX 1\n");
6719
6682 break; 6720 break;
6683 case 7: 6721 case 7:
6684 if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) { 6722 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
6685 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT; 6723 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6686 queue_dp = true; 6724
6687 DRM_DEBUG("IH: HPD_RX 2\n"); 6725 rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
6688 } 6726 queue_dp = true;
6727 DRM_DEBUG("IH: HPD_RX 2\n");
6728
6689 break; 6729 break;
6690 case 8: 6730 case 8:
6691 if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) { 6731 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
6692 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT; 6732 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6693 queue_dp = true; 6733
6694 DRM_DEBUG("IH: HPD_RX 3\n"); 6734 rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
6695 } 6735 queue_dp = true;
6736 DRM_DEBUG("IH: HPD_RX 3\n");
6737
6696 break; 6738 break;
6697 case 9: 6739 case 9:
6698 if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) { 6740 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
6699 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT; 6741 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6700 queue_dp = true; 6742
6701 DRM_DEBUG("IH: HPD_RX 4\n"); 6743 rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
6702 } 6744 queue_dp = true;
6745 DRM_DEBUG("IH: HPD_RX 4\n");
6746
6703 break; 6747 break;
6704 case 10: 6748 case 10:
6705 if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) { 6749 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
6706 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT; 6750 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6707 queue_dp = true; 6751
6708 DRM_DEBUG("IH: HPD_RX 5\n"); 6752 rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
6709 } 6753 queue_dp = true;
6754 DRM_DEBUG("IH: HPD_RX 5\n");
6755
6710 break; 6756 break;
6711 case 11: 6757 case 11:
6712 if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) { 6758 if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
6713 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT; 6759 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
6714 queue_dp = true; 6760
6715 DRM_DEBUG("IH: HPD_RX 6\n"); 6761 rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
6716 } 6762 queue_dp = true;
6763 DRM_DEBUG("IH: HPD_RX 6\n");
6764
6717 break; 6765 break;
6718 default: 6766 default:
6719 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 6767 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
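
Each vblank, vline and HPD case in the si.c interrupt handler is rewritten from "act only if the status bit is set" to "flag a spurious event if the bit is clear, then act unconditionally", which removes one indentation level per case and makes unexpected IH events visible in the logs. The pattern in isolation (the bit position and names are illustrative):

#include <stdio.h>

#define LB_D1_VBLANK_INTERRUPT (1u << 3)	/* illustrative bit */

static unsigned int disp_int;

static void handle_d1_vblank(void)
{
	if (!(disp_int & LB_D1_VBLANK_INTERRUPT))
		printf("IH: IH event w/o asserted irq bit?\n");

	/* handling is no longer gated on the status bit */
	disp_int &= ~LB_D1_VBLANK_INTERRUPT;
	printf("IH: D1 vblank\n");
}

int main(void)
{
	disp_int = LB_D1_VBLANK_INTERRUPT;
	handle_d1_vblank();	/* normal path */
	handle_d1_vblank();	/* spurious event: warns, then clears and handles anyway */
	return 0;
}
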
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index e6a32c4e4040..65d6ba6621ac 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -214,7 +214,7 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
214 unsigned int i; 214 unsigned int i;
215 u32 dspr = 0; 215 u32 dspr = 0;
216 216
217 for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes); ++i) { 217 for (i = 0; i < rcrtc->group->num_planes; ++i) {
218 struct rcar_du_plane *plane = &rcrtc->group->planes[i]; 218 struct rcar_du_plane *plane = &rcrtc->group->planes[i];
219 unsigned int j; 219 unsigned int j;
220 220
@@ -398,6 +398,19 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
398 if (!rcrtc->started) 398 if (!rcrtc->started)
399 return; 399 return;
400 400
401 /* Disable all planes and wait for the change to take effect. This is
402 * required as the DSnPR registers are updated on vblank, and no vblank
403 * will occur once the CRTC is stopped. Disabling planes when starting
404 * the CRTC thus wouldn't be enough as it would start scanning out
405 * immediately from old frame buffers until the next vblank.
406 *
407 * This increases the CRTC stop delay, especially when multiple CRTCs
408 * are stopped in one operation as we now wait for one vblank per CRTC.
409 * Whether this can be improved needs to be researched.
410 */
411 rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
412 drm_crtc_wait_one_vblank(crtc);
413
401 /* Disable vertical blanking interrupt reporting. We first need to wait 414 /* Disable vertical blanking interrupt reporting. We first need to wait
402 * for page flip completion before stopping the CRTC as userspace 415 * for page flip completion before stopping the CRTC as userspace
403 * expects page flips to eventually complete. 416 * expects page flips to eventually complete.
@@ -432,7 +445,7 @@ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
432 rcar_du_crtc_start(rcrtc); 445 rcar_du_crtc_start(rcrtc);
433 446
434 /* Commit the planes state. */ 447 /* Commit the planes state. */
435 for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes); ++i) { 448 for (i = 0; i < rcrtc->group->num_planes; ++i) {
436 struct rcar_du_plane *plane = &rcrtc->group->planes[i]; 449 struct rcar_du_plane *plane = &rcrtc->group->planes[i];
437 450
438 if (plane->plane.state->crtc != &rcrtc->crtc) 451 if (plane->plane.state->crtc != &rcrtc->crtc)
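
The comment in rcar_du_crtc_stop() captures a latching hazard: DSnPR only takes effect at vblank, and a stopped CRTC produces no more vblanks, so the plane-disable write must be flushed with drm_crtc_wait_one_vblank() before stopping. A toy model of a vblank-latched register makes the hazard concrete (hypothetical names):

#include <stdio.h>

struct latched_reg {
	unsigned int pending;	/* value last written by the CPU */
	unsigned int active;	/* value the hardware actually scans out from */
};

static void write_reg(struct latched_reg *r, unsigned int v)
{
	r->pending = v;		/* takes effect only at the next vblank */
}

static void vblank(struct latched_reg *r)
{
	r->active = r->pending;	/* hardware latches here */
}

int main(void)
{
	struct latched_reg dsnpr = { .pending = 0xf, .active = 0xf };

	write_reg(&dsnpr, 0);	/* disable all planes */
	/* stopping the CRTC at this point would scan out from 0xf forever */
	printf("before vblank: active=%#x\n", dsnpr.active);
	vblank(&dsnpr);		/* the drm_crtc_wait_one_vblank() analogue */
	printf("after vblank:  active=%#x\n", dsnpr.active);
	return 0;
}
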
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index da1216a73969..780ca11512ba 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -190,7 +190,7 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
190 /* DRM/KMS objects */ 190 /* DRM/KMS objects */
191 ret = rcar_du_modeset_init(rcdu); 191 ret = rcar_du_modeset_init(rcdu);
192 if (ret < 0) { 192 if (ret < 0) {
193 dev_err(&pdev->dev, "failed to initialize DRM/KMS\n"); 193 dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
194 goto done; 194 goto done;
195 } 195 }
196 196
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
index 7b414b31c3be..d7318e1a6b00 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -30,6 +30,7 @@ struct rcar_du_device;
30 * @used_crtcs: number of CRTCs currently in use 30 * @used_crtcs: number of CRTCs currently in use
31 * @lock: protects the dptsr_planes field and the DPTSR register 31 * @lock: protects the dptsr_planes field and the DPTSR register
32 * @dptsr_planes: bitmask of planes driven by dot-clock and timing generator 1 32 * @dptsr_planes: bitmask of planes driven by dot-clock and timing generator 1
33 * @num_planes: number of planes in the group
33 * @planes: planes handled by the group 34 * @planes: planes handled by the group
34 */ 35 */
35struct rcar_du_group { 36struct rcar_du_group {
@@ -44,6 +45,7 @@ struct rcar_du_group {
44 struct mutex lock; 45 struct mutex lock;
45 unsigned int dptsr_planes; 46 unsigned int dptsr_planes;
46 47
48 unsigned int num_planes;
47 struct rcar_du_plane planes[RCAR_DU_NUM_KMS_PLANES]; 49 struct rcar_du_plane planes[RCAR_DU_NUM_KMS_PLANES];
48}; 50};
49 51
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 20859aae882e..56518eb1269a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -336,7 +336,7 @@ static int rcar_du_atomic_check(struct drm_device *dev,
336 dev_dbg(rcdu->dev, "%s: finding free planes for group %u\n", 336 dev_dbg(rcdu->dev, "%s: finding free planes for group %u\n",
337 __func__, index); 337 __func__, index);
338 338
339 for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) { 339 for (i = 0; i < group->num_planes; ++i) {
340 struct rcar_du_plane *plane = &group->planes[i]; 340 struct rcar_du_plane *plane = &group->planes[i];
341 struct rcar_du_plane_state *plane_state; 341 struct rcar_du_plane_state *plane_state;
342 struct drm_plane_state *s; 342 struct drm_plane_state *s;
@@ -495,8 +495,10 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
495 495
496 /* Allocate the commit object. */ 496 /* Allocate the commit object. */
497 commit = kzalloc(sizeof(*commit), GFP_KERNEL); 497 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
498 if (commit == NULL) 498 if (commit == NULL) {
499 return -ENOMEM; 499 ret = -ENOMEM;
500 goto error;
501 }
500 502
501 INIT_WORK(&commit->work, rcar_du_atomic_work); 503 INIT_WORK(&commit->work, rcar_du_atomic_work);
502 commit->dev = dev; 504 commit->dev = dev;
@@ -519,7 +521,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
519 521
520 if (ret) { 522 if (ret) {
521 kfree(commit); 523 kfree(commit);
522 return ret; 524 goto error;
523 } 525 }
524 526
525 /* Swap the state, this is the point of no return. */ 527 /* Swap the state, this is the point of no return. */
@@ -531,6 +533,10 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
531 rcar_du_atomic_complete(commit); 533 rcar_du_atomic_complete(commit);
532 534
533 return 0; 535 return 0;
536
537error:
538 drm_atomic_helper_cleanup_planes(dev, state);
539 return ret;
534} 540}
535 541
536/* ----------------------------------------------------------------------------- 542/* -----------------------------------------------------------------------------
@@ -573,7 +579,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
573 if (!entity) { 579 if (!entity) {
574 dev_dbg(rcdu->dev, "unconnected endpoint %s, skipping\n", 580 dev_dbg(rcdu->dev, "unconnected endpoint %s, skipping\n",
575 ep->local_node->full_name); 581 ep->local_node->full_name);
576 return 0; 582 return -ENODEV;
577 } 583 }
578 584
579 entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0); 585 entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);
@@ -596,7 +602,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
596 encoder->full_name); 602 encoder->full_name);
597 of_node_put(entity_ep_node); 603 of_node_put(entity_ep_node);
598 of_node_put(encoder); 604 of_node_put(encoder);
599 return 0; 605 return -ENODEV;
600 } 606 }
601 607
602 break; 608 break;
@@ -625,7 +631,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
625 encoder->full_name); 631 encoder->full_name);
626 of_node_put(encoder); 632 of_node_put(encoder);
627 of_node_put(connector); 633 of_node_put(connector);
628 return 0; 634 return -EINVAL;
629 } 635 }
630 } else { 636 } else {
631 /* 637 /*
@@ -639,7 +645,12 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
639 of_node_put(encoder); 645 of_node_put(encoder);
640 of_node_put(connector); 646 of_node_put(connector);
641 647
642 return ret < 0 ? ret : 1; 648 if (ret && ret != -EPROBE_DEFER)
649 dev_warn(rcdu->dev,
650 "failed to initialize encoder %s (%d), skipping\n",
651 encoder->full_name, ret);
652
653 return ret;
643} 654}
644 655
645static int rcar_du_encoders_init(struct rcar_du_device *rcdu) 656static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
@@ -688,12 +699,10 @@ static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
688 return ret; 699 return ret;
689 } 700 }
690 701
691 dev_info(rcdu->dev,
692 "encoder initialization failed, skipping\n");
693 continue; 702 continue;
694 } 703 }
695 704
696 num_encoders += ret; 705 num_encoders++;
697 } 706 }
698 707
699 return num_encoders; 708 return num_encoders;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 3e30d84b798f..c66986414bb4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -302,13 +302,15 @@ rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane)
302 struct rcar_du_plane_state *state; 302 struct rcar_du_plane_state *state;
303 struct rcar_du_plane_state *copy; 303 struct rcar_du_plane_state *copy;
304 304
305 if (WARN_ON(!plane->state))
306 return NULL;
307
305 state = to_rcar_plane_state(plane->state); 308 state = to_rcar_plane_state(plane->state);
306 copy = kmemdup(state, sizeof(*state), GFP_KERNEL); 309 copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
307 if (copy == NULL) 310 if (copy == NULL)
308 return NULL; 311 return NULL;
309 312
310 if (copy->state.fb) 313 __drm_atomic_helper_plane_duplicate_state(plane, &copy->state);
311 drm_framebuffer_reference(copy->state.fb);
312 314
313 return &copy->state; 315 return &copy->state;
314} 316}
@@ -316,9 +318,7 @@ rcar_du_plane_atomic_duplicate_state(struct drm_plane *plane)
316static void rcar_du_plane_atomic_destroy_state(struct drm_plane *plane, 318static void rcar_du_plane_atomic_destroy_state(struct drm_plane *plane,
317 struct drm_plane_state *state) 319 struct drm_plane_state *state)
318{ 320{
319 if (state->fb) 321 __drm_atomic_helper_plane_destroy_state(plane, state);
320 drm_framebuffer_unreference(state->fb);
321
322 kfree(to_rcar_plane_state(state)); 322 kfree(to_rcar_plane_state(state));
323} 323}
324 324
@@ -390,7 +390,6 @@ static const uint32_t formats[] = {
390int rcar_du_planes_init(struct rcar_du_group *rgrp) 390int rcar_du_planes_init(struct rcar_du_group *rgrp)
391{ 391{
392 struct rcar_du_device *rcdu = rgrp->dev; 392 struct rcar_du_device *rcdu = rgrp->dev;
393 unsigned int num_planes;
394 unsigned int crtcs; 393 unsigned int crtcs;
395 unsigned int i; 394 unsigned int i;
396 int ret; 395 int ret;
@@ -398,11 +397,11 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
398 /* Create one primary plane per CRTC in this group and seven overlay 397 /* Create one primary plane per CRTC in this group and seven overlay
399 * planes. 398 * planes.
400 */ 399 */
401 num_planes = rgrp->num_crtcs + 7; 400 rgrp->num_planes = rgrp->num_crtcs + 7;
402 401
403 crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index)); 402 crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index));
404 403
405 for (i = 0; i < num_planes; ++i) { 404 for (i = 0; i < rgrp->num_planes; ++i) {
406 enum drm_plane_type type = i < rgrp->num_crtcs 405 enum drm_plane_type type = i < rgrp->num_crtcs
407 ? DRM_PLANE_TYPE_PRIMARY 406 ? DRM_PLANE_TYPE_PRIMARY
408 : DRM_PLANE_TYPE_OVERLAY; 407 : DRM_PLANE_TYPE_OVERLAY;
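
The duplicate_state/destroy_state hooks above now kmemdup the whole derived rcar_du_plane_state and hand the base struct's bookkeeping (taking the fb reference, resetting per-commit fields) to __drm_atomic_helper_plane_duplicate_state() rather than open-coding the fb refcount. A standalone sketch of that subclass-embedding idiom (base_duplicate() is a simplified stand-in for the helper; the base struct must stay the first member so the casts work):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct base_state { int fb_ref; };
struct rcar_state { struct base_state state; int alpha; };	/* base first */

static void base_duplicate(const struct base_state *old, struct base_state *new)
{
	*new = *old;
	new->fb_ref = old->fb_ref + 1;	/* the copy holds its own fb reference */
}

static struct base_state *duplicate(const struct base_state *old)
{
	struct rcar_state *copy = malloc(sizeof(*copy));

	if (!copy)
		return NULL;
	memcpy(copy, old, sizeof(*copy));	/* kmemdup() of the derived state */
	base_duplicate(old, &copy->state);	/* fix up base-struct bookkeeping */
	return &copy->state;
}

int main(void)
{
	struct rcar_state orig = { .state = { .fb_ref = 1 }, .alpha = 200 };
	struct base_state *dup = duplicate(&orig.state);

	printf("alpha=%d fb_ref=%d\n",
	       ((struct rcar_state *)dup)->alpha, dup->fb_ref);	/* alpha=200 fb_ref=2 */
	free(dup);
	return 0;
}
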
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 3962176ee713..01b558fe3695 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -21,6 +21,7 @@
21#include <drm/drm_fb_helper.h> 21#include <drm/drm_fb_helper.h>
22#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/pm_runtime.h> 23#include <linux/pm_runtime.h>
24#include <linux/module.h>
24#include <linux/of_graph.h> 25#include <linux/of_graph.h>
25#include <linux/component.h> 26#include <linux/component.h>
26 27
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 4557f335a8a5..dc65161d7cad 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -19,6 +19,7 @@
19#include <drm/drm_plane_helper.h> 19#include <drm/drm_plane_helper.h>
20 20
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/module.h>
22#include <linux/platform_device.h> 23#include <linux/platform_device.h>
23#include <linux/clk.h> 24#include <linux/clk.h>
24#include <linux/of.h> 25#include <linux/of.h>
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index d6b55e3e3716..07b26972f487 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -56,15 +56,14 @@ static inline struct tegra_dpaux *work_to_dpaux(struct work_struct *work)
56 return container_of(work, struct tegra_dpaux, work); 56 return container_of(work, struct tegra_dpaux, work);
57} 57}
58 58
59static inline unsigned long tegra_dpaux_readl(struct tegra_dpaux *dpaux, 59static inline u32 tegra_dpaux_readl(struct tegra_dpaux *dpaux,
60 unsigned long offset) 60 unsigned long offset)
61{ 61{
62 return readl(dpaux->regs + (offset << 2)); 62 return readl(dpaux->regs + (offset << 2));
63} 63}
64 64
65static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux, 65static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux,
66 unsigned long value, 66 u32 value, unsigned long offset)
67 unsigned long offset)
68{ 67{
69 writel(value, dpaux->regs + (offset << 2)); 68 writel(value, dpaux->regs + (offset << 2));
70} 69}
@@ -72,34 +71,32 @@ static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux,
72static void tegra_dpaux_write_fifo(struct tegra_dpaux *dpaux, const u8 *buffer, 71static void tegra_dpaux_write_fifo(struct tegra_dpaux *dpaux, const u8 *buffer,
73 size_t size) 72 size_t size)
74{ 73{
75 unsigned long offset = DPAUX_DP_AUXDATA_WRITE(0);
76 size_t i, j; 74 size_t i, j;
77 75
78 for (i = 0; i < size; i += 4) { 76 for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
79 size_t num = min_t(size_t, size - i, 4); 77 size_t num = min_t(size_t, size - i * 4, 4);
80 unsigned long value = 0; 78 u32 value = 0;
81 79
82 for (j = 0; j < num; j++) 80 for (j = 0; j < num; j++)
83 value |= buffer[i + j] << (j * 8); 81 value |= buffer[i * 4 + j] << (j * 8);
84 82
85 tegra_dpaux_writel(dpaux, value, offset++); 83 tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXDATA_WRITE(i));
86 } 84 }
87} 85}
88 86
89static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer, 87static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer,
90 size_t size) 88 size_t size)
91{ 89{
92 unsigned long offset = DPAUX_DP_AUXDATA_READ(0);
93 size_t i, j; 90 size_t i, j;
94 91
95 for (i = 0; i < size; i += 4) { 92 for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
96 size_t num = min_t(size_t, size - i, 4); 93 size_t num = min_t(size_t, size - i * 4, 4);
97 unsigned long value; 94 u32 value;
98 95
99 value = tegra_dpaux_readl(dpaux, offset++); 96 value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXDATA_READ(i));
100 97
101 for (j = 0; j < num; j++) 98 for (j = 0; j < num; j++)
102 buffer[i + j] = value >> (j * 8); 99 buffer[i * 4 + j] = value >> (j * 8);
103 } 100 }
104} 101}
105 102
@@ -250,7 +247,7 @@ static irqreturn_t tegra_dpaux_irq(int irq, void *data)
250{ 247{
251 struct tegra_dpaux *dpaux = data; 248 struct tegra_dpaux *dpaux = data;
252 irqreturn_t ret = IRQ_HANDLED; 249 irqreturn_t ret = IRQ_HANDLED;
253 unsigned long value; 250 u32 value;
254 251
255 /* clear interrupts */ 252 /* clear interrupts */
256 value = tegra_dpaux_readl(dpaux, DPAUX_INTR_AUX); 253 value = tegra_dpaux_readl(dpaux, DPAUX_INTR_AUX);
@@ -273,7 +270,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
273{ 270{
274 struct tegra_dpaux *dpaux; 271 struct tegra_dpaux *dpaux;
275 struct resource *regs; 272 struct resource *regs;
276 unsigned long value; 273 u32 value;
277 int err; 274 int err;
278 275
279 dpaux = devm_kzalloc(&pdev->dev, sizeof(*dpaux), GFP_KERNEL); 276 dpaux = devm_kzalloc(&pdev->dev, sizeof(*dpaux), GFP_KERNEL);
@@ -465,7 +462,7 @@ int tegra_dpaux_detach(struct tegra_dpaux *dpaux)
465 462
466enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux) 463enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux)
467{ 464{
468 unsigned long value; 465 u32 value;
469 466
470 value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT); 467 value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXSTAT);
471 468
@@ -477,7 +474,7 @@ enum drm_connector_status tegra_dpaux_detect(struct tegra_dpaux *dpaux)
477 474
478int tegra_dpaux_enable(struct tegra_dpaux *dpaux) 475int tegra_dpaux_enable(struct tegra_dpaux *dpaux)
479{ 476{
480 unsigned long value; 477 u32 value;
481 478
482 value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) | 479 value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
483 DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) | 480 DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
@@ -495,7 +492,7 @@ int tegra_dpaux_enable(struct tegra_dpaux *dpaux)
495 492
496int tegra_dpaux_disable(struct tegra_dpaux *dpaux) 493int tegra_dpaux_disable(struct tegra_dpaux *dpaux)
497{ 494{
498 unsigned long value; 495 u32 value;
499 496
500 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); 497 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
501 value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN; 498 value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
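
The dpaux FIFO helpers now address whole 32-bit data registers (DPAUX_DP_AUXDATA_WRITE(i) / _READ(i)) and loop DIV_ROUND_UP(size, 4) times, instead of post-incrementing a word offset from inside a byte-stepped loop. The little-endian byte-to-word packing they rely on, extracted into a checkable userspace sketch:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Pack `size` bytes into little-endian 32-bit FIFO words. */
static void pack_fifo(const uint8_t *buf, size_t size, uint32_t *words)
{
	size_t i, j;

	for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
		size_t num = size - i * 4 < 4 ? size - i * 4 : 4;
		uint32_t value = 0;

		for (j = 0; j < num; j++)
			value |= (uint32_t)buf[i * 4 + j] << (j * 8);
		words[i] = value;
	}
}

int main(void)
{
	const uint8_t msg[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t w[2] = { 0, 0 };

	pack_fifo(msg, sizeof(msg), w);
	printf("%#010x %#010x\n", (unsigned)w[0], (unsigned)w[1]);
	/* prints 0x44332211 0x00000055 */
	return 0;
}
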
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index bfad15a913a0..427f50c6803c 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -124,14 +124,22 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
124 return -ENOMEM; 124 return -ENOMEM;
125 125
126 if (iommu_present(&platform_bus_type)) { 126 if (iommu_present(&platform_bus_type)) {
127 struct iommu_domain_geometry *geometry;
128 u64 start, end;
129
127 tegra->domain = iommu_domain_alloc(&platform_bus_type); 130 tegra->domain = iommu_domain_alloc(&platform_bus_type);
128 if (!tegra->domain) { 131 if (!tegra->domain) {
129 err = -ENOMEM; 132 err = -ENOMEM;
130 goto free; 133 goto free;
131 } 134 }
132 135
133 DRM_DEBUG("IOMMU context initialized\n"); 136 geometry = &tegra->domain->geometry;
134 drm_mm_init(&tegra->mm, 0, SZ_2G); 137 start = geometry->aperture_start;
138 end = geometry->aperture_end;
139
140 DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n",
141 start, end);
142 drm_mm_init(&tegra->mm, start, end - start + 1);
135 } 143 }
136 144
137 mutex_init(&tegra->clients_lock); 145 mutex_init(&tegra->clients_lock);
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 1217272a51f2..01e16e146bfe 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -189,7 +189,6 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
189static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo) 189static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
190{ 190{
191 struct scatterlist *s; 191 struct scatterlist *s;
192 struct sg_table *sgt;
193 unsigned int i; 192 unsigned int i;
194 193
195 bo->pages = drm_gem_get_pages(&bo->gem); 194 bo->pages = drm_gem_get_pages(&bo->gem);
@@ -198,36 +197,28 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
198 197
199 bo->num_pages = bo->gem.size >> PAGE_SHIFT; 198 bo->num_pages = bo->gem.size >> PAGE_SHIFT;
200 199
201 sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages); 200 bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
202 if (IS_ERR(sgt)) 201 if (IS_ERR(bo->sgt))
203 goto put_pages; 202 goto put_pages;
204 203
205 /* 204 /*
206 * Fake up the SG table so that dma_map_sg() can be used to flush the 205 * Fake up the SG table so that dma_sync_sg_for_device() can be used
207 * pages associated with it. Note that this relies on the fact that 206 * to flush the pages associated with it.
208 * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
209 * only cache maintenance.
210 * 207 *
211 * TODO: Replace this by drm_clflash_sg() once it can be implemented 208 * TODO: Replace this by drm_clflash_sg() once it can be implemented
212 * without relying on symbols that are not exported. 209 * without relying on symbols that are not exported.
213 */ 210 */
214 for_each_sg(sgt->sgl, s, sgt->nents, i) 211 for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
215 sg_dma_address(s) = sg_phys(s); 212 sg_dma_address(s) = sg_phys(s);
216 213
217 if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) 214 dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
218 goto release_sgt; 215 DMA_TO_DEVICE);
219
220 bo->sgt = sgt;
221 216
222 return 0; 217 return 0;
223 218
224release_sgt:
225 sg_free_table(sgt);
226 kfree(sgt);
227 sgt = ERR_PTR(-ENOMEM);
228put_pages: 219put_pages:
229 drm_gem_put_pages(&bo->gem, bo->pages, false, false); 220 drm_gem_put_pages(&bo->gem, bo->pages, false, false);
230 return PTR_ERR(sgt); 221 return PTR_ERR(bo->sgt);
231} 222}
232 223
233static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) 224static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 7a207ca547be..6394547cf67a 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -328,6 +328,8 @@ static int __init vgem_init(void)
328 goto out; 328 goto out;
329 } 329 }
330 330
331 drm_dev_set_unique(vgem_device, "vgem");
332
331 ret = drm_dev_register(vgem_device, 0); 333 ret = drm_dev_register(vgem_device, 0);
332 334
333 if (ret) 335 if (ret)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index f4ec816e9468..88a39165edd5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -37,6 +37,26 @@ int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
37 return 0; 37 return 0;
38} 38}
39 39
40static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
41{
42 struct apertures_struct *ap;
43 bool primary;
44
45 ap = alloc_apertures(1);
46 if (!ap)
47 return;
48
49 ap->ranges[0].base = pci_resource_start(pci_dev, 0);
50 ap->ranges[0].size = pci_resource_len(pci_dev, 0);
51
52 primary = pci_dev->resource[PCI_ROM_RESOURCE].flags
53 & IORESOURCE_ROM_SHADOW;
54
55 remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
56
57 kfree(ap);
58}
59
40int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev) 60int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
41{ 61{
42 struct drm_device *dev; 62 struct drm_device *dev;
@@ -52,27 +72,11 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
 		struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
 		bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
 
-		if (vga) {
-			/*
-			 * Need to make sure we don't have two drivers
-			 * for the same hardware here. Some day we
-			 * will simply kick out the firmware
-			 * (vesa/efi) framebuffer.
-			 *
-			 * Virtual hardware specs for virtio-vga are
-			 * not finalized yet, therefore we can't add
-			 * code for that yet.
-			 *
-			 * So ignore the device for the time being,
-			 * and suggest to the user use the device
-			 * variant without vga compatibility mode.
-			 */
-			DRM_ERROR("virtio-vga not (yet) supported\n");
-			DRM_ERROR("please use virtio-gpu-pci instead\n");
-			ret = -ENODEV;
-			goto err_free;
-		}
+		DRM_INFO("pci: %s detected\n",
+			 vga ? "virtio-vga" : "virtio-gpu-pci");
 		dev->pdev = pdev;
+		if (vga)
+			virtio_pci_kick_out_firmware_fb(pdev);
 	}
 
 	ret = drm_dev_register(dev, 0);
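
Instead of refusing to bind when the virtio GPU exposes VGA compatibility, the driver now evicts whatever firmware framebuffer (VESA/EFI) owns the VGA aperture before registering, via the standard remove_conflicting_framebuffers() helper. Testing IORESOURCE_ROM_SHADOW on the ROM resource is the usual way to detect that a PCI device was the primary boot display. Reduced to its essentials, the pattern generalizes to any PCI display driver; "mydrmfb" below is a placeholder name:

	#include <linux/fb.h>
	#include <linux/pci.h>

	/* Generic firmware-framebuffer eviction for a PCI display device. */
	static void kick_out_firmware_fb(struct pci_dev *pdev)
	{
		struct apertures_struct *ap = alloc_apertures(1);
		bool primary;

		if (!ap)
			return;

		/* The aperture the firmware fb scans out of (BAR 0 here). */
		ap->ranges[0].base = pci_resource_start(pdev, 0);
		ap->ranges[0].size = pci_resource_len(pdev, 0);

		/* A shadowed ROM means this was the primary boot display. */
		primary = pdev->resource[PCI_ROM_RESOURCE].flags &
			  IORESOURCE_ROM_SHADOW;

		remove_conflicting_framebuffers(ap, "mydrmfb", primary);
		kfree(ap);
	}
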
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index e5a2c092460b..6d4db2dba90b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -162,6 +162,7 @@ struct virtio_gpu_device {
 	struct virtio_gpu_queue ctrlq;
 	struct virtio_gpu_queue cursorq;
 	struct list_head free_vbufs;
+	spinlock_t free_vbufs_lock;
 	void *vbufs;
 	bool vqs_ready;
 
@@ -171,6 +172,7 @@ struct virtio_gpu_device {
 	wait_queue_head_t resp_wq;
 	/* current display info */
 	spinlock_t display_info_lock;
+	bool display_info_pending;
 
 	struct virtio_gpu_fence_driver fence_drv;
 
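
The two fields added to struct virtio_gpu_device pair up with the changes that follow: free_vbufs_lock serializes the vbuffer free list against the reclaim path, and display_info_pending lets driver load wait for the GET_DISPLAY_INFO response. Annotated sketch of just the additions, with the surrounding fields elided:

	struct virtio_gpu_device {
		/* ... */
		struct list_head free_vbufs;
		spinlock_t free_vbufs_lock;	/* protects free_vbufs */
		/* ... */
		spinlock_t display_info_lock;
		bool display_info_pending;	/* set on query, cleared by the response */
		/* ... */
	};
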
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 132405f15389..782766c00d70 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -137,9 +137,11 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 	virtio_device_ready(vgdev->vdev);
 	vgdev->vqs_ready = true;
 
+	virtio_gpu_cmd_get_display_info(vgdev);
+	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
+			   5 * HZ);
 	if (virtio_gpu_fbdev)
 		virtio_gpu_fbdev_init(vgdev);
-	virtio_gpu_cmd_get_display_info(vgdev);
 
 	return 0;
 
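
virtgpu_kms.c now sends the display-info query before fbdev init and blocks, bounded at five seconds, until the response handler clears display_info_pending; previously fbdev came up first and could race the mode data. A sketch of the request side of this handshake (the -ETIMEDOUT handling is illustrative; the driver itself simply proceeds after the timeout):

	#include <linux/wait.h>

	static int query_display_info(struct virtio_gpu_device *vgdev)
	{
		/* The pending flag is raised inside the cmd helper, before queueing. */
		virtio_gpu_cmd_get_display_info(vgdev);

		/* The response callback clears the flag and wakes resp_wq. */
		if (!wait_event_timeout(vgdev->resp_wq,
					!vgdev->display_info_pending, 5 * HZ))
			return -ETIMEDOUT;
		return 0;
	}
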
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 8fa6513eb3bc..1698669f4185 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -79,6 +79,7 @@ int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
 	void *ptr;
 
 	INIT_LIST_HEAD(&vgdev->free_vbufs);
+	spin_lock_init(&vgdev->free_vbufs_lock);
 	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
 	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
 	size = count * VBUFFER_SIZE;
@@ -106,6 +107,7 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
 	count += virtqueue_get_vring_size(vgdev->ctrlq.vq);
 	count += virtqueue_get_vring_size(vgdev->cursorq.vq);
 
+	spin_lock(&vgdev->free_vbufs_lock);
 	for (i = 0; i < count; i++) {
 		if (WARN_ON(list_empty(&vgdev->free_vbufs)))
 			return;
@@ -113,6 +115,7 @@ void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
 					struct virtio_gpu_vbuffer, list);
 		list_del(&vbuf->list);
 	}
+	spin_unlock(&vgdev->free_vbufs_lock);
 	kfree(vgdev->vbufs);
 }
 
@@ -123,10 +126,12 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
 {
 	struct virtio_gpu_vbuffer *vbuf;
 
+	spin_lock(&vgdev->free_vbufs_lock);
 	BUG_ON(list_empty(&vgdev->free_vbufs));
 	vbuf = list_first_entry(&vgdev->free_vbufs,
 				struct virtio_gpu_vbuffer, list);
 	list_del(&vbuf->list);
+	spin_unlock(&vgdev->free_vbufs_lock);
 	memset(vbuf, 0, VBUFFER_SIZE);
 
 	BUG_ON(size > MAX_INLINE_CMD_SIZE);
@@ -201,7 +206,9 @@ static void free_vbuf(struct virtio_gpu_device *vgdev,
 	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
 		kfree(vbuf->resp_buf);
 	kfree(vbuf->data_buf);
+	spin_lock(&vgdev->free_vbufs_lock);
 	list_add(&vbuf->list, &vgdev->free_vbufs);
+	spin_unlock(&vgdev->free_vbufs_lock);
 }
 
 static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
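
The virtgpu_vq.c hunks wrap every traversal and mutation of free_vbufs in the new spinlock, since buffers are handed back from virtqueue reclaim concurrently with virtio_gpu_get_vbuf() pulling them off the list. One caveat worth flagging: the WARN_ON early return in virtio_gpu_free_vbufs() now bails out with free_vbufs_lock still held; that path only fires on a driver bug, but it does leak the lock. The get/put pair is the classic spinlock-protected free list, sketched here with hypothetical names:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct pool {
		spinlock_t lock;	/* protects free */
		struct list_head free;	/* of struct buf.node */
	};

	struct buf {
		struct list_head node;
		/* payload ... */
	};

	static struct buf *pool_get(struct pool *p)
	{
		struct buf *b;

		spin_lock(&p->lock);
		b = list_first_entry_or_null(&p->free, struct buf, node);
		if (b)
			list_del(&b->node);
		spin_unlock(&p->lock);
		return b;
	}

	static void pool_put(struct pool *p, struct buf *b)
	{
		spin_lock(&p->lock);
		list_add(&b->node, &p->free);
		spin_unlock(&p->lock);
	}
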
@@ -534,6 +541,7 @@ static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
 		}
 	}
 
+	vgdev->display_info_pending = false;
 	spin_unlock(&vgdev->display_info_lock);
 	wake_up(&vgdev->resp_wq);
 
@@ -558,6 +566,7 @@ int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
 					   resp_buf);
 	memset(cmd_p, 0, sizeof(*cmd_p));
 
+	vgdev->display_info_pending = true;
 	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 	return 0;
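
Finally, the pending flag is raised just before the GET_DISPLAY_INFO command is queued and cleared under display_info_lock once the response lands, so the waiter in driver load cannot observe a stale "done" state even for a fast reply. Sketch of the response side, matching the callback hunk above (names taken from the diff; the parsing step is elided):

	/* Runs from the ctrl-queue response path. */
	static void handle_display_info_resp(struct virtio_gpu_device *vgdev)
	{
		spin_lock(&vgdev->display_info_lock);
		/* ... copy per-scanout geometry out of the response ... */
		vgdev->display_info_pending = false;
		spin_unlock(&vgdev->display_info_lock);
		wake_up(&vgdev->resp_wq);	/* release the waiter in driver load */
	}
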