author	Linus Torvalds <torvalds@linux-foundation.org>	2018-06-21 23:32:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-06-21 23:32:09 -0400
commit	1cfea546b10c8fec218973c3f3c39ff797a3e50c (patch)
tree	d2aa389291efa4d5552baa2502706a8a95c30ca6
parent	27db64f65f1be2f2ee741a1bf20d8d13d62c167f (diff)
parent	f3294568bbb19cbfc53451de192df6daae80f9b3 (diff)
Merge tag 'drm-fixes-2018-06-22' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
 "Just run of the mill fixes,

  core:
   - regression fix in device unplug

  qxl:
   - regression fix for might sleep in cursor handling

  nouveau:
   - regression fix in multi-screen cursor handling

  amdgpu:
   - switch off DC by default on Kaveri and older
   - some minor fixes

  i915:
   - some GEM regression fixes
   - doublescan mode fixes

  sun4i:
   - revert fix for a regression

  sii8620 bridge:
   - misc fixes"

* tag 'drm-fixes-2018-06-22' of git://anongit.freedesktop.org/drm/drm: (28 commits)
  drm/bridge/sii8620: fix display of packed pixel modes in MHL2
  drm/amdgpu: Make amdgpu_vram_mgr_bo_invisible_size always accurate
  drm/amdgpu: Refactor amdgpu_vram_mgr_bo_invisible_size helper
  drm/amdgpu: Update pin_size values before unpinning BO
  drm/amdgpu:All UVD instances share one idle_work handle
  drm/amdgpu: Don't default to DC support for Kaveri and older
  drm/amdgpu: Use kvmalloc_array for allocating VRAM manager nodes array
  drm/amd/pp: Fix uninitialized variable
  drm/i915: Enable provoking vertex fix on Gen9 systems.
  drm/i915: Fix context ban and hang accounting for client
  drm/i915: Turn off g4x DP port in .post_disable()
  drm/i915: Disallow interlaced modes on g4x DP outputs
  drm/i915: Fix PIPESTAT irq ack on i965/g4x
  drm/i915: Allow DBLSCAN user modes with eDP/LVDS/DSI
  drm/i915/execlists: Avoid putting the error pointer
  drm/i915: Apply batch location restrictions before pinning
  drm/nouveau/kms/nv50-: cursors always use core channel vram ctxdma
  Revert "drm/sun4i: Handle DRM_BUS_FLAG_PIXDATA_*EDGE"
  drm/atmel-hlcdc: check stride values in the first plane
  drm/bridge/sii8620: fix HDMI cable connection to dongle
  ...
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_device.c	|  10
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_object.c	|  24
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h	|   1
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c	|  14
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h	|   2
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c	|  39
-rw-r--r--	drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c	|   2
-rw-r--r--	drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c	|   2
-rw-r--r--	drivers/gpu/drm/bridge/sil-sii8620.c	| 309
-rw-r--r--	drivers/gpu/drm/drm_drv.c	|  14
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	|  21
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	|  57
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_context.c	|   2
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	|  49
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	|  12
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h	|   5
-rw-r--r--	drivers/gpu/drm/i915/intel_crt.c	|  20
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	|  16
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c	|  34
-rw-r--r--	drivers/gpu/drm/i915/intel_dp_mst.c	|   6
-rw-r--r--	drivers/gpu/drm/i915/intel_dsi.c	|   6
-rw-r--r--	drivers/gpu/drm/i915/intel_dvo.c	|   6
-rw-r--r--	drivers/gpu/drm/i915/intel_hdmi.c	|   6
-rw-r--r--	drivers/gpu/drm/i915/intel_lrc.c	|  18
-rw-r--r--	drivers/gpu/drm/i915/intel_lvds.c	|   5
-rw-r--r--	drivers/gpu/drm/i915/intel_sdvo.c	|   6
-rw-r--r--	drivers/gpu/drm/i915/intel_tv.c	|  12
-rw-r--r--	drivers/gpu/drm/nouveau/dispnv50/curs507a.c	|   2
-rw-r--r--	drivers/gpu/drm/nouveau/dispnv50/wndw.c	|  13
-rw-r--r--	drivers/gpu/drm/qxl/qxl_display.c	|   7
-rw-r--r--	drivers/gpu/drm/sun4i/sun4i_tcon.c	|  25
31 files changed, 408 insertions, 337 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3317d1536f4f..6e5284e6c028 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2158,10 +2158,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
 	case CHIP_BONAIRE:
-	case CHIP_HAWAII:
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
+		/*
+		 * We have systems in the wild with these ASICs that require
+		 * LVDS and VGA support which is not supported with DC.
+		 *
+		 * Fallback to the non-DC driver here by default so as not to
+		 * cause regressions.
+		 */
+		return amdgpu_dc > 0;
+	case CHIP_HAWAII:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 	case CHIP_POLARIS10:
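The new early return keys DC off the amdgpu.dc module parameter (-1 auto, 0 off, 1 on), so `amdgpu_dc > 0` is true only when the user explicitly opted in. A minimal standalone sketch of that tri-state pattern (the helper name is hypothetical; only the parameter semantics mirror the driver's):

#include <stdbool.h>

static int amdgpu_dc = -1;	/* amdgpu.dc=N on the kernel cmdline */

/* hypothetical helper: enable DC on a legacy ASIC only on explicit request */
static bool wants_dc_on_legacy_asic(void)
{
	return amdgpu_dc > 0;	/* both -1 (auto) and 0 fall back to non-DC */
}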
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5e4e1bd90383..3526efa8960e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -762,8 +762,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 		adev->vram_pin_size += amdgpu_bo_size(bo);
-		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			adev->invisible_pin_size += amdgpu_bo_size(bo);
+		adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
 	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 		adev->gart_pin_size += amdgpu_bo_size(bo);
 	}
@@ -790,25 +789,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	bo->pin_count--;
 	if (bo->pin_count)
 		return 0;
-	for (i = 0; i < bo->placement.num_placement; i++) {
-		bo->placements[i].lpfn = 0;
-		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	if (unlikely(r)) {
-		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
-		goto error;
-	}
 
 	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
 		adev->vram_pin_size -= amdgpu_bo_size(bo);
-		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			adev->invisible_pin_size -= amdgpu_bo_size(bo);
+		adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
 	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
 		adev->gart_pin_size -= amdgpu_bo_size(bo);
 	}
 
-error:
+	for (i = 0; i < bo->placement.num_placement; i++) {
+		bo->placements[i].lpfn = 0;
+		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+	}
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (unlikely(r))
+		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
+
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index e969c879d87e..e5da4654b630 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index bcf68f80bbf0..3ff08e326838 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	unsigned version_major, version_minor, family_id;
 	int i, j, r;
 
-	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
 
 	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	void *ptr;
 	int i, j;
 
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		if (adev->uvd.inst[j].vcpu_bo == NULL)
 			continue;
 
-		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-
 		/* only valid for physical mode */
 		if (adev->asic_type < CHIP_POLARIS10) {
 			for (i = 0; i < adev->uvd.max_handles; ++i)
@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
-		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+		container_of(work, struct amdgpu_device, uvd.idle_work.work);
 	unsigned fences = 0, i, j;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 						       AMD_CG_STATE_GATE);
 		}
 	} else {
-		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 	}
 }
 
@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 	if (amdgpu_sriov_vf(adev))
 		return;
 
-	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
 	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, true);
@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
 	if (!amdgpu_sriov_vf(ring->adev))
-		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index b1579fba134c..8b23a1b00c76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
 	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
-	struct delayed_work	idle_work;
 	struct amdgpu_ring	ring;
 	struct amdgpu_ring	ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
 	struct amdgpu_irq_src	irq;
@@ -62,6 +61,7 @@ struct amdgpu_uvd {
 	bool			address_64_bit;
 	bool			use_ctx_buf;
 	struct amdgpu_uvd_inst	inst[AMDGPU_MAX_UVD_INSTANCES];
+	struct delayed_work	idle_work;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
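Hoisting idle_work from struct amdgpu_uvd_inst into struct amdgpu_uvd leaves one handler per device, so all UVD instances share a single arm/cancel lifecycle. A minimal sketch of that delayed-work pattern, with a hypothetical handler and timeout (not the driver's actual code):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void uvd_idle_handler(struct work_struct *work)
{
	/* power-gate the block when no fences are pending, else re-arm */
}

static DECLARE_DELAYED_WORK(uvd_idle_work, uvd_idle_handler);

static void uvd_ring_begin_use(void)
{
	/* a failed cancel means the idle handler already ran: clocks are down */
	bool set_clocks = !cancel_delayed_work_sync(&uvd_idle_work);

	if (set_clocks) {
		/* ... raise clocks / disable power gating ... */
	}
}

static void uvd_ring_end_use(void)
{
	schedule_delayed_work(&uvd_idle_work, msecs_to_jiffies(1000));
}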
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 9aca653bec07..b6333f92ba45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -97,6 +97,38 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
 }
 
 /**
+ * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ *
+ * @bo: &amdgpu_bo buffer object (must be in VRAM)
+ *
+ * Returns:
+ * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ */
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_mem_reg *mem = &bo->tbo.mem;
+	struct drm_mm_node *nodes = mem->mm_node;
+	unsigned pages = mem->num_pages;
+	u64 usage = 0;
+
+	if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+		return 0;
+
+	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+		return amdgpu_bo_size(bo);
+
+	while (nodes && pages) {
+		usage += nodes->size << PAGE_SHIFT;
+		usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+		pages -= nodes->size;
+		++nodes;
+	}
+
+	return usage;
+}
+
+/**
  * amdgpu_vram_mgr_new - allocate new ranges
  *
  * @man: TTM memory type manager
@@ -135,7 +167,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
 		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
 	}
 
-	nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+	nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
+			       GFP_KERNEL | __GFP_ZERO);
 	if (!nodes)
 		return -ENOMEM;
 
@@ -190,7 +223,7 @@ error:
 		drm_mm_remove_node(&nodes[i]);
 	spin_unlock(&mgr->lock);
 
-	kfree(nodes);
+	kvfree(nodes);
 	return r == -ENOSPC ? 0 : r;
 }
 
@@ -229,7 +262,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
 	atomic64_sub(usage, &mgr->usage);
 	atomic64_sub(vis_usage, &mgr->vis_usage);
 
-	kfree(mem->mm_node);
+	kvfree(mem->mm_node);
 	mem->mm_node = NULL;
 }
 
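kcalloc() demands physically contiguous memory, which can fail for huge BOs that need many drm_mm_node entries; kvmalloc_array() transparently falls back to vmalloc for large requests, and kvfree() (used in both hunks above) releases memory from either allocator. A minimal sketch of the pairing, with a hypothetical wrapper:

#include <linux/mm.h>
#include <drm/drm_mm.h>

static struct drm_mm_node *nodes_alloc(unsigned int num_nodes)
{
	/* __GFP_ZERO preserves the zeroing the old kcalloc() provided */
	return kvmalloc_array(num_nodes, sizeof(struct drm_mm_node),
			      GFP_KERNEL | __GFP_ZERO);
}

static void nodes_free(struct drm_mm_node *nodes)
{
	kvfree(nodes);	/* correct for both kmalloc- and vmalloc-backed memory */
}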
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index dbe4b1f66784..22364875a943 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
 static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
-	int result;
+	int result = 0;
 	uint32_t num_se = 0;
 	uint32_t count, data;
 
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 73c875db45f4..47e0992f3908 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -839,7 +839,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
 		return ret;
 	}
 
-	if (desc->layout.xstride && desc->layout.pstride) {
+	if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
 		int ret;
 
 		ret = drm_plane_create_rotation_property(&plane->base,
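The bug here is array-to-pointer decay: xstride and pstride are arrays, so the old condition tested two always-non-NULL addresses and was unconditionally true; indexing element 0 tests the first plane's actual stride registers. A standalone illustration (the struct shape is invented for the demo):

#include <stdio.h>

struct layout {
	int xstride[4];
	int pstride[4];
};

int main(void)
{
	struct layout l = { { 0 }, { 0 } };

	if (l.xstride && l.pstride)		/* arrays decay: always true */
		printf("old check passes even with zero strides\n");
	if (l.xstride[0] && l.pstride[0])	/* fixed: inspects real values */
		printf("new check passes\n");
	return 0;
}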
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 7ab36042a822..250effa0e6b8 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -36,8 +36,11 @@
 
 #define SII8620_BURST_BUF_LEN 288
 #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
-#define MHL1_MAX_LCLK 225000
-#define MHL3_MAX_LCLK 600000
+
+#define MHL1_MAX_PCLK 75000
+#define MHL1_MAX_PCLK_PP_MODE 150000
+#define MHL3_MAX_PCLK 200000
+#define MHL3_MAX_PCLK_PP_MODE 300000
 
 enum sii8620_mode {
 	CM_DISCONNECTED,
@@ -80,6 +83,9 @@ struct sii8620 {
 	u8 devcap[MHL_DCAP_SIZE];
 	u8 xdevcap[MHL_XDC_SIZE];
 	u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
+	bool feature_complete;
+	bool devcap_read;
+	bool sink_detected;
 	struct edid *edid;
 	unsigned int gen2_write_burst:1;
 	enum sii8620_mt_state mt_state;
@@ -476,7 +482,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count)
 	}
 }
 
-static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
+static void sii8620_identify_sink(struct sii8620 *ctx)
 {
 	static const char * const sink_str[] = {
 		[SINK_NONE] = "NONE",
@@ -487,7 +493,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
 	char sink_name[20];
 	struct device *dev = ctx->dev;
 
-	if (ret < 0)
+	if (!ctx->sink_detected || !ctx->devcap_read)
 		return;
 
 	sii8620_fetch_edid(ctx);
@@ -496,6 +502,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
 		sii8620_mhl_disconnected(ctx);
 		return;
 	}
+	sii8620_set_upstream_edid(ctx);
 
 	if (drm_detect_hdmi_monitor(ctx->edid))
 		ctx->sink_type = SINK_HDMI;
@@ -508,53 +515,6 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
 		 sink_str[ctx->sink_type], sink_name);
 }
 
-static void sii8620_hsic_init(struct sii8620 *ctx)
-{
-	if (!sii8620_is_mhl3(ctx))
-		return;
-
-	sii8620_write(ctx, REG_FCGC,
-		BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
-	sii8620_setbits(ctx, REG_HRXCTRL3,
-		BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
-	sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
-	sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
-	sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
-	sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
-	sii8620_write_seq_static(ctx,
-		REG_TDMLLCTL, 0,
-		REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
-			BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
-		REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
-		REG_HRXINTL, 0xff,
-		REG_HRXINTH, 0xff,
-		REG_TTXINTL, 0xff,
-		REG_TTXINTH, 0xff,
-		REG_TRXINTL, 0xff,
-		REG_TRXINTH, 0xff,
-		REG_HTXINTL, 0xff,
-		REG_HTXINTH, 0xff,
-		REG_FCINTR0, 0xff,
-		REG_FCINTR1, 0xff,
-		REG_FCINTR2, 0xff,
-		REG_FCINTR3, 0xff,
-		REG_FCINTR4, 0xff,
-		REG_FCINTR5, 0xff,
-		REG_FCINTR6, 0xff,
-		REG_FCINTR7, 0xff
-	);
-}
-
-static void sii8620_edid_read(struct sii8620 *ctx, int ret)
-{
-	if (ret < 0)
-		return;
-
-	sii8620_set_upstream_edid(ctx);
-	sii8620_hsic_init(ctx);
-	sii8620_enable_hpd(ctx);
-}
-
 static void sii8620_mr_devcap(struct sii8620 *ctx)
 {
 	u8 dcap[MHL_DCAP_SIZE];
@@ -570,6 +530,8 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
 		 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
 		 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
 	sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+	ctx->devcap_read = true;
+	sii8620_identify_sink(ctx);
 }
 
 static void sii8620_mr_xdevcap(struct sii8620 *ctx)
@@ -807,6 +769,7 @@ static void sii8620_burst_rx_all(struct sii8620 *ctx)
 static void sii8620_fetch_edid(struct sii8620 *ctx)
 {
 	u8 lm_ddc, ddc_cmd, int3, cbus;
+	unsigned long timeout;
 	int fetched, i;
 	int edid_len = EDID_LENGTH;
 	u8 *edid;
@@ -856,23 +819,31 @@ static void sii8620_fetch_edid(struct sii8620 *ctx)
 		REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
 	);
 
-	do {
-		int3 = sii8620_readb(ctx, REG_INTR3);
+	int3 = 0;
+	timeout = jiffies + msecs_to_jiffies(200);
+	for (;;) {
 		cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
-
-		if (int3 & BIT_DDC_CMD_DONE)
-			break;
-
-		if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+		if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) {
+			kfree(edid);
+			edid = NULL;
+			goto end;
+		}
+		if (int3 & BIT_DDC_CMD_DONE) {
+			if (sii8620_readb(ctx, REG_DDC_DOUT_CNT)
+			    >= FETCH_SIZE)
+				break;
+		} else {
+			int3 = sii8620_readb(ctx, REG_INTR3);
+		}
+		if (time_is_before_jiffies(timeout)) {
+			ctx->error = -ETIMEDOUT;
+			dev_err(ctx->dev, "timeout during EDID read\n");
 			kfree(edid);
 			edid = NULL;
 			goto end;
 		}
-	} while (1);
-
-	sii8620_readb(ctx, REG_DDC_STATUS);
-	while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
 		usleep_range(10, 20);
+	}
 
 	sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
 	if (fetched + FETCH_SIZE == EDID_LENGTH) {
@@ -971,8 +942,17 @@ static int sii8620_hw_on(struct sii8620 *ctx)
 	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
 	if (ret)
 		return ret;
+
 	usleep_range(10000, 20000);
-	return clk_prepare_enable(ctx->clk_xtal);
+	ret = clk_prepare_enable(ctx->clk_xtal);
+	if (ret)
+		return ret;
+
+	msleep(100);
+	gpiod_set_value(ctx->gpio_reset, 0);
+	msleep(100);
+
+	return 0;
 }
 
 static int sii8620_hw_off(struct sii8620 *ctx)
@@ -982,17 +962,6 @@ static int sii8620_hw_off(struct sii8620 *ctx)
 	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
 }
 
-static void sii8620_hw_reset(struct sii8620 *ctx)
-{
-	usleep_range(10000, 20000);
-	gpiod_set_value(ctx->gpio_reset, 0);
-	usleep_range(5000, 20000);
-	gpiod_set_value(ctx->gpio_reset, 1);
-	usleep_range(10000, 20000);
-	gpiod_set_value(ctx->gpio_reset, 0);
-	msleep(300);
-}
-
 static void sii8620_cbus_reset(struct sii8620 *ctx)
 {
 	sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
@@ -1048,20 +1017,11 @@ static void sii8620_stop_video(struct sii8620 *ctx)
 
 static void sii8620_set_format(struct sii8620 *ctx)
 {
-	u8 out_fmt;
-
 	if (sii8620_is_mhl3(ctx)) {
 		sii8620_setbits(ctx, REG_M3_P0CTRL,
 				BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
 				ctx->use_packed_pixel ? ~0 : 0);
 	} else {
-		if (ctx->use_packed_pixel)
-			sii8620_write_seq_static(ctx,
-				REG_VID_MODE, BIT_VID_MODE_M1080P,
-				REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
-				REG_MHLTX_CTL6, 0x60
-			);
-		else
 		sii8620_write_seq_static(ctx,
 			REG_VID_MODE, 0,
 			REG_MHL_TOP_CTL, 1,
@@ -1069,15 +1029,9 @@ static void sii8620_set_format(struct sii8620 *ctx)
 		);
 	}
 
-	if (ctx->use_packed_pixel)
-		out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
-			BIT_TPI_OUTPUT_CSCMODE709;
-	else
-		out_fmt = VAL_TPI_FORMAT(RGB, FULL);
-
 	sii8620_write_seq(ctx,
 		REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
-		REG_TPI_OUTPUT, out_fmt,
+		REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
 	);
 }
 
@@ -1216,7 +1170,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
 	int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
+	for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
 		if (clk < clk_spec[i].max_clk)
 			break;
 
@@ -1534,6 +1488,16 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
 	);
 }
 
+static void sii8620_hpd_unplugged(struct sii8620 *ctx)
+{
+	sii8620_disable_hpd(ctx);
+	ctx->sink_type = SINK_NONE;
+	ctx->sink_detected = false;
+	ctx->feature_complete = false;
+	kfree(ctx->edid);
+	ctx->edid = NULL;
+}
+
 static void sii8620_disconnect(struct sii8620 *ctx)
 {
 	sii8620_disable_gen2_write_burst(ctx);
@@ -1561,7 +1525,7 @@ static void sii8620_disconnect(struct sii8620 *ctx)
 		REG_MHL_DP_CTL6, 0x2A,
 		REG_MHL_DP_CTL7, 0x03
 	);
-	sii8620_disable_hpd(ctx);
+	sii8620_hpd_unplugged(ctx);
 	sii8620_write_seq_static(ctx,
 		REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
 		REG_MHL_COC_CTL1, 0x07,
@@ -1609,10 +1573,8 @@ static void sii8620_disconnect(struct sii8620 *ctx)
 	memset(ctx->xstat, 0, sizeof(ctx->xstat));
 	memset(ctx->devcap, 0, sizeof(ctx->devcap));
 	memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+	ctx->devcap_read = false;
 	ctx->cbus_status = 0;
-	ctx->sink_type = SINK_NONE;
-	kfree(ctx->edid);
-	ctx->edid = NULL;
 	sii8620_mt_cleanup(ctx);
 }
 
@@ -1703,9 +1665,6 @@ static void sii8620_status_changed_path(struct sii8620 *ctx)
 		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
 				      MHL_DST_LM_CLK_MODE_NORMAL
 				      | MHL_DST_LM_PATH_ENABLED);
-		if (!sii8620_is_mhl3(ctx))
-			sii8620_mt_read_devcap(ctx, false);
-		sii8620_mt_set_cont(ctx, sii8620_sink_detected);
 	} else {
 		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
 				      MHL_DST_LM_CLK_MODE_NORMAL);
@@ -1722,9 +1681,14 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
 	sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
 	sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
 
-	if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+	if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] &
+	    MHL_DST_CONN_DCAP_RDY) {
 		sii8620_status_dcap_ready(ctx);
 
+		if (!sii8620_is_mhl3(ctx))
+			sii8620_mt_read_devcap(ctx, false);
+	}
+
 	if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
 		sii8620_status_changed_path(ctx);
 }
@@ -1808,8 +1772,11 @@ static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
 	}
 	if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
 		sii8620_send_features(ctx);
-	if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE)
-		sii8620_edid_read(ctx, 0);
+	if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) {
+		ctx->feature_complete = true;
+		if (ctx->edid)
+			sii8620_enable_hpd(ctx);
+	}
 }
 
 static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
@@ -1884,6 +1851,15 @@ static void sii8620_irq_msc(struct sii8620 *ctx)
 	if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
 		sii8620_msc_mr_write_stat(ctx);
 
+	if (stat & BIT_CBUS_HPD_CHG) {
+		if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) {
+			ctx->sink_detected = true;
+			sii8620_identify_sink(ctx);
+		} else {
+			sii8620_hpd_unplugged(ctx);
+		}
+	}
+
 	if (stat & BIT_CBUS_MSC_MR_SET_INT)
 		sii8620_msc_mr_set_int(ctx);
 
@@ -1931,14 +1907,6 @@ static void sii8620_irq_edid(struct sii8620 *ctx)
 		ctx->mt_state = MT_STATE_DONE;
 }
 
-static void sii8620_scdt_high(struct sii8620 *ctx)
-{
-	sii8620_write_seq_static(ctx,
-		REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
-		REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
-	);
-}
-
 static void sii8620_irq_scdt(struct sii8620 *ctx)
 {
 	u8 stat = sii8620_readb(ctx, REG_INTR5);
@@ -1946,53 +1914,13 @@ static void sii8620_irq_scdt(struct sii8620 *ctx)
 	if (stat & BIT_INTR_SCDT_CHANGE) {
 		u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
 
-		if (cstat & BIT_TMDS_CSTAT_P3_SCDT) {
-			if (ctx->sink_type == SINK_HDMI)
-				/* enable infoframe interrupt */
-				sii8620_scdt_high(ctx);
-			else
-				sii8620_start_video(ctx);
-		}
+		if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+			sii8620_start_video(ctx);
 	}
 
 	sii8620_write(ctx, REG_INTR5, stat);
 }
 
-static void sii8620_new_vsi(struct sii8620 *ctx)
-{
-	u8 vsif[11];
-
-	sii8620_write(ctx, REG_RX_HDMI_CTRL2,
-		      VAL_RX_HDMI_CTRL2_DEFVAL |
-		      BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
-	sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
-			 ARRAY_SIZE(vsif));
-}
-
-static void sii8620_new_avi(struct sii8620 *ctx)
-{
-	sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
-	sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
-			 ARRAY_SIZE(ctx->avif));
-}
-
-static void sii8620_irq_infr(struct sii8620 *ctx)
-{
-	u8 stat = sii8620_readb(ctx, REG_INTR8)
-		& (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
-
-	sii8620_write(ctx, REG_INTR8, stat);
-
-	if (stat & BIT_CEA_NEW_VSI)
-		sii8620_new_vsi(ctx);
-
-	if (stat & BIT_CEA_NEW_AVI)
-		sii8620_new_avi(ctx);
-
-	if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
-		sii8620_start_video(ctx);
-}
-
 static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
 {
 	if (ret < 0)
@@ -2043,11 +1971,11 @@ static void sii8620_irq_ddc(struct sii8620 *ctx)
 
 	if (stat & BIT_DDC_CMD_DONE) {
 		sii8620_write(ctx, REG_INTR3_MASK, 0);
-		if (sii8620_is_mhl3(ctx))
+		if (sii8620_is_mhl3(ctx) && !ctx->feature_complete)
 			sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
 					   MHL_INT_RC_FEAT_REQ);
 		else
-			sii8620_edid_read(ctx, 0);
+			sii8620_enable_hpd(ctx);
 	}
 	sii8620_write(ctx, REG_INTR3, stat);
 }
@@ -2074,7 +2002,6 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
 		{ BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
 		{ BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
 		{ BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
-		{ BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
 	};
 	struct sii8620 *ctx = data;
 	u8 stats[LEN_FAST_INTR_STAT];
@@ -2112,7 +2039,6 @@ static void sii8620_cable_in(struct sii8620 *ctx)
 		dev_err(dev, "Error powering on, %d.\n", ret);
 		return;
 	}
-	sii8620_hw_reset(ctx);
 
 	sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
 	ret = sii8620_clear_error(ctx);
@@ -2268,17 +2194,43 @@ static void sii8620_detach(struct drm_bridge *bridge)
 	rc_unregister_device(ctx->rc_dev);
 }
 
+static int sii8620_is_packing_required(struct sii8620 *ctx,
+				       const struct drm_display_mode *mode)
+{
+	int max_pclk, max_pclk_pp_mode;
+
+	if (sii8620_is_mhl3(ctx)) {
+		max_pclk = MHL3_MAX_PCLK;
+		max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE;
+	} else {
+		max_pclk = MHL1_MAX_PCLK;
+		max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE;
+	}
+
+	if (mode->clock < max_pclk)
+		return 0;
+	else if (mode->clock < max_pclk_pp_mode)
+		return 1;
+	else
+		return -1;
+}
+
 static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge,
 					       const struct drm_display_mode *mode)
 {
 	struct sii8620 *ctx = bridge_to_sii8620(bridge);
+	int pack_required = sii8620_is_packing_required(ctx, mode);
 	bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] &
 			MHL_DCAP_VID_LINK_PPIXEL;
-	unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK :
-						       MHL1_MAX_LCLK;
-	max_pclk /= can_pack ? 2 : 3;
 
-	return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK;
+	switch (pack_required) {
+	case 0:
+		return MODE_OK;
+	case 1:
+		return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH;
+	default:
+		return MODE_CLOCK_HIGH;
+	}
 }
 
 static bool sii8620_mode_fixup(struct drm_bridge *bridge,
@@ -2286,43 +2238,16 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
 			       struct drm_display_mode *adjusted_mode)
 {
 	struct sii8620 *ctx = bridge_to_sii8620(bridge);
-	int max_lclk;
-	bool ret = true;
 
 	mutex_lock(&ctx->lock);
 
-	max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK;
-	if (max_lclk > 3 * adjusted_mode->clock) {
-		ctx->use_packed_pixel = 0;
-		goto end;
-	}
-	if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
-	    max_lclk > 2 * adjusted_mode->clock) {
-		ctx->use_packed_pixel = 1;
-		goto end;
-	}
-	ret = false;
-end:
-	if (ret) {
-		u8 vic = drm_match_cea_mode(adjusted_mode);
-
-		if (!vic) {
-			union hdmi_infoframe frm;
-			u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
-
-			/* FIXME: We need the connector here */
-			drm_hdmi_vendor_infoframe_from_display_mode(
-				&frm.vendor.hdmi, NULL, adjusted_mode);
-			vic = frm.vendor.hdmi.vic;
-			if (vic >= ARRAY_SIZE(mhl_vic))
-				vic = 0;
-			vic = mhl_vic[vic];
-		}
-		ctx->video_code = vic;
-		ctx->pixel_clock = adjusted_mode->clock;
-	}
+	ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
+	ctx->video_code = drm_match_cea_mode(adjusted_mode);
+	ctx->pixel_clock = adjusted_mode->clock;
+
 	mutex_unlock(&ctx->lock);
-	return ret;
+
+	return true;
 }
 
 static const struct drm_bridge_funcs sii8620_bridge_funcs = {
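With the new per-mode thresholds, packing becomes a three-way decision instead of a single link-clock cap. A standalone sketch with the MHL1 numbers defined above: 1080p60 at 148500 kHz exceeds MHL1_MAX_PCLK (75000) but fits under MHL1_MAX_PCLK_PP_MODE (150000), so it is displayable only as packed pixels:

#include <stdio.h>

#define MHL1_MAX_PCLK		75000	/* kHz, 24bpp limit */
#define MHL1_MAX_PCLK_PP_MODE	150000	/* kHz, 16bpp packed-pixel limit */

static int is_packing_required(int clock_khz)
{
	if (clock_khz < MHL1_MAX_PCLK)
		return 0;	/* fits unpacked */
	else if (clock_khz < MHL1_MAX_PCLK_PP_MODE)
		return 1;	/* needs packed pixel mode */
	else
		return -1;	/* cannot be displayed */
}

int main(void)
{
	printf("720p60  (74250 kHz):  %d\n", is_packing_required(74250));  /* 0 */
	printf("1080p60 (148500 kHz): %d\n", is_packing_required(148500)); /* 1 */
	return 0;
}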
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b553a6f2ff0e..7af748ed1c58 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -369,13 +369,6 @@ EXPORT_SYMBOL(drm_dev_exit);
  */
 void drm_dev_unplug(struct drm_device *dev)
 {
-	drm_dev_unregister(dev);
-
-	mutex_lock(&drm_global_mutex);
-	if (dev->open_count == 0)
-		drm_dev_put(dev);
-	mutex_unlock(&drm_global_mutex);
-
 	/*
 	 * After synchronizing any critical read section is guaranteed to see
 	 * the new value of ->unplugged, and any critical section which might
@@ -384,6 +377,13 @@ void drm_dev_unplug(struct drm_device *dev)
 	 */
 	dev->unplugged = true;
 	synchronize_srcu(&drm_unplug_srcu);
+
+	drm_dev_unregister(dev);
+
+	mutex_lock(&drm_global_mutex);
+	if (dev->open_count == 0)
+		drm_dev_put(dev);
+	mutex_unlock(&drm_global_mutex);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
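The reordering matters because readers guard device access with the drm_unplug SRCU: setting ->unplugged and calling synchronize_srcu() first guarantees no reader can still observe the device as live once unregistration and teardown begin. A sketch of the read-side pattern this protects (the operation itself is hypothetical):

#include <drm/drm_drv.h>

static void example_device_op(struct drm_device *dev)
{
	int idx;

	if (!drm_dev_enter(dev, &idx))	/* fails once ->unplugged is set */
		return;

	/* ... safe to touch the device here ... */

	drm_dev_exit(idx);
}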
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 34c125e2d90c..7014a96546f4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -340,14 +340,21 @@ struct drm_i915_file_private {
340 340
341 unsigned int bsd_engine; 341 unsigned int bsd_engine;
342 342
343/* Client can have a maximum of 3 contexts banned before 343/*
344 * it is denied of creating new contexts. As one context 344 * Every context ban increments per client ban score. Also
345 * ban needs 4 consecutive hangs, and more if there is 345 * hangs in short succession increments ban score. If ban threshold
346 * progress in between, this is a last resort stop gap measure 346 * is reached, client is considered banned and submitting more work
347 * to limit the badly behaving clients access to gpu. 347 * will fail. This is a stop gap measure to limit the badly behaving
348 * clients access to gpu. Note that unbannable contexts never increment
349 * the client ban score.
348 */ 350 */
349#define I915_MAX_CLIENT_CONTEXT_BANS 3 351#define I915_CLIENT_SCORE_HANG_FAST 1
350 atomic_t context_bans; 352#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
353#define I915_CLIENT_SCORE_CONTEXT_BAN 3
354#define I915_CLIENT_SCORE_BANNED 9
355 /** ban_score: Accumulated score of all ctx bans and fast hangs. */
356 atomic_t ban_score;
357 unsigned long hang_timestamp;
351}; 358};
352 359
353/* Interface history: 360/* Interface history:
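With these constants, three context bans (3 points each) reach I915_CLIENT_SCORE_BANNED, matching the old three-ban limit, while repeated fast hangs (less than 60 seconds apart, 1 point each) can now also push a client over the threshold. A back-of-envelope sketch of the arithmetic:

#include <stdio.h>

#define SCORE_HANG_FAST		1
#define SCORE_CONTEXT_BAN	3
#define SCORE_BANNED		9

int main(void)
{
	int bans_only = 3 * SCORE_CONTEXT_BAN;			  /* 9 */
	int mixed = 2 * SCORE_CONTEXT_BAN + 3 * SCORE_HANG_FAST; /* 9 */

	printf("3 bans: %s\n", bans_only >= SCORE_BANNED ? "banned" : "ok");
	printf("2 bans + 3 fast hangs: %s\n",
	       mixed >= SCORE_BANNED ? "banned" : "ok");
	return 0;
}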
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3704f4c0c2c9..d44ad7bc1e94 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2933,32 +2933,54 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
+					const struct i915_gem_context *ctx)
+{
+	unsigned int score;
+	unsigned long prev_hang;
+
+	if (i915_gem_context_is_banned(ctx))
+		score = I915_CLIENT_SCORE_CONTEXT_BAN;
+	else
+		score = 0;
+
+	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+		score += I915_CLIENT_SCORE_HANG_FAST;
+
+	if (score) {
+		atomic_add(score, &file_priv->ban_score);
+
+		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+				 ctx->name, score,
+				 atomic_read(&file_priv->ban_score));
+	}
+}
+
 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
 {
-	bool banned;
+	unsigned int score;
+	bool banned, bannable;
 
 	atomic_inc(&ctx->guilty_count);
 
-	banned = false;
-	if (i915_gem_context_is_bannable(ctx)) {
-		unsigned int score;
-
-		score = atomic_add_return(CONTEXT_SCORE_GUILTY,
-					  &ctx->ban_score);
-		banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
-
-		DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
-				 ctx->name, score, yesno(banned));
-	}
-	if (!banned)
+	bannable = i915_gem_context_is_bannable(ctx);
+	score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+	banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+
+	DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
+			 ctx->name, atomic_read(&ctx->guilty_count),
+			 score, yesno(banned && bannable));
+
+	/* Cool contexts don't accumulate client ban score */
+	if (!bannable)
 		return;
 
-	i915_gem_context_set_banned(ctx);
-	if (!IS_ERR_OR_NULL(ctx->file_priv)) {
-		atomic_inc(&ctx->file_priv->context_bans);
-		DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
-				 ctx->name, atomic_read(&ctx->file_priv->context_bans));
-	}
+	if (banned)
+		i915_gem_context_set_banned(ctx);
+
+	if (!IS_ERR_OR_NULL(ctx->file_priv))
+		i915_gem_client_mark_guilty(ctx->file_priv, ctx);
 }
 
 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
@@ -5736,6 +5758,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
 	INIT_LIST_HEAD(&file_priv->mm.request_list);
 
 	file_priv->bsd_engine = -1;
+	file_priv->hang_timestamp = jiffies;
 
 	ret = i915_gem_context_open(i915, file);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 33f8a4b3c981..060335d3d9e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -652,7 +652,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 
 static bool client_is_banned(struct drm_i915_file_private *file_priv)
 {
-	return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
+	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f627a8c47c58..22df17c8ca9b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -489,7 +489,9 @@ eb_validate_vma(struct i915_execbuffer *eb,
 }
 
 static int
-eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
+eb_add_vma(struct i915_execbuffer *eb,
+	   unsigned int i, unsigned batch_idx,
+	   struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
 	int err;
@@ -522,6 +524,24 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
 	eb->flags[i] = entry->flags;
 	vma->exec_flags = &eb->flags[i];
 
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	if (i == batch_idx) {
+		if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+		if (eb->reloc_cache.has_fence)
+			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+
+		eb->batch = vma;
+	}
+
 	err = 0;
 	if (eb_pin_vma(eb, entry, vma)) {
 		if (entry->offset != vma->node.start) {
@@ -716,7 +736,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
 	struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
 	struct drm_i915_gem_object *obj;
-	unsigned int i;
+	unsigned int i, batch;
 	int err;
 
 	if (unlikely(i915_gem_context_is_closed(eb->ctx)))
@@ -728,6 +748,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	INIT_LIST_HEAD(&eb->relocs);
 	INIT_LIST_HEAD(&eb->unbound);
 
+	batch = eb_batch_index(eb);
+
 	for (i = 0; i < eb->buffer_count; i++) {
 		u32 handle = eb->exec[i].handle;
 		struct i915_lut_handle *lut;
@@ -770,33 +792,16 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 		lut->handle = handle;
 
 add_vma:
-		err = eb_add_vma(eb, i, vma);
+		err = eb_add_vma(eb, i, batch, vma);
 		if (unlikely(err))
 			goto err_vma;
 
 		GEM_BUG_ON(vma != eb->vma[i]);
 		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
+		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
+			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
 	}
 
-	/* take note of the batch buffer before we might reorder the lists */
-	i = eb_batch_index(eb);
-	eb->batch = eb->vma[i];
-	GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
-
-	/*
-	 * SNA is doing fancy tricks with compressing batch buffers, which leads
-	 * to negative relocation deltas. Usually that works out ok since the
-	 * relocate address is still positive, except when the batch is placed
-	 * very low in the GTT. Ensure this doesn't happen.
-	 *
-	 * Note that actual hangs have only been observed on gen7, but for
-	 * paranoia do it everywhere.
-	 */
-	if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
-		eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
-	if (eb->reloc_cache.has_fence)
-		eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
-
 	eb->args->flags |= __EXEC_VALIDATED;
 	return eb_reserve(eb);
 
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f9bc3aaa90d0..4a02747ac658 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1893,9 +1893,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
 
 		/*
 		 * Clear the PIPE*STAT regs before the IIR
+		 *
+		 * Toggle the enable bits to make sure we get an
+		 * edge in the ISR pipe event bit if we don't clear
+		 * all the enabled status bits. Otherwise the edge
+		 * triggered IIR on i965/g4x wouldn't notice that
+		 * an interrupt is still pending.
 		 */
-		if (pipe_stats[pipe])
-			I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
+		if (pipe_stats[pipe]) {
+			I915_WRITE(reg, pipe_stats[pipe]);
+			I915_WRITE(reg, enable_mask);
+		}
 	}
 	spin_unlock(&dev_priv->irq_lock);
 }
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f11bb213ec07..7720569f2024 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2425,12 +2425,17 @@ enum i915_power_well_id {
 #define _3D_CHICKEN	_MMIO(0x2084)
 #define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB	(1 << 10)
 #define _3D_CHICKEN2	_MMIO(0x208c)
+
+#define FF_SLICE_CHICKEN	_MMIO(0x2088)
+#define  FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX	(1 << 1)
+
 /* Disables pipelining of read flushes past the SF-WIZ interface.
  * Required on all Ironlake steppings according to the B-Spec, but the
  * particular danger of not doing so is not specified.
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED			(1 << 14)
 #define _3D_CHICKEN3	_MMIO(0x2090)
+#define  _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX		(1 << 12)
 #define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL		(1 << 10)
 #define  _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE	(1 << 5)
 #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL		(1 << 5)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index de0e22322c76..072b326d5ee0 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -304,6 +304,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
 	int max_dotclk = dev_priv->max_dotclk_freq;
 	int max_clock;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	if (mode->clock < 25000)
 		return MODE_CLOCK_LOW;
 
@@ -337,6 +340,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
 				     struct intel_crtc_state *pipe_config,
 				     struct drm_connector_state *conn_state)
 {
+	struct drm_display_mode *adjusted_mode =
+		&pipe_config->base.adjusted_mode;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	return true;
 }
 
@@ -344,6 +353,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
 				   struct intel_crtc_state *pipe_config,
 				   struct drm_connector_state *conn_state)
 {
+	struct drm_display_mode *adjusted_mode =
+		&pipe_config->base.adjusted_mode;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	pipe_config->has_pch_encoder = true;
 
 	return true;
@@ -354,6 +369,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
 				   struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_display_mode *adjusted_mode =
+		&pipe_config->base.adjusted_mode;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
 
 	pipe_config->has_pch_encoder = true;
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dee3a8e659f1..2cc6faa1daa8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -14469,12 +14469,22 @@ static enum drm_mode_status
 intel_mode_valid(struct drm_device *dev,
 		 const struct drm_display_mode *mode)
 {
+	/*
+	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
+	 * of DBLSCAN modes to the output's mode list when they detect
+	 * the scaling mode property on the connector. And they don't
+	 * ask the kernel to validate those modes in any way until
+	 * modeset time at which point the client gets a protocol error.
+	 * So in order to not upset those clients we silently ignore the
+	 * DBLSCAN flag on such connectors. For other connectors we will
+	 * reject modes with the DBLSCAN flag in encoder->compute_config().
+	 * And we always reject DBLSCAN modes in connector->mode_valid()
+	 * as we never want such modes on the connector's mode list.
+	 */
+
 	if (mode->vscan > 1)
 		return MODE_NO_VSCAN;
 
-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-		return MODE_NO_DBLESCAN;
-
 	if (mode->flags & DRM_MODE_FLAG_HSKEW)
 		return MODE_H_ILLEGAL;
 
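The comment above describes a split policy: connector->mode_valid() keeps DBLSCAN off every probed mode list, while encoder->compute_config() rejects user-injected DBLSCAN modes, except on the eDP/LVDS/DSI paths where Xorg may add them. A hedged sketch of the two layers (function names are illustrative, not the driver's):

#include <drm/drm_modes.h>

static enum drm_mode_status example_mode_valid(const struct drm_display_mode *mode)
{
	/* never expose DBLSCAN modes on the connector's mode list */
	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;
	return MODE_OK;
}

static bool example_compute_config(const struct drm_display_mode *adjusted_mode)
{
	/* catch user-supplied modes that bypassed mode_valid() */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return false;
	return true;
}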
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8320f0e8e3be..16faea30114a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -420,6 +420,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
 	int max_rate, mode_rate, max_lanes, max_link_clock;
 	int max_dotclk;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
 
 	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@@ -1862,7 +1865,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 			       conn_state->scaling_mode);
 	}
 
-	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
+	if (HAS_GMCH_DISPLAY(dev_priv) &&
 	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
 		return false;
 
@@ -2784,16 +2790,6 @@ static void g4x_disable_dp(struct intel_encoder *encoder,
 			   const struct drm_connector_state *old_conn_state)
 {
 	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
-
-	/* disable the port before the pipe on g4x */
-	intel_dp_link_down(encoder, old_crtc_state);
-}
-
-static void ilk_disable_dp(struct intel_encoder *encoder,
-			   const struct intel_crtc_state *old_crtc_state,
-			   const struct drm_connector_state *old_conn_state)
-{
-	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }
 
 static void vlv_disable_dp(struct intel_encoder *encoder,
@@ -2807,13 +2803,19 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
 	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }
 
-static void ilk_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_encoder *encoder,
 				const struct intel_crtc_state *old_crtc_state,
 				const struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = encoder->port;
 
+	/*
+	 * Bspec does not list a specific disable sequence for g4x DP.
+	 * Follow the ilk+ sequence (disable pipe before the port) for
+	 * g4x DP as it does not suffer from underruns like the normal
+	 * g4x modeset sequence (disable pipe after the port).
+	 */
 	intel_dp_link_down(encoder, old_crtc_state);
 
 	/* Only ilk+ has port A */
@@ -6337,7 +6339,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
-	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+	if (!HAS_GMCH_DISPLAY(dev_priv))
 		connector->interlace_allowed = true;
 	connector->doublescan_allowed = 0;
 
@@ -6436,15 +6438,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
 		intel_encoder->enable = vlv_enable_dp;
 		intel_encoder->disable = vlv_disable_dp;
 		intel_encoder->post_disable = vlv_post_disable_dp;
-	} else if (INTEL_GEN(dev_priv) >= 5) {
-		intel_encoder->pre_enable = g4x_pre_enable_dp;
-		intel_encoder->enable = g4x_enable_dp;
-		intel_encoder->disable = ilk_disable_dp;
-		intel_encoder->post_disable = ilk_post_disable_dp;
 	} else {
 		intel_encoder->pre_enable = g4x_pre_enable_dp;
 		intel_encoder->enable = g4x_enable_dp;
 		intel_encoder->disable = g4x_disable_dp;
+		intel_encoder->post_disable = g4x_post_disable_dp;
 	}
 
 	intel_dig_port->dp.output_reg = output_reg;
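
Context for the g4x reordering above: in the i915 disable sequence the encoder's ->disable() hook runs while the pipe is still active and ->post_disable() runs after the pipe has been shut down, so moving intel_dp_link_down() from g4x_disable_dp() into the renamed g4x_post_disable_dp() turns the port off after the pipe, matching the ilk+ sequence the new comment cites. A simplified outline of that ordering (an illustrative sketch only; the real sequencing lives in the CRTC disable path in intel_display.c):

	/* sketch: order of hooks during a modeset disable */
	encoder->disable(encoder, old_crtc_state, old_conn_state);
						/* pipe still running */
	/* ... the pipe/CRTC itself is disabled here ... */
	encoder->post_disable(encoder, old_crtc_state, old_conn_state);
						/* port goes down last */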
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 9e6956c08688..5890500a3a8b 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
 					   DP_DPCD_QUIRK_LIMITED_M_N);
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	pipe_config->has_pch_encoder = false;
 	bpp = 24;
 	if (intel_dp->compliance.test_data.bpc) {
@@ -366,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
 	if (!intel_dp)
 		return MODE_ERROR;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	max_link_clock = intel_dp_max_link_rate(intel_dp);
 	max_lanes = intel_dp_max_lane_count(intel_dp);
 
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index cf39ca90d887..f349b3920199 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
 				       conn_state->scaling_mode);
 	}
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	/* DSI uses short packets for sync events, so clear mode flags for DSI */
 	adjusted_mode->flags = 0;
 
@@ -1266,6 +1269,9 @@ intel_dsi_mode_valid(struct drm_connector *connector,
 
 	DRM_DEBUG_KMS("\n");
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	if (fixed_mode) {
 		if (mode->hdisplay > fixed_mode->hdisplay)
 			return MODE_PANEL;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a70d767313aa..61d908e0df0e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -219,6 +219,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 	int target_clock = mode->clock;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	/* XXX: Validate clock range */
 
 	if (fixed_mode) {
@@ -254,6 +257,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
 	if (fixed_mode)
 		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	return true;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ee929f31f7db..d8cb53ef4351 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1557,6 +1557,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
 	bool force_dvi =
 		READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	clock = mode->clock;
 
 	if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
@@ -1677,6 +1680,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	int desired_bpp;
 	bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
 
 	if (pipe_config->has_hdmi_sink)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 15434cad5430..7c4c8fb1dae4 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1545,11 +1545,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
 	batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
+	*batch++ = MI_LOAD_REGISTER_IMM(3);
+
 	/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
-	*batch++ = MI_LOAD_REGISTER_IMM(1);
 	*batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
 	*batch++ = _MASKED_BIT_DISABLE(
 			GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+
+	/* BSpec: 11391 */
+	*batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
+	*batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
+
+	/* BSpec: 11299 */
+	*batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
+	*batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
+
 	*batch++ = MI_NOOP;
 
 	/* WaClearSlmSpaceAtContextSwitch:kbl */
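
The gen9 hunk above works because MI_LOAD_REGISTER_IMM(n) emits one header dword followed by n (register offset, value) pairs, so the two provoking-vertex workaround writes can ride on the same header as the existing COMMON_SLICE_CHICKEN2 write, turning three separate LRIs into one LRI with three pairs. A minimal sketch of the idiom; REG_A/VAL_A and friends are placeholders, not real registers:

	u32 *cs = batch;

	*cs++ = MI_LOAD_REGISTER_IMM(3);	/* one header, three pairs */
	*cs++ = i915_mmio_reg_offset(REG_A);	/* pair 1: offset ... */
	*cs++ = VAL_A;				/* ... and value */
	*cs++ = i915_mmio_reg_offset(REG_B);	/* pair 2 */
	*cs++ = VAL_B;
	*cs++ = i915_mmio_reg_offset(REG_C);	/* pair 3 */
	*cs++ = VAL_C;
	*cs++ = MI_NOOP;			/* trailing NOOP, as in the hunk above */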
@@ -2641,10 +2651,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	context_size += LRC_HEADER_PAGES * PAGE_SIZE;
 
 	ctx_obj = i915_gem_object_create(ctx->i915, context_size);
-	if (IS_ERR(ctx_obj)) {
-		ret = PTR_ERR(ctx_obj);
-		goto error_deref_obj;
-	}
+	if (IS_ERR(ctx_obj))
+		return PTR_ERR(ctx_obj);
 
 	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
 	if (IS_ERR(vma)) {
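
This hunk is the "avoid putting the error pointer" fix: when i915_gem_object_create() fails, ctx_obj holds an ERR_PTR-encoded errno rather than an object, so jumping to the error_deref_obj label would have handed that error pointer to the object-put path. Returning early restores the usual kernel idiom:

	/* sketch of the ERR_PTR convention the fix restores */
	obj = i915_gem_object_create(i915, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* no reference was taken; nothing to put */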
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index d278f24ba6ae..48f618dc9abb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -380,6 +380,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 	int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
 	if (mode->hdisplay > fixed_mode->hdisplay)
 		return MODE_PANEL;
 	if (mode->vdisplay > fixed_mode->vdisplay)
@@ -429,6 +431,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 	intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
 			       adjusted_mode);
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	if (HAS_PCH_SPLIT(dev_priv)) {
 		pipe_config->has_pch_encoder = true;
 
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 25005023c243..26975df4e593 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
 				       adjusted_mode);
 	}
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	/*
 	 * Make the CRTC code factor in the SDVO pixel multiplier. The
 	 * SDVO device will factor out the multiplier during mode_set.
@@ -1621,6 +1624,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	if (intel_sdvo->pixel_clock_min > mode->clock)
 		return MODE_CLOCK_LOW;
 
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 885fc3809f7f..b55b5c157e38 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -850,6 +850,9 @@ intel_tv_mode_valid(struct drm_connector *connector,
 	const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	if (mode->clock > max_dotclk)
 		return MODE_CLOCK_HIGH;
 
@@ -877,16 +880,21 @@ intel_tv_compute_config(struct intel_encoder *encoder,
 			struct drm_connector_state *conn_state)
 {
 	const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
+	struct drm_display_mode *adjusted_mode =
+		&pipe_config->base.adjusted_mode;
 
 	if (!tv_mode)
 		return false;
 
-	pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
+	adjusted_mode->crtc_clock = tv_mode->clock;
 	DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
 	pipe_config->pipe_bpp = 8*3;
 
 	/* TV has it's own notion of sync and other mode flags, so clear them. */
-	pipe_config->base.adjusted_mode.flags = 0;
+	adjusted_mode->flags = 0;
 
 	/*
 	 * FIXME: We don't check whether the input mode is actually what we want
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 291c08117ab6..397143b639c6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -132,7 +132,7 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 
 	nvif_object_map(&wndw->wimm.base.user, NULL, 0);
 	wndw->immd = func;
-	wndw->ctxdma.parent = &disp->core->chan.base.user;
+	wndw->ctxdma.parent = NULL;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 224963b533a6..c5a9bc1af5af 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -444,14 +444,17 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 	if (ret)
 		return ret;
 
-	ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
-	if (IS_ERR(ctxdma)) {
-		nouveau_bo_unpin(fb->nvbo);
-		return PTR_ERR(ctxdma);
+	if (wndw->ctxdma.parent) {
+		ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+		if (IS_ERR(ctxdma)) {
+			nouveau_bo_unpin(fb->nvbo);
+			return PTR_ERR(ctxdma);
+		}
+
+		asyw->image.handle[0] = ctxdma->object.handle;
 	}
 
 	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
-	asyw->image.handle[0] = ctxdma->object.handle;
 	asyw->image.offset[0] = fb->nvbo->bo.offset;
 
 	if (wndw->func->prepare) {
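
The two nouveau hunks are two halves of one contract: curs507a_new_() now leaves wndw->ctxdma.parent NULL to indicate that cursors always render through the core channel's VRAM ctxdma, and nv50_wndw_prepare_fb() only creates a per-window ctxdma (and programs image.handle[0]) when a parent object exists. Condensed, the patched prepare_fb path behaves like this sketch:

	if (wndw->ctxdma.parent) {
		/* ordinary window: allocate its own ctxdma and handle */
		ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
		if (IS_ERR(ctxdma)) {
			nouveau_bo_unpin(fb->nvbo);
			return PTR_ERR(ctxdma);
		}
		asyw->image.handle[0] = ctxdma->object.handle;
	}
	/* cursor (parent == NULL): no per-window handle is programmed */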
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index b8cda9449241..768207fbbae3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -623,7 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 	struct qxl_cursor_cmd *cmd;
 	struct qxl_cursor *cursor;
 	struct drm_gem_object *obj;
-	struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
+	struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
 	int ret;
 	void *user_ptr;
 	int size = 64*64*4;
@@ -677,7 +677,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 					   cursor_bo, 0);
 		cmd->type = QXL_CURSOR_SET;
 
-		qxl_bo_unref(&qcrtc->cursor_bo);
+		old_cursor_bo = qcrtc->cursor_bo;
 		qcrtc->cursor_bo = cursor_bo;
 		cursor_bo = NULL;
 	} else {
@@ -697,6 +697,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 	qxl_release_fence_buffer_objects(release);
 
+	if (old_cursor_bo)
+		qxl_bo_unref(&old_cursor_bo);
+
 	qxl_bo_unref(&cursor_bo);
 
 	return;
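
The qxl change defers dropping the previous cursor BO: instead of calling qxl_bo_unref() while the new cursor command is still being assembled, the old pointer is parked in old_cursor_bo and released only after the command has been pushed and its release fenced, on the assumption (suggested by the surrounding code) that dropping the reference mid-update is what made the old code unsafe. The pattern in miniature, with hypothetical names:

	struct qxl_bo *old = qcrtc->cursor_bo;	/* park the old reference */

	qcrtc->cursor_bo = cursor_bo;		/* install the new cursor BO */
	/* ... build and push the cursor command, fence its release ... */
	if (old)
		qxl_bo_unref(&old);		/* only now is it safe to drop */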
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 08747fc3ee71..8232b39e16ca 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -17,7 +17,6 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_modes.h>
 #include <drm/drm_of.h>
-#include <drm/drm_panel.h>
 
 #include <uapi/drm/drm_mode.h>
 
@@ -418,9 +417,6 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 				     const struct drm_display_mode *mode)
 {
-	struct drm_panel *panel = tcon->panel;
-	struct drm_connector *connector = panel->connector;
-	struct drm_display_info display_info = connector->display_info;
 	unsigned int bp, hsync, vsync;
 	u8 clk_delay;
 	u32 val = 0;
@@ -478,27 +474,6 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
 		val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
 
-	/*
-	 * On A20 and similar SoCs, the only way to achieve Positive Edge
-	 * (Rising Edge), is setting dclk clock phase to 2/3(240°).
-	 * By default TCON works in Negative Edge(Falling Edge),
-	 * this is why phase is set to 0 in that case.
-	 * Unfortunately there's no way to logically invert dclk through
-	 * IO_POL register.
-	 * The only acceptable way to work, triple checked with scope,
-	 * is using clock phase set to 0° for Negative Edge and set to 240°
-	 * for Positive Edge.
-	 * On A33 and similar SoCs there would be a 90° phase option,
-	 * but it divides also dclk by 2.
-	 * Following code is a way to avoid quirks all around TCON
-	 * and DOTCLOCK drivers.
-	 */
-	if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
-		clk_set_phase(tcon->dclk, 240);
-
-	if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
-		clk_set_phase(tcon->dclk, 0);
-
 	regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
 			   SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
 			   val);