commit     9cb1680c20037e54f202956adabc446c499b9b1e
tree       f7fc0f6f89db3b8e15f1c3cfa3b61e94de734fed
parent     2278cb0bb3a177d3a3ef0bd332916180cb2f2121
parent     2d28b633c3fa8f53b919a5de86eb1c8e78dde818
author     Linus Torvalds <torvalds@linux-foundation.org>  2015-07-11 14:02:51 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-07-11 14:02:51 -0400

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "A bunch of fixes for radeon, intel, omap and one amdkfd fix.

  Radeon fixes are all over, but it does fix some cursor corruption
  across suspend/resume.  i915 should fix the second warn you were
  seeing, so let us know if not.

  omap is a bunch of small fixes"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (28 commits)
  drm/radeon: disable vce init on cayman (v2)
  drm/amdgpu: fix timeout calculation
  drm/radeon: check if BO_VA is set before adding it to the invalidation list
  drm/radeon: allways add the VM clear duplicate
  Revert "Revert "drm/radeon: dont switch vt on suspend""
  drm/radeon: Fold radeon_set_cursor() into radeon_show_cursor()
  drm/radeon: unpin cursor BOs on suspend and pin them again on resume (v2)
  drm/radeon: Clean up reference counting and pinning of the cursor BOs
  drm/amdkfd: validate pdd where it acquired first
  Revert "drm/i915: Allocate context objects from stolen"
  drm/i915: Declare the swizzling unknown for L-shaped configurations
  drm/radeon: fix underflow in r600_cp_dispatch_texture()
  drm/radeon: default to 2048 MB GART size on SI+
  drm/radeon: fix HDP flushing
  drm/radeon: use RCU query for GEM_BUSY syscall
  drm/amdgpu: Handle irqs only based on irq ring, not irq status regs.
  drm/radeon: Handle irqs only based on irq ring, not irq status regs.
  drm/i915: Use crtc_state->active in primary check_plane func
  drm/i915: Check crtc->active in intel_crtc_disable_planes
  drm/i915: Restore all GGTT VMAs on resume
  ...
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  |   2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c   |  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c   |  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c    |  22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c  |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c      |  23
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c   |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c     |  12
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c |   2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h       |   6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c        |  16
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c     |   2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c       |  26
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c     |  26
-rw-r--r--  drivers/gpu/drm/radeon/cik.c             | 336
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c       | 392
-rw-r--r--  drivers/gpu/drm/radeon/ni.c              |  25
-rw-r--r--  drivers/gpu/drm/radeon/r600.c            | 155
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c         |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c   | 109
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c   |  66
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c       |   1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c      |  12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h     |   1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c       |  40
-rw-r--r--  drivers/gpu/drm/radeon/si.c              | 336

27 files changed, 964 insertions(+), 717 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 975edb1000a2..ae43b58c9733 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -352,7 +352,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
 	if (((int64_t)timeout_ns) < 0)
 		return MAX_SCHEDULE_TIMEOUT;
 
-	timeout = ktime_sub_ns(ktime_get(), timeout_ns);
+	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
 	if (ktime_to_ns(timeout) < 0)
 		return 0;
 
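Note: "drm/amdgpu: fix timeout calculation" is this one-liner. The old code
computed "now - deadline" instead of "deadline - now", so any deadline still
in the future yielded a negative remainder and an immediate zero timeout.
A minimal sketch of the intended conversion, not the driver source itself
(the function name is hypothetical; the ktime/jiffies helpers are real
kernel APIs):

    #include <linux/ktime.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>

    /* Convert an absolute deadline in ns (as passed by the GEM wait
     * ioctl) into a relative jiffies timeout for the scheduler. */
    static unsigned long sketch_abs_deadline_to_jiffies(uint64_t timeout_ns)
    {
        ktime_t remaining;

        if (((int64_t)timeout_ns) < 0)
            return MAX_SCHEDULE_TIMEOUT;    /* negative: wait forever */

        /* remaining = deadline - now; the bug had the operands swapped */
        remaining = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
        if (ktime_to_ns(remaining) < 0)
            return 0;                       /* deadline already passed */

        return nsecs_to_jiffies(ktime_to_ns(remaining));
    }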
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5cde635978f9..6e77964f1b64 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -3403,19 +3403,25 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
 
     switch (entry->src_data) {
     case 0: /* vblank */
-        if (disp_int & interrupt_status_offsets[crtc].vblank) {
+        if (disp_int & interrupt_status_offsets[crtc].vblank)
             dce_v10_0_crtc_vblank_int_ack(adev, crtc);
-            if (amdgpu_irq_enabled(adev, source, irq_type)) {
-                drm_handle_vblank(adev->ddev, crtc);
-            }
-            DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+        else
+            DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+        if (amdgpu_irq_enabled(adev, source, irq_type)) {
+            drm_handle_vblank(adev->ddev, crtc);
         }
+        DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
         break;
     case 1: /* vline */
-        if (disp_int & interrupt_status_offsets[crtc].vline) {
+        if (disp_int & interrupt_status_offsets[crtc].vline)
             dce_v10_0_crtc_vline_int_ack(adev, crtc);
-            DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-        }
+        else
+            DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+        DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
         break;
     default:
         DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
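Note: the same reshaping is applied verbatim to dce_v11_0.c and dce_v8_0.c
below, and to the radeon cik.c/evergreen.c handlers further down: the IH
ring entry, not the (racy) status register, now decides that an event gets
handled. Reduced to its control-flow shape, with hypothetical helper names:

    /* Before: handling was nested under the status-register test, so an
     * IH ring event whose status bit had already cleared was dropped. */
    if (disp_int & VBLANK_STATUS_BIT) {
        ack_vblank(adev, crtc);                     /* ack the hardware */
        if (amdgpu_irq_enabled(adev, source, irq_type))
            drm_handle_vblank(adev->ddev, crtc);    /* sometimes skipped */
    }

    /* After: the status register only gates the ack; the event itself
     * is always handled, because the ring entry is authoritative. */
    if (disp_int & VBLANK_STATUS_BIT)
        ack_vblank(adev, crtc);
    else
        DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
    if (amdgpu_irq_enabled(adev, source, irq_type))
        drm_handle_vblank(adev->ddev, crtc);        /* always reached */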
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 95efd98b202d..7f7abb0e0be5 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3402,19 +3402,25 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
 
     switch (entry->src_data) {
     case 0: /* vblank */
-        if (disp_int & interrupt_status_offsets[crtc].vblank) {
+        if (disp_int & interrupt_status_offsets[crtc].vblank)
             dce_v11_0_crtc_vblank_int_ack(adev, crtc);
-            if (amdgpu_irq_enabled(adev, source, irq_type)) {
-                drm_handle_vblank(adev->ddev, crtc);
-            }
-            DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+        else
+            DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+        if (amdgpu_irq_enabled(adev, source, irq_type)) {
+            drm_handle_vblank(adev->ddev, crtc);
         }
+        DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
         break;
     case 1: /* vline */
-        if (disp_int & interrupt_status_offsets[crtc].vline) {
+        if (disp_int & interrupt_status_offsets[crtc].vline)
             dce_v11_0_crtc_vline_int_ack(adev, crtc);
-            DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-        }
+        else
+            DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+        DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
         break;
     default:
         DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index aaca8d663f2c..08387dfd98a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -3237,19 +3237,25 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
 
     switch (entry->src_data) {
     case 0: /* vblank */
-        if (disp_int & interrupt_status_offsets[crtc].vblank) {
+        if (disp_int & interrupt_status_offsets[crtc].vblank)
             WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
-            if (amdgpu_irq_enabled(adev, source, irq_type)) {
-                drm_handle_vblank(adev->ddev, crtc);
-            }
-            DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+        else
+            DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+        if (amdgpu_irq_enabled(adev, source, irq_type)) {
+            drm_handle_vblank(adev->ddev, crtc);
         }
+        DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
+
         break;
     case 1: /* vline */
-        if (disp_int & interrupt_status_offsets[crtc].vline) {
+        if (disp_int & interrupt_status_offsets[crtc].vline)
             WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
-            DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-        }
+        else
+            DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+        DRM_DEBUG("IH: D%d vline\n", crtc + 1);
+
         break;
     default:
         DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 8a1f999daa24..9be007081b72 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -420,6 +420,12 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
     pqm_uninit(&p->pqm);
 
     pdd = kfd_get_process_device_data(dev, p);
+
+    if (!pdd) {
+        mutex_unlock(&p->mutex);
+        return;
+    }
+
     if (pdd->reset_wavefronts) {
         dbgdev_wave_reset_wavefronts(pdd->dev, p);
         pdd->reset_wavefronts = false;
@@ -431,8 +437,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
      * We don't call amd_iommu_unbind_pasid() here
      * because the IOMMU called us.
      */
-    if (pdd)
-        pdd->bound = false;
+    pdd->bound = false;
 
     mutex_unlock(&p->mutex);
 }
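Note: the amdkfd hunks fix an inverted check order. `pdd->reset_wavefronts`
was dereferenced several lines before the old `if (pdd)` guard, so a NULL
`pdd` oopsed before the guard could ever help; the fix validates `pdd` right
where it is acquired and bails out early. The bug shape, reduced
(a hypothetical condensation, not the driver source):

    struct kfd_process_device *pdd;

    mutex_lock(&p->mutex);
    pdd = kfd_get_process_device_data(dev, p);  /* may return NULL */

    /* Validate where acquired, before any dereference; the old code
     * read pdd->reset_wavefronts first and only then tested "if (pdd)",
     * which was unreachable dead code on the NULL path. */
    if (!pdd) {
        mutex_unlock(&p->mutex);
        return;
    }

    if (pdd->reset_wavefronts) {                /* now provably safe */
        dbgdev_wave_reset_wavefronts(pdd->dev, p);
        pdd->reset_wavefronts = false;
    }
    pdd->bound = false;
    mutex_unlock(&p->mutex);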
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8867818b1401..d65cbe6afb92 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -157,9 +157,7 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
     struct drm_i915_gem_object *obj;
     int ret;
 
-    obj = i915_gem_object_create_stolen(dev, size);
-    if (obj == NULL)
-        obj = i915_gem_alloc_object(dev, size);
+    obj = i915_gem_alloc_object(dev, size);
     if (obj == NULL)
         return ERR_PTR(-ENOMEM);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9daa2883ac18..dcc6a88c560e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2546,6 +2546,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
     struct drm_i915_private *dev_priv = dev->dev_private;
     struct drm_i915_gem_object *obj;
     struct i915_address_space *vm;
+    struct i915_vma *vma;
+    bool flush;
 
     i915_check_and_clear_faults(dev);
 
@@ -2555,16 +2557,23 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
                        dev_priv->gtt.base.total,
                        true);
 
+    /* Cache flush objects bound into GGTT and rebind them. */
+    vm = &dev_priv->gtt.base;
     list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-        struct i915_vma *vma = i915_gem_obj_to_vma(obj,
-                               &dev_priv->gtt.base);
-        if (!vma)
-            continue;
+        flush = false;
+        list_for_each_entry(vma, &obj->vma_list, vma_link) {
+            if (vma->vm != vm)
+                continue;
 
-        i915_gem_clflush_object(obj, obj->pin_display);
-        WARN_ON(i915_vma_bind(vma, obj->cache_level, PIN_UPDATE));
-    }
+            WARN_ON(i915_vma_bind(vma, obj->cache_level,
+                                  PIN_UPDATE));
+
+            flush = true;
+        }
+
+        if (flush)
+            i915_gem_clflush_object(obj, obj->pin_display);
+    }
 
     if (INTEL_INFO(dev)->gen >= 8) {
         if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
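Note: the resume path previously rebound only the single GGTT VMA returned
by i915_gem_obj_to_vma(); an object can have several VMAs in the GGTT, and
the rest were silently left unbound. The new loop is a per-object walk with
a flush-once flag, reduced here to its shape (hypothetical names):

    /* For each bound object: rebind every VMA that lives in the target
     * VM, and pay the cache-flush cost once per object, not per VMA. */
    list_for_each_entry(obj, &bound_list, global_list) {
        bool flush = false;

        list_for_each_entry(vma, &obj->vma_list, vma_link) {
            if (vma->vm != vm)
                continue;           /* VMA belongs to another VM */
            rebind(vma);            /* per-mapping work */
            flush = true;
        }

        if (flush)
            flush_object(obj);      /* per-object work, at most once */
    }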
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 633bd1fcab69..d61e74a08f82 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -183,8 +183,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
     if (IS_GEN4(dev)) {
         uint32_t ddc2 = I915_READ(DCC2);
 
-        if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
+        if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE)) {
+            /* Since the swizzling may vary within an
+             * object, we have no idea what the swizzling
+             * is for any page in particular. Thus we
+             * cannot migrate tiled pages using the GPU,
+             * nor can we tell userspace what the exact
+             * swizzling is for any object.
+             */
             dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
+            swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+            swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+        }
     }
 
     if (dcc == 0xffffffff) {
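Note: with an L-shaped memory configuration the bit-6 swizzle differs
between regions of memory, so no single swizzle mode describes an object;
besides pinning swizzled pages, the fix now also reports
I915_BIT_6_SWIZZLE_UNKNOWN to userspace. For orientation only (illustrative,
not the driver's detection code), a *uniform* swizzle mode is a fixed
address transform such as:

    /* Classic "9_10" bit-6 swizzling: bit 6 of the tiled offset is
     * XORed with address bits 9 and 10. L-shaped DIMM setups break the
     * fixed-transform assumption, hence SWIZZLE_UNKNOWN. */
    static u32 swizzle_bit6_9_10(u32 offset)
    {
        u32 bit6 = ((offset >> 9) ^ (offset >> 10)) & 1;

        return offset ^ (bit6 << 6);
    }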
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1b61f9810387..ba9321998a41 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4854,6 +4854,9 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
     struct intel_plane *intel_plane;
     int pipe = intel_crtc->pipe;
 
+    if (!intel_crtc->active)
+        return;
+
     intel_crtc_wait_for_pending_flips(crtc);
 
     intel_pre_disable_primary(crtc);
@@ -7887,7 +7890,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
     int pipe = pipe_config->cpu_transcoder;
     enum dpio_channel port = vlv_pipe_to_channel(pipe);
     intel_clock_t clock;
-    u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
+    u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
     int refclk = 100000;
 
     mutex_lock(&dev_priv->sb_lock);
@@ -7895,10 +7898,13 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
     pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
     pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
     pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
+    pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
     mutex_unlock(&dev_priv->sb_lock);
 
     clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
-    clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
+    clock.m2 = (pll_dw0 & 0xff) << 22;
+    if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+        clock.m2 |= pll_dw2 & 0x3fffff;
     clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
     clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
     clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
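Note: CHV stores M2 as an 8.22 fixed-point value split across two
registers, and the 22 fraction bits are only meaningful when the
fractional divider is enabled in PLL_DW3. The old readback ORed the
fraction in unconditionally, inflating the computed clock whenever stale
fraction bits were present. A worked example with illustrative register
values:

    /* pll_dw0 & 0xff     = 0x22     -> integer part of M2 (bits 29:22)
     * pll_dw2 & 0x3fffff = 0x0c0000 -> stale fraction bits (bits 21:0)
     *
     * old: m2 = (0x22 << 22) | 0x0c0000 = 0x088c0000  (wrong, frac off)
     * new: m2 = (0x22 << 22)            = 0x08800000
     */
    u32 m2 = (pll_dw0 & 0xff) << 22;     /* integer bits */
    if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)  /* fraction valid only here */
        m2 |= pll_dw2 & 0x3fffff;        /* fraction bits */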
@@ -13270,7 +13276,7 @@ intel_check_primary_plane(struct drm_plane *plane,
     if (ret)
         return ret;
 
-    if (intel_crtc->active) {
+    if (crtc_state->base.active) {
         struct intel_plane_state *old_state =
             to_intel_plane_state(plane->state);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index f2daad8c3d96..7841970de48d 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -285,7 +285,7 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
 
     if (wait) {
         if (!wait_for_completion_timeout(&engine->compl,
-                msecs_to_jiffies(1))) {
+                msecs_to_jiffies(100))) {
             dev_err(dmm->dev, "timed out waiting for done\n");
             ret = -ETIMEDOUT;
         }
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index ae2df41f216f..12081e61d45a 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -177,7 +177,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
         struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
 struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
 int omap_framebuffer_pin(struct drm_framebuffer *fb);
-int omap_framebuffer_unpin(struct drm_framebuffer *fb);
+void omap_framebuffer_unpin(struct drm_framebuffer *fb);
 void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
         struct omap_drm_window *win, struct omap_overlay_info *info);
 struct drm_connector *omap_framebuffer_get_next_connector(
@@ -211,7 +211,7 @@ void omap_gem_dma_sync(struct drm_gem_object *obj,
         enum dma_data_direction dir);
 int omap_gem_get_paddr(struct drm_gem_object *obj,
         dma_addr_t *paddr, bool remap);
-int omap_gem_put_paddr(struct drm_gem_object *obj);
+void omap_gem_put_paddr(struct drm_gem_object *obj);
 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
         bool remap);
 int omap_gem_put_pages(struct drm_gem_object *obj);
@@ -236,7 +236,7 @@ static inline int align_pitch(int pitch, int width, int bpp)
     /* PVR needs alignment to 8 pixels.. right now that is the most
      * restrictive stride requirement..
      */
-    return ALIGN(pitch, 8 * bytespp);
+    return roundup(pitch, 8 * bytespp);
 }
 
 /* map crtc to vblank mask */
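Note: the align_pitch() change matters because the kernel's ALIGN() is
mask-based and only correct for power-of-two boundaries, while roundup()
divides and multiplies and handles any step. 8 * bytespp is not a power of
two for 24 bpp formats (step = 24), where ALIGN() quietly returns a
non-multiple:

    /* ALIGN(x, a)   == (x + a - 1) & ~(a - 1)   -- power-of-two only
     * roundup(x, a) == ((x + a - 1) / a) * a    -- any step
     *
     * step = 24 (8 pixels at 3 bytes/pixel):
     *   ALIGN(100, 24)   = 123 & ~23 = 104   -- not a multiple of 24!
     *   roundup(100, 24) = 5 * 24    = 120   -- correct
     */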
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 0b967e76df1a..51b1219af87f 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -287,10 +287,10 @@ fail:
 }
 
 /* unpin, no longer being scanned out: */
-int omap_framebuffer_unpin(struct drm_framebuffer *fb)
+void omap_framebuffer_unpin(struct drm_framebuffer *fb)
 {
     struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
-    int ret, i, n = drm_format_num_planes(fb->pixel_format);
+    int i, n = drm_format_num_planes(fb->pixel_format);
 
     mutex_lock(&omap_fb->lock);
 
@@ -298,24 +298,16 @@ int omap_framebuffer_unpin(struct drm_framebuffer *fb)
 
     if (omap_fb->pin_count > 0) {
         mutex_unlock(&omap_fb->lock);
-        return 0;
+        return;
     }
 
     for (i = 0; i < n; i++) {
         struct plane *plane = &omap_fb->planes[i];
-        ret = omap_gem_put_paddr(plane->bo);
-        if (ret)
-            goto fail;
+        omap_gem_put_paddr(plane->bo);
         plane->paddr = 0;
     }
 
     mutex_unlock(&omap_fb->lock);
-
-    return 0;
-
-fail:
-    mutex_unlock(&omap_fb->lock);
-    return ret;
 }
 
 struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 23b5a84389e3..720d16bce7e8 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -135,7 +135,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
     fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
     if (fbdev->ywrap_enabled) {
         /* need to align pitch to page size if using DMM scrolling */
-        mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);
+        mode_cmd.pitches[0] = PAGE_ALIGN(mode_cmd.pitches[0]);
     }
 
     /* allocate backing bo */
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 2ab77801cf5f..7ed08fdc4c42 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -808,10 +808,10 @@ fail:
 /* Release physical address, when DMA is no longer being performed.. this
  * could potentially unpin and unmap buffers from TILER
  */
-int omap_gem_put_paddr(struct drm_gem_object *obj)
+void omap_gem_put_paddr(struct drm_gem_object *obj)
 {
     struct omap_gem_object *omap_obj = to_omap_bo(obj);
-    int ret = 0;
+    int ret;
 
     mutex_lock(&obj->dev->struct_mutex);
     if (omap_obj->paddr_cnt > 0) {
@@ -821,7 +821,6 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
         if (ret) {
             dev_err(obj->dev->dev,
                     "could not unpin pages: %d\n", ret);
-            goto fail;
         }
         ret = tiler_release(omap_obj->block);
         if (ret) {
@@ -832,9 +831,8 @@ int omap_gem_put_paddr(struct drm_gem_object *obj)
             omap_obj->block = NULL;
         }
     }
-fail:
+
     mutex_unlock(&obj->dev->struct_mutex);
-    return ret;
 }
 
 /* Get rotated scanout address (only valid if already pinned), at the
@@ -1378,11 +1376,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
 
     omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
     if (!omap_obj)
-        goto fail;
-
-    spin_lock(&priv->list_lock);
-    list_add(&omap_obj->mm_list, &priv->obj_list);
-    spin_unlock(&priv->list_lock);
+        return NULL;
 
     obj = &omap_obj->base;
 
@@ -1392,11 +1386,19 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
          */
         omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
                 &omap_obj->paddr, GFP_KERNEL);
-        if (omap_obj->vaddr)
-            flags |= OMAP_BO_DMA;
+        if (!omap_obj->vaddr) {
+            kfree(omap_obj);
+
+            return NULL;
+        }
 
+        flags |= OMAP_BO_DMA;
     }
 
+    spin_lock(&priv->list_lock);
+    list_add(&omap_obj->mm_list, &priv->obj_list);
+    spin_unlock(&priv->list_lock);
+
     omap_obj->flags = flags;
 
     if (flags & OMAP_BO_TILED) {
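Note: besides freeing omap_obj on the dma_alloc_writecombine() failure
path, this hunk moves the list_add() after the last point of failure, so a
half-constructed object is never visible on priv->obj_list. The general
publish-last shape, reduced (hypothetical names):

    obj = kzalloc(sizeof(*obj), GFP_KERNEL);
    if (!obj)
        return NULL;            /* nothing to undo */

    obj->vaddr = dma_alloc_writecombine(dev, size, &obj->paddr, GFP_KERNEL);
    if (!obj->vaddr) {
        kfree(obj);             /* not published: plain free suffices */
        return NULL;
    }

    /* Publish to shared state only once construction cannot fail. */
    spin_lock(&priv->list_lock);
    list_add(&obj->mm_list, &priv->obj_list);
    spin_unlock(&priv->list_lock);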
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index cfa8276c4deb..098904696a5c 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -17,6 +17,7 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 
@@ -153,9 +154,34 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
     dispc_ovl_enable(omap_plane->id, false);
 }
 
+static int omap_plane_atomic_check(struct drm_plane *plane,
+                                   struct drm_plane_state *state)
+{
+    struct drm_crtc_state *crtc_state;
+
+    if (!state->crtc)
+        return 0;
+
+    crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+    if (IS_ERR(crtc_state))
+        return PTR_ERR(crtc_state);
+
+    if (state->crtc_x < 0 || state->crtc_y < 0)
+        return -EINVAL;
+
+    if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)
+        return -EINVAL;
+
+    if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
+        return -EINVAL;
+
+    return 0;
+}
+
 static const struct drm_plane_helper_funcs omap_plane_helper_funcs = {
     .prepare_fb = omap_plane_prepare_fb,
     .cleanup_fb = omap_plane_cleanup_fb,
+    .atomic_check = omap_plane_atomic_check,
     .atomic_update = omap_plane_atomic_update,
     .atomic_disable = omap_plane_atomic_disable,
 };
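Note: the new atomic_check hook rejects plane rectangles that would extend
past the CRTC's adjusted mode before they reach the hardware. Concretely,
for a 1920x1080 mode (illustrative numbers):

    /* crtc_x = 1600, crtc_w = 256 : 1600 + 256 = 1856 <= 1920 -> OK
     * crtc_x = 1800, crtc_w = 256 : 1800 + 256 = 2056 >  1920 -> -EINVAL
     * crtc_x = -8                 : negative position          -> -EINVAL
     * state->crtc == NULL         : nothing to check           -> 0
     */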
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 4ecf5caa8c6d..248953d2fdb7 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7964,23 +7964,27 @@ restart_ih:
     case 1: /* D1 vblank/vline */
         switch (src_data) {
         case 0: /* D1 vblank */
-            if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
-                if (rdev->irq.crtc_vblank_int[0]) {
-                    drm_handle_vblank(rdev->ddev, 0);
-                    rdev->pm.vblank_sync = true;
-                    wake_up(&rdev->irq.vblank_queue);
-                }
-                if (atomic_read(&rdev->irq.pflip[0]))
-                    radeon_crtc_handle_vblank(rdev, 0);
-                rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-                DRM_DEBUG("IH: D1 vblank\n");
+            if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            if (rdev->irq.crtc_vblank_int[0]) {
+                drm_handle_vblank(rdev->ddev, 0);
+                rdev->pm.vblank_sync = true;
+                wake_up(&rdev->irq.vblank_queue);
             }
+            if (atomic_read(&rdev->irq.pflip[0]))
+                radeon_crtc_handle_vblank(rdev, 0);
+            rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+            DRM_DEBUG("IH: D1 vblank\n");
+
             break;
         case 1: /* D1 vline */
-            if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-                DRM_DEBUG("IH: D1 vline\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+            DRM_DEBUG("IH: D1 vline\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -7990,23 +7994,27 @@ restart_ih:
     case 2: /* D2 vblank/vline */
         switch (src_data) {
         case 0: /* D2 vblank */
-            if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-                if (rdev->irq.crtc_vblank_int[1]) {
-                    drm_handle_vblank(rdev->ddev, 1);
-                    rdev->pm.vblank_sync = true;
-                    wake_up(&rdev->irq.vblank_queue);
-                }
-                if (atomic_read(&rdev->irq.pflip[1]))
-                    radeon_crtc_handle_vblank(rdev, 1);
-                rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
-                DRM_DEBUG("IH: D2 vblank\n");
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            if (rdev->irq.crtc_vblank_int[1]) {
+                drm_handle_vblank(rdev->ddev, 1);
+                rdev->pm.vblank_sync = true;
+                wake_up(&rdev->irq.vblank_queue);
             }
+            if (atomic_read(&rdev->irq.pflip[1]))
+                radeon_crtc_handle_vblank(rdev, 1);
+            rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+            DRM_DEBUG("IH: D2 vblank\n");
+
             break;
         case 1: /* D2 vline */
-            if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
-                DRM_DEBUG("IH: D2 vline\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+            DRM_DEBUG("IH: D2 vline\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8016,23 +8024,27 @@ restart_ih:
     case 3: /* D3 vblank/vline */
         switch (src_data) {
         case 0: /* D3 vblank */
-            if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-                if (rdev->irq.crtc_vblank_int[2]) {
-                    drm_handle_vblank(rdev->ddev, 2);
-                    rdev->pm.vblank_sync = true;
-                    wake_up(&rdev->irq.vblank_queue);
-                }
-                if (atomic_read(&rdev->irq.pflip[2]))
-                    radeon_crtc_handle_vblank(rdev, 2);
-                rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
-                DRM_DEBUG("IH: D3 vblank\n");
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            if (rdev->irq.crtc_vblank_int[2]) {
+                drm_handle_vblank(rdev->ddev, 2);
+                rdev->pm.vblank_sync = true;
+                wake_up(&rdev->irq.vblank_queue);
             }
+            if (atomic_read(&rdev->irq.pflip[2]))
+                radeon_crtc_handle_vblank(rdev, 2);
+            rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+            DRM_DEBUG("IH: D3 vblank\n");
+
             break;
         case 1: /* D3 vline */
-            if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
-                DRM_DEBUG("IH: D3 vline\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+            DRM_DEBUG("IH: D3 vline\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8042,23 +8054,27 @@ restart_ih:
     case 4: /* D4 vblank/vline */
         switch (src_data) {
         case 0: /* D4 vblank */
-            if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-                if (rdev->irq.crtc_vblank_int[3]) {
-                    drm_handle_vblank(rdev->ddev, 3);
-                    rdev->pm.vblank_sync = true;
-                    wake_up(&rdev->irq.vblank_queue);
-                }
-                if (atomic_read(&rdev->irq.pflip[3]))
-                    radeon_crtc_handle_vblank(rdev, 3);
-                rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
-                DRM_DEBUG("IH: D4 vblank\n");
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            if (rdev->irq.crtc_vblank_int[3]) {
+                drm_handle_vblank(rdev->ddev, 3);
+                rdev->pm.vblank_sync = true;
+                wake_up(&rdev->irq.vblank_queue);
             }
+            if (atomic_read(&rdev->irq.pflip[3]))
+                radeon_crtc_handle_vblank(rdev, 3);
+            rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+            DRM_DEBUG("IH: D4 vblank\n");
+
             break;
         case 1: /* D4 vline */
-            if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
-                DRM_DEBUG("IH: D4 vline\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+            DRM_DEBUG("IH: D4 vline\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8068,23 +8084,27 @@ restart_ih:
     case 5: /* D5 vblank/vline */
         switch (src_data) {
         case 0: /* D5 vblank */
-            if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-                if (rdev->irq.crtc_vblank_int[4]) {
-                    drm_handle_vblank(rdev->ddev, 4);
-                    rdev->pm.vblank_sync = true;
-                    wake_up(&rdev->irq.vblank_queue);
-                }
-                if (atomic_read(&rdev->irq.pflip[4]))
-                    radeon_crtc_handle_vblank(rdev, 4);
-                rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-                DRM_DEBUG("IH: D5 vblank\n");
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            if (rdev->irq.crtc_vblank_int[4]) {
+                drm_handle_vblank(rdev->ddev, 4);
+                rdev->pm.vblank_sync = true;
+                wake_up(&rdev->irq.vblank_queue);
             }
+            if (atomic_read(&rdev->irq.pflip[4]))
+                radeon_crtc_handle_vblank(rdev, 4);
+            rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+            DRM_DEBUG("IH: D5 vblank\n");
+
             break;
         case 1: /* D5 vline */
-            if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-                DRM_DEBUG("IH: D5 vline\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+            DRM_DEBUG("IH: D5 vline\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8094,23 +8114,27 @@ restart_ih:
     case 6: /* D6 vblank/vline */
         switch (src_data) {
         case 0: /* D6 vblank */
-            if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-                if (rdev->irq.crtc_vblank_int[5]) {
-                    drm_handle_vblank(rdev->ddev, 5);
-                    rdev->pm.vblank_sync = true;
-                    wake_up(&rdev->irq.vblank_queue);
-                }
-                if (atomic_read(&rdev->irq.pflip[5]))
-                    radeon_crtc_handle_vblank(rdev, 5);
-                rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-                DRM_DEBUG("IH: D6 vblank\n");
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            if (rdev->irq.crtc_vblank_int[5]) {
+                drm_handle_vblank(rdev->ddev, 5);
+                rdev->pm.vblank_sync = true;
+                wake_up(&rdev->irq.vblank_queue);
             }
+            if (atomic_read(&rdev->irq.pflip[5]))
+                radeon_crtc_handle_vblank(rdev, 5);
+            rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+            DRM_DEBUG("IH: D6 vblank\n");
+
             break;
         case 1: /* D6 vline */
-            if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-                DRM_DEBUG("IH: D6 vline\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+            DRM_DEBUG("IH: D6 vline\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -8130,88 +8154,112 @@ restart_ih:
     case 42: /* HPD hotplug */
         switch (src_data) {
         case 0:
-            if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
-                queue_hotplug = true;
-                DRM_DEBUG("IH: HPD1\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
+            queue_hotplug = true;
+            DRM_DEBUG("IH: HPD1\n");
+
             break;
         case 1:
-            if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-                queue_hotplug = true;
-                DRM_DEBUG("IH: HPD2\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+            queue_hotplug = true;
+            DRM_DEBUG("IH: HPD2\n");
+
             break;
         case 2:
-            if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-                queue_hotplug = true;
-                DRM_DEBUG("IH: HPD3\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+            queue_hotplug = true;
+            DRM_DEBUG("IH: HPD3\n");
+
             break;
         case 3:
-            if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-                queue_hotplug = true;
-                DRM_DEBUG("IH: HPD4\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+            queue_hotplug = true;
+            DRM_DEBUG("IH: HPD4\n");
+
             break;
         case 4:
-            if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-                queue_hotplug = true;
-                DRM_DEBUG("IH: HPD5\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+            queue_hotplug = true;
+            DRM_DEBUG("IH: HPD5\n");
+
             break;
         case 5:
-            if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-                queue_hotplug = true;
-                DRM_DEBUG("IH: HPD6\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+            queue_hotplug = true;
+            DRM_DEBUG("IH: HPD6\n");
+
             break;
         case 6:
-            if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-                queue_dp = true;
-                DRM_DEBUG("IH: HPD_RX 1\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+            queue_dp = true;
+            DRM_DEBUG("IH: HPD_RX 1\n");
+
             break;
         case 7:
-            if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-                queue_dp = true;
-                DRM_DEBUG("IH: HPD_RX 2\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+            queue_dp = true;
+            DRM_DEBUG("IH: HPD_RX 2\n");
+
             break;
         case 8:
-            if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-                queue_dp = true;
-                DRM_DEBUG("IH: HPD_RX 3\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+            queue_dp = true;
+            DRM_DEBUG("IH: HPD_RX 3\n");
+
             break;
         case 9:
-            if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-                queue_dp = true;
-                DRM_DEBUG("IH: HPD_RX 4\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+            queue_dp = true;
+            DRM_DEBUG("IH: HPD_RX 4\n");
+
             break;
         case 10:
-            if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-                queue_dp = true;
-                DRM_DEBUG("IH: HPD_RX 5\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+            queue_dp = true;
+            DRM_DEBUG("IH: HPD_RX 5\n");
+
             break;
         case 11:
-            if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-                rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-                queue_dp = true;
-                DRM_DEBUG("IH: HPD_RX 6\n");
-            }
+            if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+                DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+            queue_dp = true;
+            DRM_DEBUG("IH: HPD_RX 6\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3a6d483a2c36..0acde1949c18 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4924,7 +4924,7 @@ restart_ih:
         return IRQ_NONE;
 
     rptr = rdev->ih.rptr;
-    DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+    DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
     /* Order reading of wptr vs. reading of IH ring data */
     rmb();
@@ -4942,23 +4942,27 @@ restart_ih:
     case 1: /* D1 vblank/vline */
         switch (src_data) {
         case 0: /* D1 vblank */
-            if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
-                if (rdev->irq.crtc_vblank_int[0]) {
-                    drm_handle_vblank(rdev->ddev, 0);
-                    rdev->pm.vblank_sync = true;
-                    wake_up(&rdev->irq.vblank_queue);
-                }
-                if (atomic_read(&rdev->irq.pflip[0]))
-                    radeon_crtc_handle_vblank(rdev, 0);
-                rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-                DRM_DEBUG("IH: D1 vblank\n");
+            if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+                DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+            if (rdev->irq.crtc_vblank_int[0]) {
+                drm_handle_vblank(rdev->ddev, 0);
+                rdev->pm.vblank_sync = true;
+                wake_up(&rdev->irq.vblank_queue);
             }
+            if (atomic_read(&rdev->irq.pflip[0]))
+                radeon_crtc_handle_vblank(rdev, 0);
+            rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+            DRM_DEBUG("IH: D1 vblank\n");
+
             break;
         case 1: /* D1 vline */
-            if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
-                rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-                DRM_DEBUG("IH: D1 vline\n");
-            }
+            if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+                DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+            DRM_DEBUG("IH: D1 vline\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4968,23 +4972,27 @@ restart_ih:
     case 2: /* D2 vblank/vline */
         switch (src_data) {
         case 0: /* D2 vblank */
-            if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-                if (rdev->irq.crtc_vblank_int[1]) {
-                    drm_handle_vblank(rdev->ddev, 1);
-                    rdev->pm.vblank_sync = true;
-                    wake_up(&rdev->irq.vblank_queue);
-                }
-                if (atomic_read(&rdev->irq.pflip[1]))
-                    radeon_crtc_handle_vblank(rdev, 1);
-                rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
-                DRM_DEBUG("IH: D2 vblank\n");
+            if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+                DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+            if (rdev->irq.crtc_vblank_int[1]) {
+                drm_handle_vblank(rdev->ddev, 1);
+                rdev->pm.vblank_sync = true;
+                wake_up(&rdev->irq.vblank_queue);
             }
+            if (atomic_read(&rdev->irq.pflip[1]))
+                radeon_crtc_handle_vblank(rdev, 1);
+            rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+            DRM_DEBUG("IH: D2 vblank\n");
+
             break;
         case 1: /* D2 vline */
-            if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-                rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
-                DRM_DEBUG("IH: D2 vline\n");
-            }
+            if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+                DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+            DRM_DEBUG("IH: D2 vline\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4994,23 +5002,27 @@ restart_ih:
     case 3: /* D3 vblank/vline */
         switch (src_data) {
         case 0: /* D3 vblank */
-            if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-                if (rdev->irq.crtc_vblank_int[2]) {
-                    drm_handle_vblank(rdev->ddev, 2);
-                    rdev->pm.vblank_sync = true;
-                    wake_up(&rdev->irq.vblank_queue);
-                }
-                if (atomic_read(&rdev->irq.pflip[2]))
-                    radeon_crtc_handle_vblank(rdev, 2);
-                rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
-                DRM_DEBUG("IH: D3 vblank\n");
+            if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+                DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
+
+            if (rdev->irq.crtc_vblank_int[2]) {
+                drm_handle_vblank(rdev->ddev, 2);
+                rdev->pm.vblank_sync = true;
+                wake_up(&rdev->irq.vblank_queue);
             }
+            if (atomic_read(&rdev->irq.pflip[2]))
+                radeon_crtc_handle_vblank(rdev, 2);
+            rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+            DRM_DEBUG("IH: D3 vblank\n");
+
             break;
         case 1: /* D3 vline */
-            if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-                rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
-                DRM_DEBUG("IH: D3 vline\n");
-            }
+            if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+                DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+            DRM_DEBUG("IH: D3 vline\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5020,23 +5032,27 @@ restart_ih:
     case 4: /* D4 vblank/vline */
         switch (src_data) {
         case 0: /* D4 vblank */
-            if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-                if (rdev->irq.crtc_vblank_int[3]) {
-                    drm_handle_vblank(rdev->ddev, 3);
-                    rdev->pm.vblank_sync = true;
-                    wake_up(&rdev->irq.vblank_queue);
-                }
-                if (atomic_read(&rdev->irq.pflip[3]))
-                    radeon_crtc_handle_vblank(rdev, 3);
-                rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
-                DRM_DEBUG("IH: D4 vblank\n");
+            if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+                DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
+
+            if (rdev->irq.crtc_vblank_int[3]) {
+                drm_handle_vblank(rdev->ddev, 3);
+                rdev->pm.vblank_sync = true;
+                wake_up(&rdev->irq.vblank_queue);
             }
+            if (atomic_read(&rdev->irq.pflip[3]))
+                radeon_crtc_handle_vblank(rdev, 3);
+            rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+            DRM_DEBUG("IH: D4 vblank\n");
+
             break;
         case 1: /* D4 vline */
-            if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-                rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
-                DRM_DEBUG("IH: D4 vline\n");
-            }
+            if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+                DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+            DRM_DEBUG("IH: D4 vline\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5046,23 +5062,27 @@ restart_ih:
     case 5: /* D5 vblank/vline */
         switch (src_data) {
         case 0: /* D5 vblank */
-            if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-                if (rdev->irq.crtc_vblank_int[4]) {
-                    drm_handle_vblank(rdev->ddev, 4);
-                    rdev->pm.vblank_sync = true;
-                    wake_up(&rdev->irq.vblank_queue);
-                }
-                if (atomic_read(&rdev->irq.pflip[4]))
-                    radeon_crtc_handle_vblank(rdev, 4);
-                rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-                DRM_DEBUG("IH: D5 vblank\n");
+            if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+                DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
+
+            if (rdev->irq.crtc_vblank_int[4]) {
+                drm_handle_vblank(rdev->ddev, 4);
+                rdev->pm.vblank_sync = true;
+                wake_up(&rdev->irq.vblank_queue);
             }
+            if (atomic_read(&rdev->irq.pflip[4]))
+                radeon_crtc_handle_vblank(rdev, 4);
+            rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+            DRM_DEBUG("IH: D5 vblank\n");
+
             break;
         case 1: /* D5 vline */
-            if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-                rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-                DRM_DEBUG("IH: D5 vline\n");
-            }
+            if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+                DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+            DRM_DEBUG("IH: D5 vline\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5072,23 +5092,27 @@ restart_ih:
     case 6: /* D6 vblank/vline */
         switch (src_data) {
         case 0: /* D6 vblank */
-            if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-                if (rdev->irq.crtc_vblank_int[5]) {
-                    drm_handle_vblank(rdev->ddev, 5);
-                    rdev->pm.vblank_sync = true;
-                    wake_up(&rdev->irq.vblank_queue);
-                }
-                if (atomic_read(&rdev->irq.pflip[5]))
-                    radeon_crtc_handle_vblank(rdev, 5);
-                rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-                DRM_DEBUG("IH: D6 vblank\n");
+            if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+                DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
+
+            if (rdev->irq.crtc_vblank_int[5]) {
+                drm_handle_vblank(rdev->ddev, 5);
+                rdev->pm.vblank_sync = true;
+                wake_up(&rdev->irq.vblank_queue);
             }
+            if (atomic_read(&rdev->irq.pflip[5]))
+                radeon_crtc_handle_vblank(rdev, 5);
+            rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+            DRM_DEBUG("IH: D6 vblank\n");
+
             break;
         case 1: /* D6 vline */
-            if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-                rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-                DRM_DEBUG("IH: D6 vline\n");
-            }
+            if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+                DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
+
+            rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+            DRM_DEBUG("IH: D6 vline\n");
+
             break;
         default:
             DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5108,88 +5132,100 @@ restart_ih:
 		case 42: /* HPD hotplug */
 			switch (src_data) {
 			case 0:
-				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD1\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD1\n");
 				break;
 			case 1:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD2\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD2\n");
 				break;
 			case 2:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD3\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD3\n");
 				break;
 			case 3:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD4\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD4\n");
 				break;
 			case 4:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD5\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD5\n");
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD6\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD6\n");
 				break;
 			case 6:
-				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 1\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 1\n");
 				break;
 			case 7:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 2\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 2\n");
 				break;
 			case 8:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 3\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 3\n");
 				break;
 			case 9:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 4\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 4\n");
 				break;
 			case 10:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 5\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 5\n");
 				break;
 			case 11:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 6\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 6\n");
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -5199,46 +5235,52 @@ restart_ih:
 		case 44: /* hdmi */
 			switch (src_data) {
 			case 0:
-				if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI0\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI0\n");
 				break;
 			case 1:
-				if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI1\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI1\n");
 				break;
 			case 2:
-				if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI2\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI2\n");
 				break;
 			case 3:
-				if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI3\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI3\n");
 				break;
 			case 4:
-				if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI4\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI4\n");
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI5\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI5\n");
 				break;
 			default:
 				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8e5aeeb058a5..158872eb78e4 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2162,18 +2162,20 @@ static int cayman_startup(struct radeon_device *rdev)
 		DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
 	}
 
-	ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
-	if (ring->ring_size)
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+	if (rdev->family == CHIP_ARUBA) {
+		ring = &rdev->ring[TN_RING_TYPE_VCE1_INDEX];
+		if (ring->ring_size)
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
 
-	ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
-	if (ring->ring_size)
-		r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
+		ring = &rdev->ring[TN_RING_TYPE_VCE2_INDEX];
+		if (ring->ring_size)
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0, 0x0);
 
-	if (!r)
-		r = vce_v1_0_init(rdev);
-	else if (r != -ENOENT)
-		DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+		if (!r)
+			r = vce_v1_0_init(rdev);
+		if (r)
+			DRM_ERROR("radeon: failed initializing VCE (%d).\n", r);
+	}
 
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
@@ -2396,7 +2398,8 @@ void cayman_fini(struct radeon_device *rdev)
 	radeon_irq_kms_fini(rdev);
 	uvd_v1_0_fini(rdev);
 	radeon_uvd_fini(rdev);
-	radeon_vce_fini(rdev);
+	if (rdev->family == CHIP_ARUBA)
+		radeon_vce_fini(rdev);
 	cayman_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 35dafd77a639..4ea5b10ff5f4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -4086,23 +4086,27 @@ restart_ih:
 		case 1: /* D1 vblank/vline */
 			switch (src_data) {
 			case 0: /* D1 vblank */
-				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[0]) {
-						drm_handle_vblank(rdev->ddev, 0);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[0]))
-						radeon_crtc_handle_vblank(rdev, 0);
-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D1 vblank\n");
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[0]) {
+					drm_handle_vblank(rdev->ddev, 0);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[0]))
+					radeon_crtc_handle_vblank(rdev, 0);
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D1 vblank\n");
+
 				break;
 			case 1: /* D1 vline */
-				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D1 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D1 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4112,23 +4116,27 @@ restart_ih:
 		case 5: /* D2 vblank/vline */
 			switch (src_data) {
 			case 0: /* D2 vblank */
-				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[1]) {
-						drm_handle_vblank(rdev->ddev, 1);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[1]))
-						radeon_crtc_handle_vblank(rdev, 1);
-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D2 vblank\n");
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[1]) {
+					drm_handle_vblank(rdev->ddev, 1);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[1]))
+					radeon_crtc_handle_vblank(rdev, 1);
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D2 vblank\n");
+
 				break;
 			case 1: /* D1 vline */
-				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D2 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D2 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4148,46 +4156,53 @@ restart_ih:
 		case 19: /* HPD/DAC hotplug */
 			switch (src_data) {
 			case 0:
-				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD1\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
+					DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD1\n");
 				break;
 			case 1:
-				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD2\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
+					DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD2\n");
 				break;
 			case 4:
-				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD3\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
+					DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD3\n");
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD4\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
+					DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD4\n");
 				break;
 			case 10:
-				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD5\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
+					DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD5\n");
 				break;
 			case 12:
-				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD6\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
+					DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD6\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -4197,18 +4212,22 @@ restart_ih:
 		case 21: /* hdmi */
 			switch (src_data) {
 			case 4:
-				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI0\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI0\n");
+
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI1\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI1\n");
+
 				break;
 			default:
 				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 09e3f39925fa..98f9adaccc3d 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2483,7 +2483,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev,
 	struct drm_buf *buf;
 	u32 *buffer;
 	const u8 __user *data;
-	int size, pass_size;
+	unsigned int size, pass_size;
 	u64 src_offset, dst_offset;
 
 	if (!radeon_check_offset(dev_priv, tex->offset)) {
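
The one-line r600_cp.c change is the whole underflow fix: size and pass_size are byte counts derived from userspace-controlled ioctl arguments and later handed to copy routines, and a signed count that goes negative turns into an enormous length the moment it is converted to size_t. Declaring them unsigned keeps the subsequent bounds comparisons meaningful. A tiny runnable illustration of the generic failure mode (not the driver's actual arithmetic):

    #include <stdio.h>

    int main(void)
    {
        int size = 16;
        int pass_size = 24;   /* imagine this value came from an ioctl */

        size -= pass_size;    /* size is now -8 */

        /* memcpy() and friends take size_t; -8 becomes a huge length */
        printf("signed %d -> size_t %zu\n", size, (size_t)size);
        return 0;
    }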
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 45e54060ee97..afaf346bd50e 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -91,15 +91,34 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
 	struct radeon_device *rdev = crtc->dev->dev_private;
 
 	if (ASIC_IS_DCE4(rdev)) {
+		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+		       upper_32_bits(radeon_crtc->cursor_addr));
+		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+		       lower_32_bits(radeon_crtc->cursor_addr));
 		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
 		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
 		       EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
 		       EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
 	} else if (ASIC_IS_AVIVO(rdev)) {
+		if (rdev->family >= CHIP_RV770) {
+			if (radeon_crtc->crtc_id)
+				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(radeon_crtc->cursor_addr));
+			else
+				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
+				       upper_32_bits(radeon_crtc->cursor_addr));
+		}
+
+		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+		       lower_32_bits(radeon_crtc->cursor_addr));
 		WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
 		WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
 		       (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
 	} else {
+		/* offset is from DISP(2)_BASE_ADDRESS */
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+		       radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
+
 		switch (radeon_crtc->crtc_id) {
 		case 0:
 			WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL);
@@ -205,8 +224,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
 			 | (x << 16)
 			 | y));
 		/* offset is from DISP(2)_BASE_ADDRESS */
-		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
-								      (yorigin * 256)));
+		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
+		       radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
+		       yorigin * 256);
 	}
 
 	radeon_crtc->cursor_x = x;
@@ -227,53 +247,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 	return ret;
 }
 
-static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
-{
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	struct radeon_device *rdev = crtc->dev->dev_private;
-	struct radeon_bo *robj = gem_to_radeon_bo(obj);
-	uint64_t gpu_addr;
-	int ret;
-
-	ret = radeon_bo_reserve(robj, false);
-	if (unlikely(ret != 0))
-		goto fail;
-	/* Only 27 bit offset for legacy cursor */
-	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
-				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
-				       &gpu_addr);
-	radeon_bo_unreserve(robj);
-	if (ret)
-		goto fail;
-
-	if (ASIC_IS_DCE4(rdev)) {
-		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
-		       upper_32_bits(gpu_addr));
-		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-		       gpu_addr & 0xffffffff);
-	} else if (ASIC_IS_AVIVO(rdev)) {
-		if (rdev->family >= CHIP_RV770) {
-			if (radeon_crtc->crtc_id)
-				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
-			else
-				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
-		}
-		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
-		       gpu_addr & 0xffffffff);
-	} else {
-		radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
-		/* offset is from DISP(2)_BASE_ADDRESS */
-		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
-	}
-
-	return 0;
-
-fail:
-	drm_gem_object_unreference_unlocked(obj);
-
-	return ret;
-}
-
 int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 			    struct drm_file *file_priv,
 			    uint32_t handle,
@@ -283,7 +256,9 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 			    int32_t hot_y)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_device *rdev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
+	struct radeon_bo *robj;
 	int ret;
 
 	if (!handle) {
@@ -305,6 +280,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 		return -ENOENT;
 	}
 
+	robj = gem_to_radeon_bo(obj);
+	ret = radeon_bo_reserve(robj, false);
+	if (ret != 0) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+	/* Only 27 bit offset for legacy cursor */
+	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+				       &radeon_crtc->cursor_addr);
+	radeon_bo_unreserve(robj);
+	if (ret) {
+		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
 	radeon_crtc->cursor_width = width;
 	radeon_crtc->cursor_height = height;
 
@@ -323,13 +315,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 		radeon_crtc->cursor_hot_y = hot_y;
 	}
 
-	ret = radeon_set_cursor(crtc, obj);
-
-	if (ret)
-		DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
-			  ret);
-	else
-		radeon_show_cursor(crtc);
+	radeon_show_cursor(crtc);
 
 	radeon_lock_cursor(crtc, false);
 
@@ -341,8 +327,7 @@ unpin:
 			radeon_bo_unpin(robj);
 			radeon_bo_unreserve(robj);
 		}
-		if (radeon_crtc->cursor_bo != obj)
-			drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+		drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
 	}
 
 	radeon_crtc->cursor_bo = obj;
@@ -360,7 +345,6 @@ unpin:
 void radeon_cursor_reset(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	int ret;
 
 	if (radeon_crtc->cursor_bo) {
 		radeon_lock_cursor(crtc, true);
@@ -368,12 +352,7 @@ void radeon_cursor_reset(struct drm_crtc *crtc)
 		radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
 					  radeon_crtc->cursor_y);
 
-		ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
-		if (ret)
-			DRM_ERROR("radeon_set_cursor returned %d, not showing "
-				  "cursor\n", ret);
-		else
-			radeon_show_cursor(crtc);
+		radeon_show_cursor(crtc);
 
 		radeon_lock_cursor(crtc, false);
 	}
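
The radeon_cursor.c hunks restructure cursor handling around a single cached address: radeon_crtc_cursor_set2() pins the BO once and stores the result in the new radeon_crtc->cursor_addr field, radeon_show_cursor() programs the hardware purely from that cache, and radeon_set_cursor() disappears along with its extra pin cycle. That split is what lets suspend/resume (in radeon_device.c below) re-pin the BO and simply re-show it. A compact model of the pattern (simplified types; the register writes are shown only as comments):

    #include <stdint.h>

    struct crtc_cursor {
        uint64_t cursor_addr;   /* GPU address cached at pin time */
    };

    /* set path: pin once, remember where the BO landed */
    static void cursor_set(struct crtc_cursor *c, uint64_t pinned_addr)
    {
        c->cursor_addr = pinned_addr;
    }

    /* show path: cannot fail, it just programs the cached address, e.g.
     * WREG32(CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(c->cursor_addr));
     * WREG32(CUR_SURFACE_ADDRESS, lower_32_bits(c->cursor_addr)); */
    static void cursor_show(const struct crtc_cursor *c)
    {
        (void)c->cursor_addr;
    }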
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 2593b1168bd6..d8319dae8358 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1080,6 +1080,22 @@ static bool radeon_check_pot_argument(int arg)
 }
 
 /**
+ * Determine a sensible default GART size according to ASIC family.
+ *
+ * @family ASIC family name
+ */
+static int radeon_gart_size_auto(enum radeon_family family)
+{
+	/* default to a larger gart size on newer asics */
+	if (family >= CHIP_TAHITI)
+		return 2048;
+	else if (family >= CHIP_RV770)
+		return 1024;
+	else
+		return 512;
+}
+
+/**
  * radeon_check_arguments - validate module params
  *
  * @rdev: radeon_device pointer
@@ -1097,27 +1113,17 @@ static void radeon_check_arguments(struct radeon_device *rdev)
 	}
 
 	if (radeon_gart_size == -1) {
-		/* default to a larger gart size on newer asics */
-		if (rdev->family >= CHIP_RV770)
-			radeon_gart_size = 1024;
-		else
-			radeon_gart_size = 512;
+		radeon_gart_size = radeon_gart_size_auto(rdev->family);
 	}
 	/* gtt size must be power of two and greater or equal to 32M */
 	if (radeon_gart_size < 32) {
 		dev_warn(rdev->dev, "gart size (%d) too small\n",
 			 radeon_gart_size);
-		if (rdev->family >= CHIP_RV770)
-			radeon_gart_size = 1024;
-		else
-			radeon_gart_size = 512;
+		radeon_gart_size = radeon_gart_size_auto(rdev->family);
 	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
 		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
 			 radeon_gart_size);
-		if (rdev->family >= CHIP_RV770)
-			radeon_gart_size = 1024;
-		else
-			radeon_gart_size = 512;
+		radeon_gart_size = radeon_gart_size_auto(rdev->family);
 	}
 	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
 
@@ -1572,11 +1578,21 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 	}
 
-	/* unpin the front buffers */
+	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
 		struct radeon_bo *robj;
 
+		if (radeon_crtc->cursor_bo) {
+			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+			r = radeon_bo_reserve(robj, false);
+			if (r == 0) {
+				radeon_bo_unpin(robj);
+				radeon_bo_unreserve(robj);
+			}
+		}
+
 		if (rfb == NULL || rfb->obj == NULL) {
 			continue;
 		}
@@ -1639,6 +1655,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 {
 	struct drm_connector *connector;
 	struct radeon_device *rdev = dev->dev_private;
+	struct drm_crtc *crtc;
 	int r;
 
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1678,6 +1695,27 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 
 	radeon_restore_bios_scratch_regs(rdev);
 
+	/* pin cursors */
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+		if (radeon_crtc->cursor_bo) {
+			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+			r = radeon_bo_reserve(robj, false);
+			if (r == 0) {
+				/* Only 27 bit offset for legacy cursor */
+				r = radeon_bo_pin_restricted(robj,
+							     RADEON_GEM_DOMAIN_VRAM,
+							     ASIC_IS_AVIVO(rdev) ?
+							     0 : 1 << 27,
+							     &radeon_crtc->cursor_addr);
+				if (r != 0)
+					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+				radeon_bo_unreserve(robj);
+			}
+		}
+	}
+
 	/* init dig PHYs, disp eng pll */
 	if (rdev->is_atom_bios) {
 		radeon_atom_encoder_init(rdev);
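
The suspend and resume hunks are deliberately symmetric: every cursor BO unpinned before the device goes down is re-pinned on the way back up, and the fresh pin address is written into radeon_crtc->cursor_addr because the BO may land somewhere else in VRAM after resume. This is the half of the cursor-corruption fix that the radeon_cursor.c refactoring above makes possible. A sketch of the invariant (simplified; reserve/unreserve and error paths elided):

    #include <stdbool.h>
    #include <stdint.h>

    struct cursor {
        bool pinned;
        uint64_t addr;        /* valid only while pinned */
    };

    static void on_suspend(struct cursor *c)
    {
        if (c->pinned)
            c->pinned = false;   /* radeon_bo_unpin() equivalent */
    }

    static void on_resume(struct cursor *c, uint64_t new_vram_addr)
    {
        /* re-pin and refresh the cached address: the old one is stale */
        c->pinned = true;
        c->addr = new_vram_addr;
        /* followed by radeon_cursor_reset() -> radeon_show_cursor() */
    }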
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 634793ea8418..aeb676708e60 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -257,6 +257,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
 	}
 
 	info->par = rfbdev;
+	info->skip_vt_switch = true;
 
 	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
 	if (ret) {
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index ac3c1310b953..013ec7106e55 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -428,7 +428,6 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp)
 {
-	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_busy *args = data;
 	struct drm_gem_object *gobj;
 	struct radeon_bo *robj;
@@ -440,10 +439,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_radeon_bo(gobj);
-	r = radeon_bo_wait(robj, &cur_placement, true);
+
+	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
+	if (r == 0)
+		r = -EBUSY;
+	else
+		r = 0;
+
+	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
 	args->domain = radeon_mem_type_to_domain(cur_placement);
 	drm_gem_object_unreference_unlocked(gobj);
-	r = radeon_gem_handle_lockup(rdev, r);
 	return r;
 }
 
@@ -471,6 +476,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		r = ret;
 
 	/* Flush HDP cache via MMIO if necessary */
+	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
 	if (rdev->asic->mmio_hdp_flush &&
 	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
 		robj->rdev->asic->mmio_hdp_flush(rdev);
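
The GEM_BUSY rework swaps the blocking radeon_bo_wait() for a lock-free RCU query: reservation_object_test_signaled_rcu() reports whether every fence on the BO has signaled (nonzero means idle), which the ioctl folds into its 0 / -EBUSY convention, and the memory type is sampled once with ACCESS_ONCE rather than returned under a lock. A model of the return-value mapping (the predicate stands in for the reservation-object call):

    #include <stdbool.h>
    #include <stdio.h>

    #define EBUSY 16

    /* stand-in for reservation_object_test_signaled_rcu(resv, true) */
    static bool all_fences_signaled(void) { return false; }

    static int gem_busy_result(void)
    {
        return all_fences_signaled() ? 0 : -EBUSY;
    }

    int main(void)
    {
        printf("busy ioctl -> %d\n", gem_busy_result());   /* -16 */
        return 0;
    }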
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6de5459316b5..07909d817381 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -343,7 +343,6 @@ struct radeon_crtc {
 	int max_cursor_width;
 	int max_cursor_height;
 	uint32_t legacy_display_base_addr;
-	uint32_t legacy_cursor_offset;
 	enum radeon_rmx_type rmx_type;
 	u8 h_border;
 	u8 v_border;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index ec10533a49b8..48d97c040f49 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -493,38 +493,35 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 	}
 
 	if (bo_va->it.start || bo_va->it.last) {
-		spin_lock(&vm->status_lock);
-		if (list_empty(&bo_va->vm_status)) {
-			/* add a clone of the bo_va to clear the old address */
-			struct radeon_bo_va *tmp;
-			spin_unlock(&vm->status_lock);
-			tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
-			if (!tmp) {
-				mutex_unlock(&vm->mutex);
-				r = -ENOMEM;
-				goto error_unreserve;
-			}
-			tmp->it.start = bo_va->it.start;
-			tmp->it.last = bo_va->it.last;
-			tmp->vm = vm;
-			tmp->bo = radeon_bo_ref(bo_va->bo);
-			spin_lock(&vm->status_lock);
-			list_add(&tmp->vm_status, &vm->freed);
+		/* add a clone of the bo_va to clear the old address */
+		struct radeon_bo_va *tmp;
+		tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+		if (!tmp) {
+			mutex_unlock(&vm->mutex);
+			r = -ENOMEM;
+			goto error_unreserve;
 		}
-		spin_unlock(&vm->status_lock);
+		tmp->it.start = bo_va->it.start;
+		tmp->it.last = bo_va->it.last;
+		tmp->vm = vm;
+		tmp->bo = radeon_bo_ref(bo_va->bo);
 
 		interval_tree_remove(&bo_va->it, &vm->va);
+		spin_lock(&vm->status_lock);
 		bo_va->it.start = 0;
 		bo_va->it.last = 0;
+		list_del_init(&bo_va->vm_status);
+		list_add(&tmp->vm_status, &vm->freed);
+		spin_unlock(&vm->status_lock);
 	}
 
 	if (soffset || eoffset) {
+		spin_lock(&vm->status_lock);
 		bo_va->it.start = soffset;
 		bo_va->it.last = eoffset - 1;
-		interval_tree_insert(&bo_va->it, &vm->va);
-		spin_lock(&vm->status_lock);
 		list_add(&bo_va->vm_status, &vm->cleared);
 		spin_unlock(&vm->status_lock);
+		interval_tree_insert(&bo_va->it, &vm->va);
 	}
 
 	bo_va->flags = flags;
@@ -1158,7 +1155,8 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
 
 	list_for_each_entry(bo_va, &bo->va, bo_list) {
 		spin_lock(&bo_va->vm->status_lock);
-		if (list_empty(&bo_va->vm_status))
+		if (list_empty(&bo_va->vm_status) &&
+		    (bo_va->it.start || bo_va->it.last))
 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
 		spin_unlock(&bo_va->vm->status_lock);
 	}
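
Two invariants fall out of the radeon_vm.c hunks: a remap now always queues a clone of the old [start, last] range on vm->freed so the stale page-table entries get cleared (previously the clone was skipped whenever the bo_va already sat on a status list), and radeon_vm_bo_invalidate() no longer puts address-less bo_vas on the invalidated list. A simplified model of the always-clone rule (plain linked list instead of the driver's status lists and interval tree):

    #include <stdint.h>
    #include <stdlib.h>

    struct range { uint64_t start, last; struct range *next; };

    /* remap va to [soffset, eoffset): the old range, if any, is always
     * recorded on the freed list for later clearing */
    static int remap(struct range **freed, struct range *va,
                     uint64_t soffset, uint64_t eoffset)
    {
        if (va->start || va->last) {
            struct range *tmp = malloc(sizeof(*tmp));
            if (!tmp)
                return -1;               /* -ENOMEM in the driver */
            tmp->start = va->start;      /* clone the stale mapping */
            tmp->last = va->last;
            tmp->next = *freed;
            *freed = tmp;                /* unconditionally queued */
            va->start = va->last = 0;
        }
        if (soffset || eoffset) {
            va->start = soffset;
            va->last = eoffset - 1;
        }
        return 0;
    }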
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 26388b5dd6ed..07037e32dea3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6466,23 +6466,27 @@ restart_ih:
 		case 1: /* D1 vblank/vline */
 			switch (src_data) {
 			case 0: /* D1 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[0]) {
-						drm_handle_vblank(rdev->ddev, 0);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[0]))
-						radeon_crtc_handle_vblank(rdev, 0);
-					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D1 vblank\n");
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[0]) {
+					drm_handle_vblank(rdev->ddev, 0);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[0]))
+					radeon_crtc_handle_vblank(rdev, 0);
+				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D1 vblank\n");
+
 				break;
 			case 1: /* D1 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D1 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D1 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6492,23 +6496,27 @@ restart_ih:
 		case 2: /* D2 vblank/vline */
 			switch (src_data) {
 			case 0: /* D2 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[1]) {
-						drm_handle_vblank(rdev->ddev, 1);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[1]))
-						radeon_crtc_handle_vblank(rdev, 1);
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D2 vblank\n");
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[1]) {
+					drm_handle_vblank(rdev->ddev, 1);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[1]))
+					radeon_crtc_handle_vblank(rdev, 1);
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D2 vblank\n");
+
 				break;
 			case 1: /* D2 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D2 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D2 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6518,23 +6526,27 @@ restart_ih:
 		case 3: /* D3 vblank/vline */
 			switch (src_data) {
 			case 0: /* D3 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[2]) {
-						drm_handle_vblank(rdev->ddev, 2);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[2]))
-						radeon_crtc_handle_vblank(rdev, 2);
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D3 vblank\n");
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[2]) {
+					drm_handle_vblank(rdev->ddev, 2);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[2]))
+					radeon_crtc_handle_vblank(rdev, 2);
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D3 vblank\n");
+
 				break;
 			case 1: /* D3 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D3 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D3 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6544,23 +6556,27 @@ restart_ih:
 		case 4: /* D4 vblank/vline */
 			switch (src_data) {
 			case 0: /* D4 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[3]) {
-						drm_handle_vblank(rdev->ddev, 3);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[3]))
-						radeon_crtc_handle_vblank(rdev, 3);
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D4 vblank\n");
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[3]) {
+					drm_handle_vblank(rdev->ddev, 3);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[3]))
+					radeon_crtc_handle_vblank(rdev, 3);
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D4 vblank\n");
+
 				break;
 			case 1: /* D4 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D4 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D4 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6570,23 +6586,27 @@ restart_ih:
 		case 5: /* D5 vblank/vline */
 			switch (src_data) {
 			case 0: /* D5 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[4]) {
-						drm_handle_vblank(rdev->ddev, 4);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[4]))
-						radeon_crtc_handle_vblank(rdev, 4);
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D5 vblank\n");
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[4]) {
+					drm_handle_vblank(rdev->ddev, 4);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[4]))
+					radeon_crtc_handle_vblank(rdev, 4);
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D5 vblank\n");
+
 				break;
 			case 1: /* D5 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D5 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D5 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6596,23 +6616,27 @@ restart_ih:
 		case 6: /* D6 vblank/vline */
 			switch (src_data) {
 			case 0: /* D6 vblank */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[5]) {
-						drm_handle_vblank(rdev->ddev, 5);
-						rdev->pm.vblank_sync = true;
-						wake_up(&rdev->irq.vblank_queue);
-					}
-					if (atomic_read(&rdev->irq.pflip[5]))
-						radeon_crtc_handle_vblank(rdev, 5);
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D6 vblank\n");
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[5]) {
+					drm_handle_vblank(rdev->ddev, 5);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
 				}
+				if (atomic_read(&rdev->irq.pflip[5]))
+					radeon_crtc_handle_vblank(rdev, 5);
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D6 vblank\n");
+
 				break;
 			case 1: /* D6 vline */
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D6 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D6 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
@@ -6632,88 +6656,112 @@ restart_ih:
 		case 42: /* HPD hotplug */
 			switch (src_data) {
 			case 0:
-				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD1\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD1\n");
+
 				break;
 			case 1:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD2\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD2\n");
+
 				break;
 			case 2:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD3\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD3\n");
+
 				break;
 			case 3:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD4\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD4\n");
+
 				break;
 			case 4:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD5\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD5\n");
+
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD6\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD6\n");
+
 				break;
 			case 6:
-				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 1\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 1\n");
+
 				break;
 			case 7:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 2\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 2\n");
+
 				break;
 			case 8:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 3\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 3\n");
+
 				break;
 			case 9:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 4\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 4\n");
+
 				break;
 			case 10:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 5\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 5\n");
+
 				break;
 			case 11:
-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
-					queue_dp = true;
-					DRM_DEBUG("IH: HPD_RX 6\n");
-				}
+				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
+					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 6\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);