Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c               |  2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c          |  2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c           |  2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c           |  2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c       |  2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c              |  2
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c                 | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c |  1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c   |  2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c   |  4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c   |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c             |  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c           | 92
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h           |  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c                | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c               |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c            |  7
17 files changed, 104 insertions(+), 63 deletions(-)
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 7496f55611a5..ef5feeecec84 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -226,7 +226,7 @@ struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
 
         obj->dev_addr = DMA_ERROR_CODE;
 
-        mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping;
+        mapping = file_inode(obj->obj.filp)->i_mapping;
         mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
 
         DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 68d38eb6774d..835b6af00970 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1817,7 +1817,7 @@ static int fimc_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int fimc_runtime_suspend(struct device *dev)
 {
         struct fimc_context *ctx = get_fimc_context(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 6ff8599f6cbf..81a250830808 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1540,7 +1540,7 @@ static int g2d_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int g2d_runtime_suspend(struct device *dev)
 {
         struct g2d_data *g2d = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index c6a013fc321c..0261468c8019 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1764,7 +1764,7 @@ static int gsc_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int gsc_runtime_suspend(struct device *dev)
 {
         struct gsc_context *ctx = get_gsc_context(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index b6a37d4f5b13..425e70625388 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -822,7 +822,7 @@ static int rotator_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int rotator_runtime_suspend(struct device *dev)
 {
         struct rot_context *rot = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8bcdb981d540..9cb5c95d5898 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4325,7 +4325,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
         ironlake_fdi_disable(crtc);
 
         ironlake_disable_pch_transcoder(dev_priv, pipe);
-        intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
 
         if (HAS_PCH_CPT(dev)) {
                 /* disable TRANS_DP_CTL */
@@ -4389,7 +4388,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
 
         if (intel_crtc->config.has_pch_encoder) {
                 lpt_disable_pch_transcoder(dev_priv);
-                intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
                 intel_ddi_fdi_disable(crtc);
         }
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index a6bd1422e38f..c0bbf2172446 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -899,6 +899,17 @@ void intel_lvds_init(struct drm_device *dev)
         int pipe;
         u8 pin;
 
+        /*
+         * Unlock registers and just leave them unlocked. Do this before
+         * checking quirk lists to avoid bogus WARNINGs.
+         */
+        if (HAS_PCH_SPLIT(dev)) {
+                I915_WRITE(PCH_PP_CONTROL,
+                           I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+        } else {
+                I915_WRITE(PP_CONTROL,
+                           I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+        }
         if (!intel_lvds_supported(dev))
                 return;
 
@@ -1097,17 +1108,6 @@ out:
         lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
                                  LVDS_A3_POWER_MASK;
 
-        /*
-         * Unlock registers and just
-         * leave them unlocked
-         */
-        if (HAS_PCH_SPLIT(dev)) {
-                I915_WRITE(PCH_PP_CONTROL,
-                           I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
-        } else {
-                I915_WRITE(PP_CONTROL,
-                           I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
-        }
         lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
         if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
                 DRM_DEBUG_KMS("lid notifier registration failed\n");
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index cd05677ad4b7..72a40f95d048 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -218,7 +218,6 @@ nvc0_identify(struct nouveau_device *device)
                 device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
                 device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
                 device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
-                device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
                 device->oclass[NVDEV_ENGINE_DISP   ] = nva3_disp_oclass;
                 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
                 break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index 5ae6a43893b5..1931057f9962 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -551,8 +551,8 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
                 }
 
                 if (status & 0x40000000) {
-                        nouveau_fifo_uevent(&priv->base);
                         nv_wr32(priv, 0x002100, 0x40000000);
+                        nouveau_fifo_uevent(&priv->base);
                         status &= ~0x40000000;
                 }
         }
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 1fe1f8fbda0c..074d434c3077 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -740,6 +740,8 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
         u32 inte = nv_rd32(priv, 0x002628);
         u32 unkn;
 
+        nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+
         for (unkn = 0; unkn < 8; unkn++) {
                 u32 ints = (intr >> (unkn * 0x04)) & inte;
                 if (ints & 0x1) {
@@ -751,8 +753,6 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
                         nv_mask(priv, 0x002628, ints, 0);
                 }
         }
-
-        nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index d2f0fd39c145..f8734eb74eaa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -952,8 +952,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
         }
 
         if (stat & 0x80000000) {
-                nve0_fifo_intr_engine(priv);
                 nv_wr32(priv, 0x002100, 0x80000000);
+                nve0_fifo_intr_engine(priv);
                 stat &= ~0x80000000;
         }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 57238076049f..62b97c4eef8d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -629,7 +629,6 @@ int nouveau_pmops_suspend(struct device *dev)
 
         pci_save_state(pdev);
         pci_disable_device(pdev);
-        pci_ignore_hotplug(pdev);
         pci_set_power_state(pdev, PCI_D3hot);
         return 0;
 }
@@ -933,6 +932,7 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
         ret = nouveau_do_suspend(drm_dev, true);
         pci_save_state(pdev);
         pci_disable_device(pdev);
+        pci_ignore_hotplug(pdev);
         pci_set_power_state(pdev, PCI_D3cold);
         drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
         return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 515cd9aebb99..f32a434724e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -52,20 +52,24 @@ nouveau_fctx(struct nouveau_fence *fence)
         return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
 }
 
-static void
+static int
 nouveau_fence_signal(struct nouveau_fence *fence)
 {
+        int drop = 0;
+
         fence_signal_locked(&fence->base);
         list_del(&fence->head);
+        rcu_assign_pointer(fence->channel, NULL);
 
         if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
                 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
                 if (!--fctx->notify_ref)
-                        nvif_notify_put(&fctx->notify);
+                        drop = 1;
         }
 
         fence_put(&fence->base);
+        return drop;
 }
 
 static struct nouveau_fence *
@@ -88,16 +92,23 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
         struct nouveau_fence *fence;
 
-        nvif_notify_fini(&fctx->notify);
-
         spin_lock_irq(&fctx->lock);
         while (!list_empty(&fctx->pending)) {
                 fence = list_entry(fctx->pending.next, typeof(*fence), head);
 
-                nouveau_fence_signal(fence);
-                fence->channel = NULL;
+                if (nouveau_fence_signal(fence))
+                        nvif_notify_put(&fctx->notify);
         }
         spin_unlock_irq(&fctx->lock);
+
+        nvif_notify_fini(&fctx->notify);
+        fctx->dead = 1;
+
+        /*
+         * Ensure that all accesses to fence->channel complete before freeing
+         * the channel.
+         */
+        synchronize_rcu();
 }
 
 static void
@@ -112,21 +123,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
         kref_put(&fctx->fence_ref, nouveau_fence_context_put);
 }
 
-static void
+static int
 nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
         struct nouveau_fence *fence;
-
+        int drop = 0;
         u32 seq = fctx->read(chan);
 
         while (!list_empty(&fctx->pending)) {
                 fence = list_entry(fctx->pending.next, typeof(*fence), head);
 
                 if ((int)(seq - fence->base.seqno) < 0)
-                        return;
+                        break;
 
-                nouveau_fence_signal(fence);
+                drop |= nouveau_fence_signal(fence);
         }
+
+        return drop;
 }
 
 static int
@@ -135,18 +148,21 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
         struct nouveau_fence_chan *fctx =
                 container_of(notify, typeof(*fctx), notify);
         unsigned long flags;
+        int ret = NVIF_NOTIFY_KEEP;
 
         spin_lock_irqsave(&fctx->lock, flags);
         if (!list_empty(&fctx->pending)) {
                 struct nouveau_fence *fence;
+                struct nouveau_channel *chan;
 
                 fence = list_entry(fctx->pending.next, typeof(*fence), head);
-                nouveau_fence_update(fence->channel, fctx);
+                chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+                if (nouveau_fence_update(fence->channel, fctx))
+                        ret = NVIF_NOTIFY_DROP;
         }
         spin_unlock_irqrestore(&fctx->lock, flags);
 
-        /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */
-        return NVIF_NOTIFY_KEEP;
+        return ret;
 }
 
 void
@@ -262,7 +278,10 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
         if (!ret) {
                 fence_get(&fence->base);
                 spin_lock_irq(&fctx->lock);
-                nouveau_fence_update(chan, fctx);
+
+                if (nouveau_fence_update(chan, fctx))
+                        nvif_notify_put(&fctx->notify);
+
                 list_add_tail(&fence->head, &fctx->pending);
                 spin_unlock_irq(&fctx->lock);
         }
@@ -276,13 +295,16 @@ nouveau_fence_done(struct nouveau_fence *fence)
         if (fence->base.ops == &nouveau_fence_ops_legacy ||
             fence->base.ops == &nouveau_fence_ops_uevent) {
                 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+                struct nouveau_channel *chan;
                 unsigned long flags;
 
                 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
                         return true;
 
                 spin_lock_irqsave(&fctx->lock, flags);
-                nouveau_fence_update(fence->channel, fctx);
+                chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+                if (chan && nouveau_fence_update(chan, fctx))
+                        nvif_notify_put(&fctx->notify);
                 spin_unlock_irqrestore(&fctx->lock, flags);
         }
         return fence_is_signaled(&fence->base);
@@ -387,12 +409,18 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
         if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
                 struct nouveau_channel *prev = NULL;
+                bool must_wait = true;
 
                 f = nouveau_local_fence(fence, chan->drm);
-                if (f)
-                        prev = f->channel;
+                if (f) {
+                        rcu_read_lock();
+                        prev = rcu_dereference(f->channel);
+                        if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+                                must_wait = false;
+                        rcu_read_unlock();
+                }
 
-                if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+                if (must_wait)
                         ret = fence_wait(fence, intr);
 
                 return ret;
@@ -403,19 +431,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
         for (i = 0; i < fobj->shared_count && !ret; ++i) {
                 struct nouveau_channel *prev = NULL;
+                bool must_wait = true;
 
                 fence = rcu_dereference_protected(fobj->shared[i],
                                                 reservation_object_held(resv));
 
                 f = nouveau_local_fence(fence, chan->drm);
-                if (f)
-                        prev = f->channel;
+                if (f) {
+                        rcu_read_lock();
+                        prev = rcu_dereference(f->channel);
+                        if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+                                must_wait = false;
+                        rcu_read_unlock();
+                }
 
-                if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+                if (must_wait)
                         ret = fence_wait(fence, intr);
-
-                if (ret)
-                        break;
         }
 
         return ret;
@@ -463,7 +494,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
         struct nouveau_fence *fence = from_fence(f);
         struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
-        return fence->channel ? fctx->name : "dead channel";
+        return !fctx->dead ? fctx->name : "dead channel";
 }
 
 /*
@@ -476,9 +507,16 @@ static bool nouveau_fence_is_signaled(struct fence *f)
 {
         struct nouveau_fence *fence = from_fence(f);
         struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
-        struct nouveau_channel *chan = fence->channel;
+        struct nouveau_channel *chan;
+        bool ret = false;
+
+        rcu_read_lock();
+        chan = rcu_dereference(fence->channel);
+        if (chan)
+                ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+        rcu_read_unlock();
 
-        return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+        return ret;
 }
 
 static bool nouveau_fence_no_signaling(struct fence *f)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 943b0b17b1fc..96e461c6f68f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -14,7 +14,7 @@ struct nouveau_fence {
 
         bool sysmem;
 
-        struct nouveau_channel *channel;
+        struct nouveau_channel __rcu *channel;
         unsigned long timeout;
 };
 
@@ -47,7 +47,7 @@ struct nouveau_fence_chan {
         char name[32];
 
         struct nvif_notify notify;
-        int notify_ref;
+        int notify_ref, dead;
 };
 
 struct nouveau_fence_priv {
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a3e7aed7e680..6f377de099f9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -251,22 +251,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 
 static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
-        int i, r = 0;
+        struct radeon_cs_reloc *reloc;
+        int r;
 
-        for (i = 0; i < p->nrelocs; i++) {
+        list_for_each_entry(reloc, &p->validated, tv.head) {
                 struct reservation_object *resv;
 
-                if (!p->relocs[i].robj)
-                        continue;
-
-                resv = p->relocs[i].robj->tbo.resv;
+                resv = reloc->robj->tbo.resv;
                 r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
-                                               p->relocs[i].tv.shared);
-
+                                               reloc->tv.shared);
                 if (r)
-                        break;
+                        return r;
         }
-        return r;
+        return 0;
 }
 
 /* XXX: note that this is called from the legacy UMS CS ioctl as well */
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8309b11e674d..03586763ee86 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -795,6 +795,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
 
         /* Get associated drm_crtc: */
         drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+        if (!drmcrtc)
+                return -EINVAL;
 
         /* Helper routine in DRM core does all the work: */
         return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 99a960a4f302..4c0d786d5c7a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -213,6 +213,13 @@ int radeon_bo_create(struct radeon_device *rdev,
         if (!(rdev->flags & RADEON_IS_PCIE))
                 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 
+#ifdef CONFIG_X86_32
+        /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+         * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+         */
+        bo->flags &= ~RADEON_GEM_GTT_WC;
+#endif
+
         radeon_ttm_placement_from_domain(bo, domain);
         /* Kernel allocation are uninterruptible */
         down_read(&rdev->pm.mclk_lock);