author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-03 16:51:50 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-03 16:51:50 -0500
commit    46d967ae74ba4412b39f4df9b4fcbe0a766989f4 (patch)
tree      9b22d258f8c5055e894544b557221a9339002c48
parent    9044f940ea7479cbda4cf015ec5727fbdb048080 (diff)
parent    00d6a9b6be5885ad38234cd171f6fb18a87faa7c (diff)
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "Radeon and nouveau fixes:

  Nouveau had a few regressions introduced; Ben and Maarten finally
  tracked down the one that was causing problems on my MacBook Pro.
  NVIDIA also gave some info on an engine we were using incorrectly, so
  we disable our use of it. There is also one regression with PCI
  hotplug affecting optimus users.

  Radeon has an oops fix, a sync fix, and one workaround to avoid
  broken functionality on 32-bit x86. That one needs better root
  causing and a better fix, but the bandaid is a lot safer at this
  point"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/radeon: kernel panic in drm_calc_vbltimestamp_from_scanoutpos with 3.18.0-rc6
  drm/radeon: Ignore RADEON_GEM_GTT_WC on 32-bit x86
  drm/radeon: sync all BOs involved in a CS v2
  nouveau: move the hotplug ignore to correct place.
  drm/nouveau/gf116: remove copy1 engine
  drm/nouveau: prevent stale fence->channel pointers, and protect with rcu
  drm/nouveau/fifo/g84-: ack non-stall interrupt before handling it
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c   1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c     2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c     4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c     2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c               2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c            92
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h             4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c                 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c                 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c              7
10 files changed, 88 insertions, 45 deletions
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index cd05677ad4b7..72a40f95d048 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -218,7 +218,6 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_BSP    ] = &nvc0_bsp_oclass;
 		device->oclass[NVDEV_ENGINE_PPP    ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0  ] = &nvc0_copy0_oclass;
-		device->oclass[NVDEV_ENGINE_COPY1  ] = &nvc0_copy1_oclass;
 		device->oclass[NVDEV_ENGINE_DISP   ] =  nva3_disp_oclass;
 		device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
 		break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index 5ae6a43893b5..1931057f9962 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -551,8 +551,8 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 	}
 
 	if (status & 0x40000000) {
-		nouveau_fifo_uevent(&priv->base);
 		nv_wr32(priv, 0x002100, 0x40000000);
+		nouveau_fifo_uevent(&priv->base);
 		status &= ~0x40000000;
 	}
 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 1fe1f8fbda0c..074d434c3077 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -740,6 +740,8 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
 	u32 inte = nv_rd32(priv, 0x002628);
 	u32 unkn;
 
+	nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+
 	for (unkn = 0; unkn < 8; unkn++) {
 		u32 ints = (intr >> (unkn * 0x04)) & inte;
 		if (ints & 0x1) {
@@ -751,8 +753,6 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
 			nv_mask(priv, 0x002628, ints, 0);
 		}
 	}
-
-	nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index d2f0fd39c145..f8734eb74eaa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -952,8 +952,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
 	}
 
 	if (stat & 0x80000000) {
-		nve0_fifo_intr_engine(priv);
 		nv_wr32(priv, 0x002100, 0x80000000);
+		nve0_fifo_intr_engine(priv);
 		stat &= ~0x80000000;
 	}
 
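The nv04, nvc0 and nve0 hunks above all make the same change: write the
interrupt-ack register before running the handler rather than after. If the
ack came last, a non-stall event arriving while the handler runs would be
cleared away unseen; acking first means such an event simply re-asserts the
status bit and raises a fresh interrupt. A minimal sketch of the pattern
follows; fifo_priv, FIFO_INTR, FIFO_ACK and the helper names are illustrative
stand-ins, not the actual nouveau symbols.

/* Illustrative sketch only: the register names and helpers are made-up
 * stand-ins for the hardware-specific nouveau code above. */
static void fifo_intr_sketch(struct fifo_priv *priv)
{
	u32 stat = fifo_rd32(priv, FIFO_INTR);

	if (stat & NONSTALL_PENDING) {
		/* ack first: an event raised while the handler runs
		 * re-asserts the bit and retriggers the interrupt ... */
		fifo_wr32(priv, FIFO_ACK, NONSTALL_PENDING);
		/* ... instead of being cleared unseen afterwards */
		fifo_handle_nonstall(priv);
		stat &= ~NONSTALL_PENDING;
	}
}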
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 57238076049f..62b97c4eef8d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -629,7 +629,6 @@ int nouveau_pmops_suspend(struct device *dev)
 
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
-	pci_ignore_hotplug(pdev);
 	pci_set_power_state(pdev, PCI_D3hot);
 	return 0;
 }
@@ -933,6 +932,7 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
 	ret = nouveau_do_suspend(drm_dev, true);
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
+	pci_ignore_hotplug(pdev);
 	pci_set_power_state(pdev, PCI_D3cold);
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 515cd9aebb99..f32a434724e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -52,20 +52,24 @@ nouveau_fctx(struct nouveau_fence *fence)
 	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
 }
 
-static void
+static int
 nouveau_fence_signal(struct nouveau_fence *fence)
 {
+	int drop = 0;
+
 	fence_signal_locked(&fence->base);
 	list_del(&fence->head);
+	rcu_assign_pointer(fence->channel, NULL);
 
 	if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
 		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
 		if (!--fctx->notify_ref)
-			nvif_notify_put(&fctx->notify);
+			drop = 1;
 	}
 
 	fence_put(&fence->base);
+	return drop;
 }
 
 static struct nouveau_fence *
@@ -88,16 +92,23 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
 	struct nouveau_fence *fence;
 
-	nvif_notify_fini(&fctx->notify);
-
 	spin_lock_irq(&fctx->lock);
 	while (!list_empty(&fctx->pending)) {
 		fence = list_entry(fctx->pending.next, typeof(*fence), head);
 
-		nouveau_fence_signal(fence);
-		fence->channel = NULL;
+		if (nouveau_fence_signal(fence))
+			nvif_notify_put(&fctx->notify);
 	}
 	spin_unlock_irq(&fctx->lock);
+
+	nvif_notify_fini(&fctx->notify);
+	fctx->dead = 1;
+
+	/*
+	 * Ensure that all accesses to fence->channel complete before freeing
+	 * the channel.
+	 */
+	synchronize_rcu();
 }
 
 static void
@@ -112,21 +123,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
 	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
 }
 
-static void
+static int
 nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
 	struct nouveau_fence *fence;
-
+	int drop = 0;
 	u32 seq = fctx->read(chan);
 
 	while (!list_empty(&fctx->pending)) {
 		fence = list_entry(fctx->pending.next, typeof(*fence), head);
 
 		if ((int)(seq - fence->base.seqno) < 0)
-			return;
+			break;
 
-		nouveau_fence_signal(fence);
+		drop |= nouveau_fence_signal(fence);
 	}
+
+	return drop;
 }
 
 static int
@@ -135,18 +148,21 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
 	struct nouveau_fence_chan *fctx =
 		container_of(notify, typeof(*fctx), notify);
 	unsigned long flags;
+	int ret = NVIF_NOTIFY_KEEP;
 
 	spin_lock_irqsave(&fctx->lock, flags);
 	if (!list_empty(&fctx->pending)) {
 		struct nouveau_fence *fence;
+		struct nouveau_channel *chan;
 
 		fence = list_entry(fctx->pending.next, typeof(*fence), head);
-		nouveau_fence_update(fence->channel, fctx);
+		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+		if (nouveau_fence_update(fence->channel, fctx))
+			ret = NVIF_NOTIFY_DROP;
 	}
 	spin_unlock_irqrestore(&fctx->lock, flags);
 
-	/* Always return keep here. NVIF refcount is handled with nouveau_fence_update */
-	return NVIF_NOTIFY_KEEP;
+	return ret;
 }
 
 void
@@ -262,7 +278,10 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 	if (!ret) {
 		fence_get(&fence->base);
 		spin_lock_irq(&fctx->lock);
-		nouveau_fence_update(chan, fctx);
+
+		if (nouveau_fence_update(chan, fctx))
+			nvif_notify_put(&fctx->notify);
+
 		list_add_tail(&fence->head, &fctx->pending);
 		spin_unlock_irq(&fctx->lock);
 	}
@@ -276,13 +295,16 @@ nouveau_fence_done(struct nouveau_fence *fence)
 	if (fence->base.ops == &nouveau_fence_ops_legacy ||
 	    fence->base.ops == &nouveau_fence_ops_uevent) {
 		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+		struct nouveau_channel *chan;
 		unsigned long flags;
 
 		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
 			return true;
 
 		spin_lock_irqsave(&fctx->lock, flags);
-		nouveau_fence_update(fence->channel, fctx);
+		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+		if (chan && nouveau_fence_update(chan, fctx))
+			nvif_notify_put(&fctx->notify);
 		spin_unlock_irqrestore(&fctx->lock, flags);
 	}
 	return fence_is_signaled(&fence->base);
@@ -387,12 +409,18 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
 	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
 		struct nouveau_channel *prev = NULL;
+		bool must_wait = true;
 
 		f = nouveau_local_fence(fence, chan->drm);
-		if (f)
-			prev = f->channel;
+		if (f) {
+			rcu_read_lock();
+			prev = rcu_dereference(f->channel);
+			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+				must_wait = false;
+			rcu_read_unlock();
+		}
 
-		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+		if (must_wait)
 			ret = fence_wait(fence, intr);
 
 		return ret;
@@ -403,19 +431,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 
 	for (i = 0; i < fobj->shared_count && !ret; ++i) {
 		struct nouveau_channel *prev = NULL;
+		bool must_wait = true;
 
 		fence = rcu_dereference_protected(fobj->shared[i],
 						reservation_object_held(resv));
 
 		f = nouveau_local_fence(fence, chan->drm);
-		if (f)
-			prev = f->channel;
+		if (f) {
+			rcu_read_lock();
+			prev = rcu_dereference(f->channel);
+			if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+				must_wait = false;
+			rcu_read_unlock();
+		}
 
-		if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+		if (must_wait)
 			ret = fence_wait(fence, intr);
-
-		if (ret)
-			break;
 	}
 
 	return ret;
@@ -463,7 +494,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
-	return fence->channel ? fctx->name : "dead channel";
+	return !fctx->dead ? fctx->name : "dead channel";
 }
 
 /*
@@ -476,9 +507,16 @@ static bool nouveau_fence_is_signaled(struct fence *f)
 {
 	struct nouveau_fence *fence = from_fence(f);
 	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
-	struct nouveau_channel *chan = fence->channel;
+	struct nouveau_channel *chan;
+	bool ret = false;
+
+	rcu_read_lock();
+	chan = rcu_dereference(fence->channel);
+	if (chan)
+		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+	rcu_read_unlock();
 
-	return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+	return ret;
 }
 
 static bool nouveau_fence_no_signaling(struct fence *f)
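Taken together, the nouveau_fence.c changes apply the standard RCU
publish/retract discipline to fence->channel: teardown retracts the pointer
with rcu_assign_pointer(..., NULL) and waits in synchronize_rcu() before the
channel memory may be freed, while lock-free readers only touch the channel
inside an rcu_read_lock()/rcu_read_unlock() section via rcu_dereference().
A condensed sketch of that discipline, assuming the structures from the
diff (the function names here are ours, not nouveau's):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Writer (teardown): unpublish under the lock, then wait for readers. */
static void example_unpublish(struct nouveau_fence *fence,
			      struct nouveau_fence_chan *fctx)
{
	spin_lock_irq(&fctx->lock);
	rcu_assign_pointer(fence->channel, NULL);	/* readers now see NULL */
	spin_unlock_irq(&fctx->lock);
	synchronize_rcu();	/* wait for in-flight rcu_read_lock() sections */
	/* only now is it safe to free the channel the fence pointed at */
}

/* Reader (lock-free query): dereference only inside a read-side section. */
static bool example_query(struct nouveau_fence *fence,
			  struct nouveau_fence_chan *fctx)
{
	struct nouveau_channel *chan;
	bool signaled = false;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);	/* may be NULL after teardown */
	if (chan)
		signaled = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();	/* chan must not be used past this point */

	return signaled;
}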
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 943b0b17b1fc..96e461c6f68f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -14,7 +14,7 @@ struct nouveau_fence {
 
 	bool sysmem;
 
-	struct nouveau_channel *channel;
+	struct nouveau_channel __rcu *channel;
 	unsigned long timeout;
 };
 
@@ -47,7 +47,7 @@ struct nouveau_fence_chan {
 	char name[32];
 
 	struct nvif_notify notify;
-	int notify_ref;
+	int notify_ref, dead;
 };
 
 struct nouveau_fence_priv {
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a3e7aed7e680..6f377de099f9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -251,22 +251,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 
 static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
-	int i, r = 0;
+	struct radeon_cs_reloc *reloc;
+	int r;
 
-	for (i = 0; i < p->nrelocs; i++) {
+	list_for_each_entry(reloc, &p->validated, tv.head) {
 		struct reservation_object *resv;
 
-		if (!p->relocs[i].robj)
-			continue;
-
-		resv = p->relocs[i].robj->tbo.resv;
+		resv = reloc->robj->tbo.resv;
 		r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
-					       p->relocs[i].tv.shared);
-
+					       reloc->tv.shared);
 		if (r)
-			break;
+			return r;
 	}
-	return r;
+	return 0;
 }
 
 /* XXX: note that this is called from the legacy UMS CS ioctl as well */
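The rewritten radeon_cs_sync_rings() walks p->validated, the list of every
buffer validated for this CS, instead of indexing the reloc array, so no BO
can be skipped. For readers unfamiliar with the idiom, a self-contained
sketch of the kernel's intrusive-list iteration (the struct and function
names here are ours, not radeon's):

#include <linux/list.h>

/* Toy type for illustration only. */
struct item {
	int payload;
	struct list_head head;	/* links this item into a list */
};

static int sum_items(struct list_head *items)
{
	struct item *it;
	int sum = 0;

	/* Visits every linked node in order; unlike the old index loop
	 * over p->relocs, there is no entry to test and skip. */
	list_for_each_entry(it, items, head)
		sum += it->payload;

	return sum;
}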
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8309b11e674d..03586763ee86 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -795,6 +795,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
 
 	/* Get associated drm_crtc: */
 	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+	if (!drmcrtc)
+		return -EINVAL;
 
 	/* Helper routine in DRM core does all the work: */
 	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 99a960a4f302..4c0d786d5c7a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -213,6 +213,13 @@ int radeon_bo_create(struct radeon_device *rdev,
 	if (!(rdev->flags & RADEON_IS_PCIE))
 		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 
+#ifdef CONFIG_X86_32
+	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+	 */
+	bo->flags &= ~RADEON_GEM_GTT_WC;
+#endif
+
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocation are uninterruptible */
 	down_read(&rdev->pm.mclk_lock);