author    Ben Skeggs <bskeggs@redhat.com>	2012-04-29 23:55:29 -0400
committer Ben Skeggs <bskeggs@redhat.com>	2012-05-24 02:55:53 -0400
commit    5e120f6e4b3f35b741c5445dfc755f50128c3c44 (patch)
tree      210b2bb8f5dccfcb4a6c134341fa31a633ba5243 /drivers/gpu/drm/nouveau
parent    d375e7d56dffa564a6c337d2ed3217fb94826100 (diff)
drm/nouveau/fence: convert to exec engine, and improve channel sync

Now have a somewhat simpler semaphore sync implementation for nv17:nv84,
and have switched to using semaphores as fences on nv84+, making use of
the hardware's >= acquire operation.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
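The heart of the conversion is the per-backend fence interface added to nouveau_fence.h below: each chipset family supplies emit/sync/read hooks, and the generic code in nouveau_fence.c retires a fence once read() reaches its sequence number, the same >= comparison the nv84+ ACQUIRE_GEQUAL semaphore operation performs in hardware. A minimal userspace sketch of that dispatch follows; only the emit/read hook shape mirrors the structs in this commit, everything else (the array-backed "channel", main) is a hypothetical harness, not kernel code:

/* Standalone model of the fence-engine vtable this commit introduces.
 * fence_priv loosely mirrors nouveau_fence_priv; the rest is illustrative. */
#include <stdio.h>
#include <stdint.h>

struct channel { uint32_t hw_seq; };	/* stands in for nouveau_channel */

struct fence_priv {			/* cf. nouveau_fence_priv */
	int      (*emit)(struct channel *, uint32_t seq);
	uint32_t (*read)(struct channel *);
};

/* Backend hooks: a real backend (e.g. nv84_fence.c) would emit a
 * SEMAPHORE_TRIGGER_WRITE_LONG; here the write completes immediately. */
static int fake_emit(struct channel *chan, uint32_t seq)
{
	chan->hw_seq = seq;		/* pretend the GPU wrote the value */
	return 0;
}

static uint32_t fake_read(struct channel *chan)
{
	return chan->hw_seq;
}

int main(void)
{
	struct fence_priv priv = { .emit = fake_emit, .read = fake_read };
	struct channel chan = { 0 };
	uint32_t sequence = 0, pending;

	priv.emit(&chan, ++sequence);
	pending = sequence;

	/* Retirement test used by nouveau_fence_update(): a fence is done
	 * once read(chan) >= fence->sequence. */
	if (priv.read(&chan) >= pending)
		printf("fence %u signalled\n", (unsigned)pending);
	return 0;
}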
Diffstat (limited to 'drivers/gpu/drm/nouveau')
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile              1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c    49
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c     2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c     2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h        20
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c     474
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h      23
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c         2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c      33
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c        139
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_software.c      11
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c        212
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c        174
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fence.c        182
14 files changed, 851 insertions(+), 473 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index ba3e57c7892d..ce222eb0a318 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -18,6 +18,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
 	     nv50_fb.o nvc0_fb.o \
 	     nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
 	     nve0_fifo.o \
+	     nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \
 	     nv04_software.o nv50_software.o nvc0_software.o \
 	     nv04_graph.o nv10_graph.o nv20_graph.o \
 	     nv40_graph.o nv50_graph.o nvc0_graph.o nve0_graph.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 694f6325311a..d25dc249535b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -119,6 +119,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		      struct drm_file *file_priv,
 		      uint32_t vram_handle, uint32_t gart_handle)
 {
+	struct nouveau_exec_engine *fence = nv_engine(dev, NVOBJ_ENGINE_FENCE);
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
@@ -157,8 +158,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	}
 
 	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
-	INIT_LIST_HEAD(&chan->fence.pending);
-	spin_lock_init(&chan->fence.lock);
 
 	/* setup channel's memory and vm */
 	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
@@ -188,7 +187,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	chan->user_put = 0x40;
 	chan->user_get = 0x44;
 	if (dev_priv->card_type >= NV_50)
-	        chan->user_get_hi = 0x60;
+		chan->user_get_hi = 0x60;
 
 	/* disable the fifo caches */
 	pfifo->reassign(dev, false);
@@ -211,7 +210,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 
 	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
 		OUT_RING (chan, 0x00000000);
-	FIRE_RING(chan);
 
 	ret = nouveau_gpuobj_gr_new(chan, NvSw, nouveau_software_class(dev));
 	if (ret) {
@@ -219,7 +217,21 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		return ret;
 	}
 
-	ret = nouveau_fence_channel_init(chan);
+	if (dev_priv->card_type < NV_C0) {
+		ret = RING_SPACE(chan, 2);
+		if (ret) {
+			nouveau_channel_put(&chan);
+			return ret;
+		}
+
+		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
+		OUT_RING (chan, NvSw);
+		FIRE_RING (chan);
+	}
+
+	FIRE_RING(chan);
+
+	ret = fence->context_new(chan, NVOBJ_ENGINE_FENCE);
 	if (ret) {
 		nouveau_channel_put(&chan);
 		return ret;
@@ -291,12 +303,6 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
 	/* give it chance to idle */
 	nouveau_channel_idle(chan);
 
-	/* ensure all outstanding fences are signaled. they should be if the
-	 * above attempts at idling were OK, but if we failed this'll tell TTM
-	 * we're done with the buffers.
-	 */
-	nouveau_fence_channel_fini(chan);
-
 	/* boot it off the hardware */
 	pfifo->reassign(dev, false);
 
@@ -305,6 +311,9 @@ nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
 	for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
 		if (chan->engctx[i])
 			dev_priv->eng[i]->context_del(chan, i);
+		/*XXX: clean this up later, order is important */
+		if (i == NVOBJ_ENGINE_FENCE)
+			pfifo->destroy_context(chan);
 	}
 
 	pfifo->reassign(dev, true);
@@ -367,18 +376,14 @@ nouveau_channel_idle(struct nouveau_channel *chan)
 	struct nouveau_fence *fence = NULL;
 	int ret;
 
-	nouveau_fence_update(chan);
-
-	if (chan->fence.sequence != chan->fence.sequence_ack) {
-		ret = nouveau_fence_new(chan, &fence);
-		if (!ret) {
-			ret = nouveau_fence_wait(fence, false, false);
-			nouveau_fence_unref(&fence);
-		}
-
-		if (ret)
-			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	ret = nouveau_fence_new(chan, &fence);
+	if (!ret) {
+		ret = nouveau_fence_wait(fence, false, false);
+		nouveau_fence_unref(&fence);
 	}
+
+	if (ret)
+		NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
 }
 
 /* cleans up all the fifos from file_priv */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index fa2ec491f6a7..188c92b327e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -67,8 +67,6 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
 			   nvchan_rd32(chan, 0x8c));
 	}
 
-	seq_printf(m, "last fence    : %d\n", chan->fence.sequence);
-	seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index f9cdc921ef96..69688ef5cf46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -461,7 +461,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
 		OUT_RING (chan, 0x00000000);
 	} else {
 		BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-		OUT_RING (chan, ++chan->fence.sequence);
+		OUT_RING (chan, 0);
 		BEGIN_IMC0(chan, 0, NVSW_SUBCHAN_PAGE_FLIP, 0x0000);
 	}
 	FIRE_RING (chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 43a46f157def..79eecf53ef2a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -165,6 +165,7 @@ enum nouveau_flags {
 #define NVOBJ_ENGINE_PPP	NVOBJ_ENGINE_MPEG
 #define NVOBJ_ENGINE_BSP	6
 #define NVOBJ_ENGINE_VP		7
+#define NVOBJ_ENGINE_FENCE	14
 #define NVOBJ_ENGINE_DISPLAY	15
 #define NVOBJ_ENGINE_NR		16
 
@@ -234,17 +235,6 @@ struct nouveau_channel {
 	uint32_t user_get_hi;
 	uint32_t user_put;
 
-	/* Fencing */
-	struct {
-		/* lock protects the pending list only */
-		spinlock_t lock;
-		struct list_head pending;
-		uint32_t sequence;
-		uint32_t sequence_ack;
-		atomic_t last_sequence_irq;
-		struct nouveau_vma vma;
-	} fence;
-
 	/* DMA push buffer */
 	struct nouveau_gpuobj *pushbuf;
 	struct nouveau_bo *pushbuf_bo;
@@ -1443,13 +1433,6 @@ extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
 			      struct nouveau_vma *);
 extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
 
-/* nouveau_fence.c */
-int nouveau_fence_init(struct drm_device *);
-void nouveau_fence_fini(struct drm_device *);
-int nouveau_fence_channel_init(struct nouveau_channel *);
-void nouveau_fence_channel_fini(struct nouveau_channel *);
-void nouveau_fence_work(struct nouveau_fence *fence,
-			void (*work)(void *priv, bool signalled), void *priv);
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, int size, int align,
 			   uint32_t domain, uint32_t tile_mode,
@@ -1746,6 +1729,7 @@ nv44_graph_class(struct drm_device *dev)
 #define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL        0x00000001
 #define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG           0x00000002
 #define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL       0x00000004
+#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD                0x00001000
 #define NV84_SUBCHAN_NOTIFY_INTR                            0x00000020
 #define NV84_SUBCHAN_WRCACHE_FLUSH                          0x00000024
 #define NV10_SUBCHAN_REF_CNT                                0x00000050
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 2c10d54fc493..4ba41a45114f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -36,85 +36,71 @@
 #include "nouveau_software.h"
 #include "nouveau_dma.h"
 
-#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
+void
+nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+{
+	struct nouveau_fence *fence, *fnext;
+	spin_lock(&fctx->lock);
+	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+		if (fence->work)
+			fence->work(fence->priv, false);
+		fence->channel = NULL;
+		list_del(&fence->head);
+		nouveau_fence_unref(&fence);
+	}
+	spin_unlock(&fctx->lock);
+}
+
+void
+nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
+{
+	INIT_LIST_HEAD(&fctx->pending);
+	spin_lock_init(&fctx->lock);
+}
 
 void
 nouveau_fence_update(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
-	struct nouveau_fence *tmp, *fence;
-	uint32_t sequence;
-
-	spin_lock(&chan->fence.lock);
-
-	/* Fetch the last sequence if the channel is still up and running */
-	if (likely(!list_empty(&chan->fence.pending))) {
-		if (USE_REFCNT(dev))
-			sequence = nvchan_rd32(chan, 0x48);
-		else
-			sequence = atomic_read(&chan->fence.last_sequence_irq);
-
-		if (chan->fence.sequence_ack == sequence)
-			goto out;
-		chan->fence.sequence_ack = sequence;
-	}
+	struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+	struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+	struct nouveau_fence *fence, *fnext;
 
-	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
-		if (fence->sequence > chan->fence.sequence_ack)
+	spin_lock(&fctx->lock);
+	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+		if (priv->read(chan) < fence->sequence)
 			break;
 
-		fence->channel = NULL;
-		list_del(&fence->head);
 		if (fence->work)
 			fence->work(fence->priv, true);
-
+		fence->channel = NULL;
+		list_del(&fence->head);
 		nouveau_fence_unref(&fence);
 	}
-
-out:
-	spin_unlock(&chan->fence.lock);
+	spin_unlock(&fctx->lock);
 }
 
 int
 nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+	struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
 	int ret;
 
-	ret = RING_SPACE(chan, 2);
-	if (ret)
-		return ret;
-
-	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
-		nouveau_fence_update(chan);
-
-		BUG_ON(chan->fence.sequence ==
-		       chan->fence.sequence_ack - 1);
-	}
-
-	fence->sequence = ++chan->fence.sequence;
-	fence->channel = chan;
-
-	kref_get(&fence->kref);
-	spin_lock(&chan->fence.lock);
-	list_add_tail(&fence->head, &chan->fence.pending);
-	spin_unlock(&chan->fence.lock);
+	fence->channel = chan;
+	fence->timeout = jiffies + (3 * DRM_HZ);
+	fence->sequence = ++fctx->sequence;
 
-	if (USE_REFCNT(dev)) {
-		if (dev_priv->card_type < NV_C0)
-			BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-		else
-			BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-	} else {
-		BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
+	ret = priv->emit(fence);
+	if (!ret) {
+		kref_get(&fence->kref);
+		spin_lock(&fctx->lock);
+		list_add_tail(&fence->head, &fctx->pending);
+		spin_unlock(&fctx->lock);
 	}
-	OUT_RING (chan, fence->sequence);
-	FIRE_RING(chan);
-	fence->timeout = jiffies + 3 * DRM_HZ;
 
-	return 0;
+	return ret;
 }
 
 bool
@@ -158,6 +144,23 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 	return ret;
 }
 
+int
+nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
+{
+	struct nouveau_channel *prev = fence ? fence->channel : NULL;
+	struct drm_device *dev = chan->dev;
+	struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+	int ret = 0;
+
+	if (unlikely(prev && prev != chan && !nouveau_fence_done(fence))) {
+		ret = priv->sync(fence, chan);
+		if (unlikely(ret))
+			ret = nouveau_fence_wait(fence, true, false);
+	}
+
+	return ret;
+}
+
 static void
 nouveau_fence_del(struct kref *kref)
 {
@@ -186,6 +189,9 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
 	struct nouveau_fence *fence;
 	int ret = 0;
 
+	if (unlikely(!chan->engctx[NVOBJ_ENGINE_FENCE]))
+		return -ENODEV;
+
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 	if (!fence)
 		return -ENOMEM;
@@ -200,359 +206,3 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
 	*pfence = fence;
 	return ret;
 }
-
-struct nouveau_semaphore {
-	struct kref ref;
-	struct drm_device *dev;
-	struct drm_mm_node *mem;
-};
-
-void
-nouveau_fence_work(struct nouveau_fence *fence,
-		   void (*work)(void *priv, bool signalled),
-		   void *priv)
-{
-	if (!fence->channel) {
-		work(priv, true);
-	} else {
-		fence->work = work;
-		fence->priv = priv;
-	}
-}
-
-static struct nouveau_semaphore *
-semaphore_alloc(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_semaphore *sema;
-	int size = (dev_priv->chipset < 0x84) ? 4 : 16;
-	int ret, i;
-
-	if (!USE_SEMA(dev))
-		return NULL;
-
-	sema = kmalloc(sizeof(*sema), GFP_KERNEL);
-	if (!sema)
-		goto fail;
-
-	ret = drm_mm_pre_get(&dev_priv->fence.heap);
-	if (ret)
-		goto fail;
-
-	spin_lock(&dev_priv->fence.lock);
-	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
-	if (sema->mem)
-		sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
-	spin_unlock(&dev_priv->fence.lock);
-
-	if (!sema->mem)
-		goto fail;
-
-	kref_init(&sema->ref);
-	sema->dev = dev;
-	for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
-		nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
-
-	return sema;
-fail:
-	kfree(sema);
-	return NULL;
-}
-
-static void
-semaphore_free(struct kref *ref)
-{
-	struct nouveau_semaphore *sema =
-		container_of(ref, struct nouveau_semaphore, ref);
-	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
-	spin_lock(&dev_priv->fence.lock);
-	drm_mm_put_block(sema->mem);
-	spin_unlock(&dev_priv->fence.lock);
-
-	kfree(sema);
-}
-
-static void
-semaphore_work(void *priv, bool signalled)
-{
-	struct nouveau_semaphore *sema = priv;
-	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
-	if (unlikely(!signalled))
-		nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
-
-	kref_put(&sema->ref, semaphore_free);
-}
-
-static int
-semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_fence *fence = NULL;
-	u64 offset = chan->fence.vma.offset + sema->mem->start;
-	int ret;
-
-	if (dev_priv->chipset < 0x84) {
-		ret = RING_SPACE(chan, 4);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
-		OUT_RING (chan, NvSema);
-		OUT_RING (chan, offset);
-		OUT_RING (chan, 1);
-	} else
-	if (dev_priv->chipset < 0xc0) {
-		ret = RING_SPACE(chan, 7);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-		OUT_RING (chan, chan->vram_handle);
-		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(offset));
-		OUT_RING (chan, lower_32_bits(offset));
-		OUT_RING (chan, 1);
-		OUT_RING (chan, 1); /* ACQUIRE_EQ */
-	} else {
-		ret = RING_SPACE(chan, 5);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(offset));
-		OUT_RING (chan, lower_32_bits(offset));
-		OUT_RING (chan, 1);
-		OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
-	}
-
-	/* Delay semaphore destruction until its work is done */
-	ret = nouveau_fence_new(chan, &fence);
-	if (ret)
-		return ret;
-
-	kref_get(&sema->ref);
-	nouveau_fence_work(fence, semaphore_work, sema);
-	nouveau_fence_unref(&fence);
-	return 0;
-}
-
-static int
-semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_fence *fence = NULL;
-	u64 offset = chan->fence.vma.offset + sema->mem->start;
-	int ret;
-
-	if (dev_priv->chipset < 0x84) {
-		ret = RING_SPACE(chan, 5);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
-		OUT_RING (chan, NvSema);
-		OUT_RING (chan, offset);
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
-		OUT_RING (chan, 1);
-	} else
-	if (dev_priv->chipset < 0xc0) {
-		ret = RING_SPACE(chan, 7);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-		OUT_RING (chan, chan->vram_handle);
-		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(offset));
-		OUT_RING (chan, lower_32_bits(offset));
-		OUT_RING (chan, 1);
-		OUT_RING (chan, 2); /* RELEASE */
-	} else {
-		ret = RING_SPACE(chan, 5);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(offset));
-		OUT_RING (chan, lower_32_bits(offset));
-		OUT_RING (chan, 1);
-		OUT_RING (chan, 0x1002); /* RELEASE */
-	}
-
-	/* Delay semaphore destruction until its work is done */
-	ret = nouveau_fence_new(chan, &fence);
-	if (ret)
-		return ret;
-
-	kref_get(&sema->ref);
-	nouveau_fence_work(fence, semaphore_work, sema);
-	nouveau_fence_unref(&fence);
-	return 0;
-}
-
-int
-nouveau_fence_sync(struct nouveau_fence *fence,
-		   struct nouveau_channel *wchan)
-{
-	struct nouveau_channel *chan;
-	struct drm_device *dev = wchan->dev;
-	struct nouveau_semaphore *sema;
-	int ret = 0;
-
-	chan = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
-	if (likely(!chan || chan == wchan || nouveau_fence_done(fence)))
-		goto out;
-
-	sema = semaphore_alloc(dev);
-	if (!sema) {
-		/* Early card or broken userspace, fall back to
-		 * software sync. */
-		ret = nouveau_fence_wait(fence, true, false);
-		goto out;
-	}
-
-	/* try to take chan's mutex, if we can't take it right away
-	 * we have to fallback to software sync to prevent locking
-	 * order issues
-	 */
-	if (!mutex_trylock(&chan->mutex)) {
-		ret = nouveau_fence_wait(fence, true, false);
-		goto out_unref;
-	}
-
-	/* Make wchan wait until it gets signalled */
-	ret = semaphore_acquire(wchan, sema);
-	if (ret)
-		goto out_unlock;
-
-	/* Signal the semaphore from chan */
-	ret = semaphore_release(chan, sema);
-
-out_unlock:
-	mutex_unlock(&chan->mutex);
-out_unref:
-	kref_put(&sema->ref, semaphore_free);
-out:
-	if (chan)
-		nouveau_channel_put_unlocked(&chan);
-	return ret;
-}
-
-int
-nouveau_fence_channel_init(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	if (dev_priv->card_type < NV_C0) {
-		ret = RING_SPACE(chan, 2);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
-		OUT_RING (chan, NvSw);
-		FIRE_RING (chan);
-	}
-
-	/* Setup area of memory shared between all channels for x-chan sync */
-	if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
-		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
-
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
-					     mem->start << PAGE_SHIFT,
-					     mem->size, NV_MEM_ACCESS_RW,
-					     NV_MEM_TARGET_VRAM, &obj);
-		if (ret)
-			return ret;
-
-		ret = nouveau_ramht_insert(chan, NvSema, obj);
-		nouveau_gpuobj_ref(NULL, &obj);
-		if (ret)
-			return ret;
-	} else
-	if (USE_SEMA(dev)) {
-		/* map fence bo into channel's vm */
-		ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
-					 &chan->fence.vma);
-		if (ret)
-			return ret;
-	}
-
-	atomic_set(&chan->fence.last_sequence_irq, 0);
-	return 0;
-}
-
-void
-nouveau_fence_channel_fini(struct nouveau_channel *chan)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_fence *tmp, *fence;
-
-	spin_lock(&chan->fence.lock);
-	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
-		fence->channel = NULL;
-		list_del(&fence->head);
-
-		if (unlikely(fence->work))
-			fence->work(fence->priv, false);
-
-		kref_put(&fence->kref, nouveau_fence_del);
-	}
-	spin_unlock(&chan->fence.lock);
-
-	nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
-}
-
-int
-nouveau_fence_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
-	int ret;
-
-	/* Create a shared VRAM heap for cross-channel sync. */
-	if (USE_SEMA(dev)) {
-		ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
-				     0, 0, NULL, &dev_priv->fence.bo);
-		if (ret)
-			return ret;
-
-		ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
-		if (ret)
-			goto fail;
-
-		ret = nouveau_bo_map(dev_priv->fence.bo);
-		if (ret)
-			goto fail;
-
-		ret = drm_mm_init(&dev_priv->fence.heap, 0,
-				  dev_priv->fence.bo->bo.mem.size);
-		if (ret)
-			goto fail;
-
-		spin_lock_init(&dev_priv->fence.lock);
-	}
-
-	return 0;
-fail:
-	nouveau_bo_unmap(dev_priv->fence.bo);
-	nouveau_bo_ref(NULL, &dev_priv->fence.bo);
-	return ret;
-}
-
-void
-nouveau_fence_fini(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (USE_SEMA(dev)) {
-		drm_mm_takedown(&dev_priv->fence.heap);
-		nouveau_bo_unmap(dev_priv->fence.bo);
-		nouveau_bo_unpin(dev_priv->fence.bo);
-		nouveau_bo_ref(NULL, &dev_priv->fence.bo);
-	}
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 1337acb00172..ec9afa775ea7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -25,4 +25,27 @@ int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
 void nouveau_fence_idle(struct nouveau_channel *);
 void nouveau_fence_update(struct nouveau_channel *);
 
+struct nouveau_fence_chan {
+	struct list_head pending;
+	spinlock_t lock;
+	u32 sequence;
+};
+
+struct nouveau_fence_priv {
+	struct nouveau_exec_engine engine;
+	int (*emit)(struct nouveau_fence *);
+	int (*sync)(struct nouveau_fence *, struct nouveau_channel *);
+	u32 (*read)(struct nouveau_channel *);
+};
+
+void nouveau_fence_context_new(struct nouveau_fence_chan *);
+void nouveau_fence_context_del(struct nouveau_fence_chan *);
+
+int nv04_fence_create(struct drm_device *dev);
+int nv04_fence_mthd(struct nouveau_channel *, u32, u32, u32);
+
+int nv10_fence_create(struct drm_device *dev);
+int nv84_fence_create(struct drm_device *dev);
+int nvc0_fence_create(struct drm_device *dev);
+
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 996755a8c9a1..30f542316944 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -709,7 +709,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	}
 
 	if (chan->dma.ib_max) {
-		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
+		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 		if (ret) {
 			NV_INFO(dev, "nv50cal_space: %d\n", ret);
 			goto out;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index ed83c425fcf3..1039e57d0aef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -39,6 +39,7 @@
 #include "nouveau_gpio.h"
 #include "nouveau_pm.h"
 #include "nv50_display.h"
+#include "nouveau_fence.h"
 #include "nouveau_software.h"
 
 static void nouveau_stub_takedown(struct drm_device *dev) {}
@@ -768,6 +769,29 @@ nouveau_card_init(struct drm_device *dev)
 	if (!dev_priv->noaccel) {
 		switch (dev_priv->card_type) {
 		case NV_04:
+			nv04_fence_create(dev);
+			break;
+		case NV_10:
+		case NV_20:
+		case NV_30:
+		case NV_40:
+		case NV_50:
+			if (dev_priv->chipset < 0x84)
+				nv10_fence_create(dev);
+			else
+				nv84_fence_create(dev);
+			break;
+		case NV_C0:
+		case NV_D0:
+		case NV_E0:
+			nvc0_fence_create(dev);
+			break;
+		default:
+			break;
+		}
+
+		switch (dev_priv->card_type) {
+		case NV_04:
 		case NV_10:
 		case NV_20:
 		case NV_30:
@@ -894,14 +918,10 @@ nouveau_card_init(struct drm_device *dev)
 	nouveau_backlight_init(dev);
 	nouveau_pm_init(dev);
 
-	ret = nouveau_fence_init(dev);
-	if (ret)
-		goto out_pm;
-
 	if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
 		ret = nouveau_card_channel_init(dev);
 		if (ret)
-			goto out_fence;
+			goto out_pm;
 	}
 
 	if (dev->mode_config.num_crtc) {
@@ -916,8 +936,6 @@ nouveau_card_init(struct drm_device *dev)
 
 out_chan:
 	nouveau_card_channel_fini(dev);
-out_fence:
-	nouveau_fence_fini(dev);
 out_pm:
 	nouveau_pm_fini(dev);
 	nouveau_backlight_exit(dev);
@@ -974,7 +992,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
 	}
 
 	nouveau_card_channel_fini(dev);
-	nouveau_fence_fini(dev);
 	nouveau_pm_fini(dev);
 	nouveau_backlight_exit(dev);
 	nouveau_display_destroy(dev);
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
new file mode 100644
index 000000000000..08bd2ceaefef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv04_fence_chan {
+	struct nouveau_fence_chan base;
+	atomic_t sequence;
+};
+
+struct nv04_fence_priv {
+	struct nouveau_fence_priv base;
+};
+
+static int
+nv04_fence_emit(struct nouveau_fence *fence)
+{
+	struct nouveau_channel *chan = fence->channel;
+	int ret = RING_SPACE(chan, 2);
+	if (ret == 0) {
+		BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
+		OUT_RING (chan, fence->sequence);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
+
+static int
+nv04_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
+{
+	return -ENODEV;
+}
+
+int
+nv04_fence_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+	struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+	atomic_set(&fctx->sequence, data);
+	return 0;
+}
+
+static u32
+nv04_fence_read(struct nouveau_channel *chan)
+{
+	struct nv04_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+	return atomic_read(&fctx->sequence);
+}
+
+static void
+nv04_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+	struct nv04_fence_chan *fctx = chan->engctx[engine];
+	nouveau_fence_context_del(&fctx->base);
+	chan->engctx[engine] = NULL;
+	kfree(fctx);
+}
+
+static int
+nv04_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (fctx) {
+		nouveau_fence_context_new(&fctx->base);
+		atomic_set(&fctx->sequence, 0);
+		chan->engctx[engine] = fctx;
+		return 0;
+	}
+	return -ENOMEM;
+}
+
+static int
+nv04_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+	return 0;
+}
+
+static int
+nv04_fence_init(struct drm_device *dev, int engine)
+{
+	return 0;
+}
+
+static void
+nv04_fence_destroy(struct drm_device *dev, int engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_fence_priv *priv = nv_engine(dev, engine);
+
+	dev_priv->eng[engine] = NULL;
+	kfree(priv);
+}
+
+int
+nv04_fence_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv04_fence_priv *priv;
+	int ret = 0;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.engine.destroy = nv04_fence_destroy;
+	priv->base.engine.init = nv04_fence_init;
+	priv->base.engine.fini = nv04_fence_fini;
+	priv->base.engine.context_new = nv04_fence_context_new;
+	priv->base.engine.context_del = nv04_fence_context_del;
+	priv->base.emit = nv04_fence_emit;
+	priv->base.sync = nv04_fence_sync;
+	priv->base.read = nv04_fence_read;
+	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_software.c b/drivers/gpu/drm/nouveau/nv04_software.c
index a91cec6030a5..0c41abf48774 100644
--- a/drivers/gpu/drm/nouveau/nv04_software.c
+++ b/drivers/gpu/drm/nouveau/nv04_software.c
@@ -26,6 +26,7 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
+#include "nouveau_fence.h"
 #include "nouveau_software.h"
 #include "nouveau_hw.h"
 
@@ -38,13 +39,6 @@ struct nv04_software_chan {
 };
 
 static int
-mthd_fence(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
-	atomic_set(&chan->fence.last_sequence_irq, data);
-	return 0;
-}
-
-static int
 mthd_flip(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
 {
 
@@ -69,7 +63,6 @@ nv04_software_context_new(struct nouveau_channel *chan, int engine)
 		return -ENOMEM;
 
 	nouveau_software_context_new(&pch->base);
-	atomic_set(&chan->fence.last_sequence_irq, 0);
 	chan->engctx[engine] = pch;
 	return 0;
 }
@@ -143,7 +136,7 @@ nv04_software_create(struct drm_device *dev)
 	NVOBJ_ENGINE_ADD(dev, SW, &psw->base.base);
 	if (dev_priv->card_type <= NV_04) {
 		NVOBJ_CLASS(dev, 0x006e, SW);
-		NVOBJ_MTHD (dev, 0x006e, 0x0150, mthd_fence);
+		NVOBJ_MTHD (dev, 0x006e, 0x0150, nv04_fence_mthd);
 		NVOBJ_MTHD (dev, 0x006e, 0x0500, mthd_flip);
 	} else {
 		NVOBJ_CLASS(dev, 0x016e, SW);
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
new file mode 100644
index 000000000000..10831eaff958
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv10_fence_chan {
+	struct nouveau_fence_chan base;
+};
+
+struct nv10_fence_priv {
+	struct nouveau_fence_priv base;
+	struct nouveau_bo *bo;
+	spinlock_t lock;
+	u32 sequence;
+};
+
+static int
+nv10_fence_emit(struct nouveau_fence *fence)
+{
+	struct nouveau_channel *chan = fence->channel;
+	int ret = RING_SPACE(chan, 2);
+	if (ret == 0) {
+		BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
+		OUT_RING (chan, fence->sequence);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
+
+static int
+nv10_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
+{
+	return -ENODEV;
+}
+
+static int
+nv17_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
+{
+	struct nv10_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+	struct nouveau_channel *prev = fence->channel;
+	u32 value;
+	int ret;
+
+	if (!mutex_trylock(&prev->mutex))
+		return -EBUSY;
+
+	spin_lock(&priv->lock);
+	value = priv->sequence;
+	priv->sequence += 2;
+	spin_unlock(&priv->lock);
+
+	ret = RING_SPACE(prev, 5);
+	if (!ret) {
+		BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+		OUT_RING (prev, NvSema);
+		OUT_RING (prev, 0);
+		OUT_RING (prev, value + 0);
+		OUT_RING (prev, value + 1);
+		FIRE_RING (prev);
+	}
+
+	if (!ret && !(ret = RING_SPACE(chan, 5))) {
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
+		OUT_RING (chan, NvSema);
+		OUT_RING (chan, 0);
+		OUT_RING (chan, value + 1);
+		OUT_RING (chan, value + 2);
+		FIRE_RING (chan);
+	}
+
+	mutex_unlock(&prev->mutex);
+	return 0;
+}
+
+static u32
+nv10_fence_read(struct nouveau_channel *chan)
+{
+	return nvchan_rd32(chan, 0x0048);
+}
+
+static void
+nv10_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+	struct nv10_fence_chan *fctx = chan->engctx[engine];
+	nouveau_fence_context_del(&fctx->base);
+	chan->engctx[engine] = NULL;
+	kfree(fctx);
+}
+
+static int
+nv10_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct nv10_fence_priv *priv = nv_engine(chan->dev, engine);
+	struct nv10_fence_chan *fctx;
+	struct nouveau_gpuobj *obj;
+	int ret = 0;
+
+	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
+
+	nouveau_fence_context_new(&fctx->base);
+
+	if (priv->bo) {
+		struct ttm_mem_reg *mem = &priv->bo->bo.mem;
+
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
+					     mem->start * PAGE_SIZE, mem->size,
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &obj);
+		if (!ret) {
+			ret = nouveau_ramht_insert(chan, NvSema, obj);
+			nouveau_gpuobj_ref(NULL, &obj);
+		}
+	}
+
+	if (ret)
+		nv10_fence_context_del(chan, engine);
+	return ret;
+}
+
+static int
+nv10_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+	return 0;
+}
+
+static int
+nv10_fence_init(struct drm_device *dev, int engine)
+{
+	return 0;
+}
+
+static void
+nv10_fence_destroy(struct drm_device *dev, int engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv10_fence_priv *priv = nv_engine(dev, engine);
+
+	nouveau_bo_ref(NULL, &priv->bo);
+	dev_priv->eng[engine] = NULL;
+	kfree(priv);
+}
+
+int
+nv10_fence_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv10_fence_priv *priv;
+	int ret = 0;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.engine.destroy = nv10_fence_destroy;
+	priv->base.engine.init = nv10_fence_init;
+	priv->base.engine.fini = nv10_fence_fini;
+	priv->base.engine.context_new = nv10_fence_context_new;
+	priv->base.engine.context_del = nv10_fence_context_del;
+	priv->base.emit = nv10_fence_emit;
+	priv->base.read = nv10_fence_read;
+	priv->base.sync = nv10_fence_sync;
+	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+	spin_lock_init(&priv->lock);
+
+	if (dev_priv->chipset >= 0x17) {
+		ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+				     0, 0x0000, NULL, &priv->bo);
+		if (!ret) {
+			ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+			if (!ret)
+				ret = nouveau_bo_map(priv->bo);
+			if (ret)
+				nouveau_bo_ref(NULL, &priv->bo);
+		}
+
+		if (ret == 0) {
+			nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
+			priv->base.sync = nv17_fence_sync;
+		}
+	}
+
+	if (ret)
+		nv10_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
new file mode 100644
index 000000000000..d23dbc06f436
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nv84_fence_chan {
+	struct nouveau_fence_chan base;
+};
+
+struct nv84_fence_priv {
+	struct nouveau_fence_priv base;
+	struct nouveau_gpuobj *mem;
+};
+
+static int
+nv84_fence_emit(struct nouveau_fence *fence)
+{
+	struct nouveau_channel *chan = fence->channel;
+	int ret = RING_SPACE(chan, 7);
+	if (ret == 0) {
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+		OUT_RING (chan, NvSema);
+		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING (chan, upper_32_bits(chan->id * 16));
+		OUT_RING (chan, lower_32_bits(chan->id * 16));
+		OUT_RING (chan, fence->sequence);
+		OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
+
+static int
+nv84_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
+{
+	int ret = RING_SPACE(chan, 7);
+	if (ret == 0) {
+		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+		OUT_RING (chan, NvSema);
+		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING (chan, upper_32_bits(fence->channel->id * 16));
+		OUT_RING (chan, lower_32_bits(fence->channel->id * 16));
+		OUT_RING (chan, fence->sequence);
+		OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
+		FIRE_RING (chan);
+	}
+	return ret;
+}
+
+static u32
+nv84_fence_read(struct nouveau_channel *chan)
+{
+	struct nv84_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+	return nv_ro32(priv->mem, chan->id * 16);
+}
+
+static void
+nv84_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+	struct nv84_fence_chan *fctx = chan->engctx[engine];
+	nouveau_fence_context_del(&fctx->base);
+	chan->engctx[engine] = NULL;
+	kfree(fctx);
+}
+
+static int
+nv84_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct nv84_fence_priv *priv = nv_engine(chan->dev, engine);
+	struct nv84_fence_chan *fctx;
+	struct nouveau_gpuobj *obj;
+	int ret;
+
+	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
+
+	nouveau_fence_context_new(&fctx->base);
+
+	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
+				     priv->mem->vinst, priv->mem->size,
+				     NV_MEM_ACCESS_RW,
+				     NV_MEM_TARGET_VRAM, &obj);
+	if (ret == 0) {
+		ret = nouveau_ramht_insert(chan, NvSema, obj);
+		nouveau_gpuobj_ref(NULL, &obj);
+		nv_wo32(priv->mem, chan->id * 16, 0x00000000);
+	}
+
+	if (ret)
+		nv84_fence_context_del(chan, engine);
+	return ret;
+}
+
+static int
+nv84_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+	return 0;
+}
+
+static int
+nv84_fence_init(struct drm_device *dev, int engine)
+{
+	return 0;
+}
+
+static void
+nv84_fence_destroy(struct drm_device *dev, int engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv84_fence_priv *priv = nv_engine(dev, engine);
+
+	nouveau_gpuobj_ref(NULL, &priv->mem);
+	dev_priv->eng[engine] = NULL;
+	kfree(priv);
+}
+
+int
+nv84_fence_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nv84_fence_priv *priv;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.engine.destroy = nv84_fence_destroy;
+	priv->base.engine.init = nv84_fence_init;
+	priv->base.engine.fini = nv84_fence_fini;
+	priv->base.engine.context_new = nv84_fence_context_new;
+	priv->base.engine.context_del = nv84_fence_context_del;
+	priv->base.emit = nv84_fence_emit;
+	priv->base.sync = nv84_fence_sync;
+	priv->base.read = nv84_fence_read;
+	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 16 * pfifo->channels,
+				 0x1000, 0, &priv->mem);
+	if (ret)
+		goto out;
+
+out:
+	if (ret)
+		nv84_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+	return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
new file mode 100644
index 000000000000..41545f15c4d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fence.h"
+
+struct nvc0_fence_priv {
+	struct nouveau_fence_priv base;
+	struct nouveau_bo *bo;
+};
+
+struct nvc0_fence_chan {
+	struct nouveau_fence_chan base;
+	struct nouveau_vma vma;
+};
+
+static int
+nvc0_fence_emit(struct nouveau_fence *fence)
+{
+	struct nouveau_channel *chan = fence->channel;
+	struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+	u64 addr = fctx->vma.offset + chan->id * 16;
+	int ret;
+
+	ret = RING_SPACE(chan, 5);
+	if (ret == 0) {
+		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING (chan, upper_32_bits(addr));
+		OUT_RING (chan, lower_32_bits(addr));
+		OUT_RING (chan, fence->sequence);
+		OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
+		FIRE_RING (chan);
+	}
+
+	return ret;
+}
+
+static int
+nvc0_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
+{
+	struct nvc0_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+	u64 addr = fctx->vma.offset + fence->channel->id * 16;
+	int ret;
+
+	ret = RING_SPACE(chan, 5);
+	if (ret == 0) {
+		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+		OUT_RING (chan, upper_32_bits(addr));
+		OUT_RING (chan, lower_32_bits(addr));
+		OUT_RING (chan, fence->sequence);
+		OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
+				NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
+		FIRE_RING (chan);
+	}
+
+	return ret;
+}
+
+static u32
+nvc0_fence_read(struct nouveau_channel *chan)
+{
+	struct nvc0_fence_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_FENCE);
+	return nouveau_bo_rd32(priv->bo, chan->id * 16/4);
+}
+
+static void
+nvc0_fence_context_del(struct nouveau_channel *chan, int engine)
+{
+	struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
+	struct nvc0_fence_chan *fctx = chan->engctx[engine];
+
+	nouveau_bo_vma_del(priv->bo, &fctx->vma);
+	nouveau_fence_context_del(&fctx->base);
+	chan->engctx[engine] = NULL;
+	kfree(fctx);
+}
+
+static int
+nvc0_fence_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct nvc0_fence_priv *priv = nv_engine(chan->dev, engine);
+	struct nvc0_fence_chan *fctx;
+	int ret;
+
+	fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+	if (!fctx)
+		return -ENOMEM;
+
+	nouveau_fence_context_new(&fctx->base);
+
+	ret = nouveau_bo_vma_add(priv->bo, chan->vm, &fctx->vma);
+	if (ret)
+		nvc0_fence_context_del(chan, engine);
+
+	nouveau_bo_wr32(priv->bo, chan->id * 16/4, 0x00000000);
+	return ret;
+}
+
+static int
+nvc0_fence_fini(struct drm_device *dev, int engine, bool suspend)
+{
+	return 0;
+}
+
+static int
+nvc0_fence_init(struct drm_device *dev, int engine)
+{
+	return 0;
+}
+
+static void
+nvc0_fence_destroy(struct drm_device *dev, int engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_fence_priv *priv = nv_engine(dev, engine);
+
+	nouveau_bo_unmap(priv->bo);
+	nouveau_bo_ref(NULL, &priv->bo);
+	dev_priv->eng[engine] = NULL;
+	kfree(priv);
+}
+
+int
+nvc0_fence_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fence_priv *priv;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->base.engine.destroy = nvc0_fence_destroy;
+	priv->base.engine.init = nvc0_fence_init;
+	priv->base.engine.fini = nvc0_fence_fini;
+	priv->base.engine.context_new = nvc0_fence_context_new;
+	priv->base.engine.context_del = nvc0_fence_context_del;
+	priv->base.emit = nvc0_fence_emit;
+	priv->base.sync = nvc0_fence_sync;
+	priv->base.read = nvc0_fence_read;
+	dev_priv->eng[NVOBJ_ENGINE_FENCE] = &priv->base.engine;
+
+	ret = nouveau_bo_new(dev, 16 * pfifo->channels, 0, TTM_PL_FLAG_VRAM,
+			     0, 0, NULL, &priv->bo);
+	if (ret == 0) {
+		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+		if (ret == 0)
+			ret = nouveau_bo_map(priv->bo);
+		if (ret)
+			nouveau_bo_ref(NULL, &priv->bo);
+	}
+
+	if (ret)
+		nvc0_fence_destroy(dev, NVOBJ_ENGINE_FENCE);
+	return ret;
+}