Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_fence.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_fence.c	| 474
1 file changed, 62 insertions(+), 412 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 2c10d54fc493..4ba41a45114f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -36,85 +36,71 @@
 #include "nouveau_software.h"
 #include "nouveau_dma.h"
 
-#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
+void
+nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
+{
+	struct nouveau_fence *fence, *fnext;
+	spin_lock(&fctx->lock);
+	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+		if (fence->work)
+			fence->work(fence->priv, false);
+		fence->channel = NULL;
+		list_del(&fence->head);
+		nouveau_fence_unref(&fence);
+	}
+	spin_unlock(&fctx->lock);
+}
+
+void
+nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
+{
+	INIT_LIST_HEAD(&fctx->pending);
+	spin_lock_init(&fctx->lock);
+}
 
 void
 nouveau_fence_update(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
-	struct nouveau_fence *tmp, *fence;
-	uint32_t sequence;
-
-	spin_lock(&chan->fence.lock);
-
-	/* Fetch the last sequence if the channel is still up and running */
-	if (likely(!list_empty(&chan->fence.pending))) {
-		if (USE_REFCNT(dev))
-			sequence = nvchan_rd32(chan, 0x48);
-		else
-			sequence = atomic_read(&chan->fence.last_sequence_irq);
-
-		if (chan->fence.sequence_ack == sequence)
-			goto out;
-		chan->fence.sequence_ack = sequence;
-	}
+	struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+	struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
+	struct nouveau_fence *fence, *fnext;
 
-	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
-		if (fence->sequence > chan->fence.sequence_ack)
+	spin_lock(&fctx->lock);
+	list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+		if (priv->read(chan) < fence->sequence)
 			break;
 
-		fence->channel = NULL;
-		list_del(&fence->head);
 		if (fence->work)
 			fence->work(fence->priv, true);
-
+		fence->channel = NULL;
+		list_del(&fence->head);
 		nouveau_fence_unref(&fence);
 	}
-
-out:
-	spin_unlock(&chan->fence.lock);
+	spin_unlock(&fctx->lock);
 }
 
 int
 nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+	struct nouveau_fence_chan *fctx = chan->engctx[NVOBJ_ENGINE_FENCE];
 	int ret;
 
-	ret = RING_SPACE(chan, 2);
-	if (ret)
-		return ret;
-
-	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
-		nouveau_fence_update(chan);
-
-		BUG_ON(chan->fence.sequence ==
-		       chan->fence.sequence_ack - 1);
-	}
-
-	fence->sequence = ++chan->fence.sequence;
-	fence->channel = chan;
-
-	kref_get(&fence->kref);
-	spin_lock(&chan->fence.lock);
-	list_add_tail(&fence->head, &chan->fence.pending);
-	spin_unlock(&chan->fence.lock);
+	fence->channel = chan;
+	fence->timeout = jiffies + (3 * DRM_HZ);
+	fence->sequence = ++fctx->sequence;
 
-	if (USE_REFCNT(dev)) {
-		if (dev_priv->card_type < NV_C0)
-			BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-		else
-			BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
-	} else {
-		BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
+	ret = priv->emit(fence);
+	if (!ret) {
+		kref_get(&fence->kref);
+		spin_lock(&fctx->lock);
+		list_add_tail(&fence->head, &fctx->pending);
+		spin_unlock(&fctx->lock);
 	}
-	OUT_RING (chan, fence->sequence);
-	FIRE_RING(chan);
-	fence->timeout = jiffies + 3 * DRM_HZ;
 
-	return 0;
+	return ret;
 }
 
 bool
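
The chipset-specific paths are gone from this file: nouveau_fence_update() now polls priv->read(chan) for the last completed sequence number, and nouveau_fence_emit() defers ring programming to priv->emit(fence). A minimal sketch of the fence-engine interface these calls assume; the real definition lives elsewhere in this series (nouveau_fence.h), so the fields shown here are inferred from the calls above and purely illustrative:

    /* Illustrative sketch, not part of this patch: the engine object
     * fetched via nv_engine(dev, NVOBJ_ENGINE_FENCE). Only the hooks
     * the code above actually invokes are shown. */
    struct nouveau_fence_priv {
        struct nouveau_exec_engine engine;      /* registered as NVOBJ_ENGINE_FENCE */
        int  (*emit)(struct nouveau_fence *);   /* put the seqno on the ring */
        int  (*sync)(struct nouveau_fence *,    /* make a channel wait for a fence */
                     struct nouveau_channel *);
        u32  (*read)(struct nouveau_channel *); /* last seqno the GPU completed */
    };
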
@@ -158,6 +144,23 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
 	return ret;
 }
 
+int
+nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
+{
+	struct nouveau_channel *prev = fence ? fence->channel : NULL;
+	struct drm_device *dev = chan->dev;
+	struct nouveau_fence_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FENCE);
+	int ret = 0;
+
+	if (unlikely(prev && prev != chan && !nouveau_fence_done(fence))) {
+		ret = priv->sync(fence, chan);
+		if (unlikely(ret))
+			ret = nouveau_fence_wait(fence, true, false);
+	}
+
+	return ret;
+}
+
 static void
 nouveau_fence_del(struct kref *kref)
 {
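
The replacement nouveau_fence_sync() keeps every fast path internal: a NULL fence, a fence from the same channel, and an already-signalled fence all return 0 immediately, and a failed backend sync() degrades to a blocking software wait. An illustrative two-channel caller (not from this patch; note that nouveau_fence_new() also emits the fence, which the semaphore helpers removed below relied on):

    /* Illustrative: order 'cons' behind work just submitted on 'prod'.
     * nouveau_fence_new() emits the fence on 'prod'; nouveau_fence_sync()
     * then makes 'cons' wait, GPU-side where the backend supports it. */
    static int
    example_order_channels(struct nouveau_channel *prod,
                           struct nouveau_channel *cons)
    {
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(prod, &fence);
        if (ret)
            return ret;

        ret = nouveau_fence_sync(fence, cons);
        nouveau_fence_unref(&fence);
        return ret;
    }
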
@@ -186,6 +189,9 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
 	struct nouveau_fence *fence;
 	int ret = 0;
 
+	if (unlikely(!chan->engctx[NVOBJ_ENGINE_FENCE]))
+		return -ENODEV;
+
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 	if (!fence)
 		return -ENOMEM;
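
The -ENODEV guard covers channels created before a fence engine context exists. That context is presumably installed by the per-chipset backend's engine context hook; an assumed sketch of that wiring, built only on the nouveau_fence_context_new() helper added above:

    /* Assumed backend wiring (illustrative): the chipset backend allocates
     * per-channel fence state in its context_new() hook, which is what
     * makes chan->engctx[NVOBJ_ENGINE_FENCE] non-NULL here. */
    static int
    example_fence_context_new(struct nouveau_channel *chan, int engine)
    {
        struct nouveau_fence_chan *fctx;

        fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
        if (!fctx)
            return -ENOMEM;

        nouveau_fence_context_new(fctx);  /* init pending list + spinlock */
        chan->engctx[engine] = fctx;
        return 0;
    }
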
@@ -200,359 +206,3 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
 	*pfence = fence;
 	return ret;
 }
-
-struct nouveau_semaphore {
-	struct kref ref;
-	struct drm_device *dev;
-	struct drm_mm_node *mem;
-};
-
-void
-nouveau_fence_work(struct nouveau_fence *fence,
-		   void (*work)(void *priv, bool signalled),
-		   void *priv)
-{
-	if (!fence->channel) {
-		work(priv, true);
-	} else {
-		fence->work = work;
-		fence->priv = priv;
-	}
-}
-
-static struct nouveau_semaphore *
-semaphore_alloc(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_semaphore *sema;
-	int size = (dev_priv->chipset < 0x84) ? 4 : 16;
-	int ret, i;
-
-	if (!USE_SEMA(dev))
-		return NULL;
-
-	sema = kmalloc(sizeof(*sema), GFP_KERNEL);
-	if (!sema)
-		goto fail;
-
-	ret = drm_mm_pre_get(&dev_priv->fence.heap);
-	if (ret)
-		goto fail;
-
-	spin_lock(&dev_priv->fence.lock);
-	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
-	if (sema->mem)
-		sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
-	spin_unlock(&dev_priv->fence.lock);
-
-	if (!sema->mem)
-		goto fail;
-
-	kref_init(&sema->ref);
-	sema->dev = dev;
-	for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
-		nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);
-
-	return sema;
-fail:
-	kfree(sema);
-	return NULL;
-}
-
-static void
-semaphore_free(struct kref *ref)
-{
-	struct nouveau_semaphore *sema =
-		container_of(ref, struct nouveau_semaphore, ref);
-	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
-	spin_lock(&dev_priv->fence.lock);
-	drm_mm_put_block(sema->mem);
-	spin_unlock(&dev_priv->fence.lock);
-
-	kfree(sema);
-}
-
-static void
-semaphore_work(void *priv, bool signalled)
-{
-	struct nouveau_semaphore *sema = priv;
-	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
-
-	if (unlikely(!signalled))
-		nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
-
-	kref_put(&sema->ref, semaphore_free);
-}
-
-static int
-semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_fence *fence = NULL;
-	u64 offset = chan->fence.vma.offset + sema->mem->start;
-	int ret;
-
-	if (dev_priv->chipset < 0x84) {
-		ret = RING_SPACE(chan, 4);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
-		OUT_RING (chan, NvSema);
-		OUT_RING (chan, offset);
-		OUT_RING (chan, 1);
-	} else
-	if (dev_priv->chipset < 0xc0) {
-		ret = RING_SPACE(chan, 7);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-		OUT_RING (chan, chan->vram_handle);
-		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(offset));
-		OUT_RING (chan, lower_32_bits(offset));
-		OUT_RING (chan, 1);
-		OUT_RING (chan, 1); /* ACQUIRE_EQ */
-	} else {
-		ret = RING_SPACE(chan, 5);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(offset));
-		OUT_RING (chan, lower_32_bits(offset));
-		OUT_RING (chan, 1);
-		OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
-	}
-
-	/* Delay semaphore destruction until its work is done */
-	ret = nouveau_fence_new(chan, &fence);
-	if (ret)
-		return ret;
-
-	kref_get(&sema->ref);
-	nouveau_fence_work(fence, semaphore_work, sema);
-	nouveau_fence_unref(&fence);
-	return 0;
-}
-
-static int
-semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_fence *fence = NULL;
-	u64 offset = chan->fence.vma.offset + sema->mem->start;
-	int ret;
-
-	if (dev_priv->chipset < 0x84) {
-		ret = RING_SPACE(chan, 5);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
-		OUT_RING (chan, NvSema);
-		OUT_RING (chan, offset);
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
-		OUT_RING (chan, 1);
-	} else
-	if (dev_priv->chipset < 0xc0) {
-		ret = RING_SPACE(chan, 7);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
-		OUT_RING (chan, chan->vram_handle);
-		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(offset));
-		OUT_RING (chan, lower_32_bits(offset));
-		OUT_RING (chan, 1);
-		OUT_RING (chan, 2); /* RELEASE */
-	} else {
-		ret = RING_SPACE(chan, 5);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
-		OUT_RING (chan, upper_32_bits(offset));
-		OUT_RING (chan, lower_32_bits(offset));
-		OUT_RING (chan, 1);
-		OUT_RING (chan, 0x1002); /* RELEASE */
-	}
-
-	/* Delay semaphore destruction until its work is done */
-	ret = nouveau_fence_new(chan, &fence);
-	if (ret)
-		return ret;
-
-	kref_get(&sema->ref);
-	nouveau_fence_work(fence, semaphore_work, sema);
-	nouveau_fence_unref(&fence);
-	return 0;
-}
-
-int
-nouveau_fence_sync(struct nouveau_fence *fence,
-		   struct nouveau_channel *wchan)
-{
-	struct nouveau_channel *chan;
-	struct drm_device *dev = wchan->dev;
-	struct nouveau_semaphore *sema;
-	int ret = 0;
-
-	chan = fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
-	if (likely(!chan || chan == wchan || nouveau_fence_done(fence)))
-		goto out;
-
-	sema = semaphore_alloc(dev);
-	if (!sema) {
-		/* Early card or broken userspace, fall back to
-		 * software sync. */
-		ret = nouveau_fence_wait(fence, true, false);
-		goto out;
-	}
-
-	/* try to take chan's mutex, if we can't take it right away
-	 * we have to fallback to software sync to prevent locking
-	 * order issues
-	 */
-	if (!mutex_trylock(&chan->mutex)) {
-		ret = nouveau_fence_wait(fence, true, false);
-		goto out_unref;
-	}
-
-	/* Make wchan wait until it gets signalled */
-	ret = semaphore_acquire(wchan, sema);
-	if (ret)
-		goto out_unlock;
-
-	/* Signal the semaphore from chan */
-	ret = semaphore_release(chan, sema);
-
-out_unlock:
-	mutex_unlock(&chan->mutex);
-out_unref:
-	kref_put(&sema->ref, semaphore_free);
-out:
-	if (chan)
-		nouveau_channel_put_unlocked(&chan);
-	return ret;
-}
-
-int
-nouveau_fence_channel_init(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	if (dev_priv->card_type < NV_C0) {
-		ret = RING_SPACE(chan, 2);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
-		OUT_RING (chan, NvSw);
-		FIRE_RING (chan);
-	}
-
-	/* Setup area of memory shared between all channels for x-chan sync */
-	if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
-		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
-
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
-					     mem->start << PAGE_SHIFT,
-					     mem->size, NV_MEM_ACCESS_RW,
-					     NV_MEM_TARGET_VRAM, &obj);
-		if (ret)
-			return ret;
-
-		ret = nouveau_ramht_insert(chan, NvSema, obj);
-		nouveau_gpuobj_ref(NULL, &obj);
-		if (ret)
-			return ret;
-	} else
-	if (USE_SEMA(dev)) {
-		/* map fence bo into channel's vm */
-		ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
-					 &chan->fence.vma);
-		if (ret)
-			return ret;
-	}
-
-	atomic_set(&chan->fence.last_sequence_irq, 0);
-	return 0;
-}
-
-void
-nouveau_fence_channel_fini(struct nouveau_channel *chan)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_fence *tmp, *fence;
-
-	spin_lock(&chan->fence.lock);
-	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, head) {
-		fence->channel = NULL;
-		list_del(&fence->head);
-
-		if (unlikely(fence->work))
-			fence->work(fence->priv, false);
-
-		kref_put(&fence->kref, nouveau_fence_del);
-	}
-	spin_unlock(&chan->fence.lock);
-
-	nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
-}
-
-int
-nouveau_fence_init(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
-	int ret;
-
-	/* Create a shared VRAM heap for cross-channel sync. */
-	if (USE_SEMA(dev)) {
-		ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
-				     0, 0, NULL, &dev_priv->fence.bo);
-		if (ret)
-			return ret;
-
-		ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
-		if (ret)
-			goto fail;
-
-		ret = nouveau_bo_map(dev_priv->fence.bo);
-		if (ret)
-			goto fail;
-
-		ret = drm_mm_init(&dev_priv->fence.heap, 0,
-				  dev_priv->fence.bo->bo.mem.size);
-		if (ret)
-			goto fail;
-
-		spin_lock_init(&dev_priv->fence.lock);
-	}
-
-	return 0;
-fail:
-	nouveau_bo_unmap(dev_priv->fence.bo);
-	nouveau_bo_ref(NULL, &dev_priv->fence.bo);
-	return ret;
-}
-
-void
-nouveau_fence_fini(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (USE_SEMA(dev)) {
-		drm_mm_takedown(&dev_priv->fence.heap);
-		nouveau_bo_unmap(dev_priv->fence.bo);
-		nouveau_bo_unpin(dev_priv->fence.bo);
-		nouveau_bo_ref(NULL, &dev_priv->fence.bo);
-	}
-}
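
Everything deleted above (the shared VRAM semaphore heap, the NvSema DMA object, the chipset-conditional acquire/release pushbuf sequences, and the old software-sequence bookkeeping) becomes the responsibility of per-chipset backends behind priv->emit/read/sync. For chips with an on-channel reference counter the read() hook can stay trivial; an illustrative sketch (assumed backend code, not in this diff) mirroring the nvchan_rd32(chan, 0x48) read the old nouveau_fence_update() performed:

    /* Illustrative read() for refcount-capable chips: returns the last
     * sequence number the GPU has completed. Offset 0x48 matches the
     * old USE_REFCNT path removed from nouveau_fence_update(). */
    static u32
    example_fence_read(struct nouveau_channel *chan)
    {
        return nvchan_rd32(chan, 0x48);
    }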