Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_fence.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c  195
1 file changed, 194 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index fbb2c3b2623..f42675cc9d1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -28,9 +28,11 @@
 #include "drm.h"
 
 #include "nouveau_drv.h"
+#include "nouveau_ramht.h"
 #include "nouveau_dma.h"
 
 #define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
 
 struct nouveau_fence {
 	struct nouveau_channel *channel;
@@ -44,6 +46,12 @@ struct nouveau_fence {
 	void *priv;
 };
 
+struct nouveau_semaphore {
+	struct kref ref;
+	struct drm_device *dev;
+	struct drm_mm_node *mem;
+};
+
 static inline struct nouveau_fence *
 nouveau_fence(void *sync_obj)
 {
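
Each nouveau_semaphore maps to a 4-byte word carved out of a shared VRAM buffer (allocated in nouveau_fence_init() further down). The protocol the patch builds on is simple: RELEASE stores a value into the word, and ACQUIRE stalls the channel until the word holds that value. A minimal CPU-side model of those semantics, for illustration only (on the real hardware the stall happens in the FIFO engine, not in a spin loop, and the word lives in VRAM at sema->mem->start):

    /* Illustration only: CPU-side model of the semaphore protocol. */
    #include <stdint.h>

    static void model_semaphore_release(volatile uint32_t *word, uint32_t val)
    {
            *word = val;            /* NV_SW_SEMAPHORE_RELEASE: store the value */
    }

    static void model_semaphore_acquire(volatile uint32_t *word, uint32_t val)
    {
            while (*word != val)    /* NV_SW_SEMAPHORE_ACQUIRE: stall until */
                    ;               /* the word matches the value */
    }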
@@ -236,17 +244,128 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
 	return ret;
 }
 
+static struct nouveau_semaphore *
+alloc_semaphore(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_semaphore *sema;
+
+	if (!USE_SEMA(dev))
+		return NULL;
+
+	sema = kmalloc(sizeof(*sema), GFP_KERNEL);
+	if (!sema)
+		goto fail;
+
+	spin_lock(&dev_priv->fence.lock);
+	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
+	if (sema->mem)
+		sema->mem = drm_mm_get_block(sema->mem, 4, 0);
+	spin_unlock(&dev_priv->fence.lock);
+
+	if (!sema->mem)
+		goto fail;
+
+	kref_init(&sema->ref);
+	sema->dev = dev;
+	nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);
+
+	return sema;
+fail:
+	kfree(sema);
+	return NULL;
+}
+
+static void
+free_semaphore(struct kref *ref)
+{
+	struct nouveau_semaphore *sema =
+		container_of(ref, struct nouveau_semaphore, ref);
+	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+
+	spin_lock(&dev_priv->fence.lock);
+	drm_mm_put_block(sema->mem);
+	spin_unlock(&dev_priv->fence.lock);
+
+	kfree(sema);
+}
+
+static void
+semaphore_work(void *priv, bool signalled)
+{
+	struct nouveau_semaphore *sema = priv;
+	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+
+	if (unlikely(!signalled))
+		nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
+
+	kref_put(&sema->ref, free_semaphore);
+}
+
+static int
+emit_semaphore(struct nouveau_channel *chan, int method,
+	       struct nouveau_semaphore *sema)
+{
+	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+	struct nouveau_fence *fence;
+	int ret;
+
+	ret = RING_SPACE(chan, dev_priv->card_type >= NV_50 ? 6 : 4);
+	if (ret)
+		return ret;
+
+	if (dev_priv->card_type >= NV_50) {
+		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+		OUT_RING(chan, NvSema);
+	}
+	BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+	OUT_RING(chan, sema->mem->start);
+	BEGIN_RING(chan, NvSubSw, method, 1);
+	OUT_RING(chan, 1);
+
+	/* Delay semaphore destruction until its work is done */
+	ret = nouveau_fence_new(chan, &fence, true);
+	if (ret)
+		return ret;
+
+	kref_get(&sema->ref);
+	nouveau_fence_work(fence, semaphore_work, sema);
+	nouveau_fence_unref((void *)&fence);
+
+	return 0;
+}
+
 int
 nouveau_fence_sync(struct nouveau_fence *fence,
 		   struct nouveau_channel *wchan)
 {
 	struct nouveau_channel *chan = nouveau_fence_channel(fence);
+	struct drm_device *dev = wchan->dev;
+	struct nouveau_semaphore *sema;
+	int ret;
 
 	if (likely(!fence || chan == wchan ||
 		   nouveau_fence_signalled(fence, NULL)))
 		return 0;
 
-	return nouveau_fence_wait(fence, NULL, false, false);
+	sema = alloc_semaphore(dev);
+	if (!sema) {
+		/* Early card or broken userspace, fall back to
+		 * software sync. */
+		return nouveau_fence_wait(fence, NULL, false, false);
+	}
+
+	/* Signal the semaphore from chan */
+	ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
+	if (ret)
+		goto out;
+
+	/* Make wchan wait until it gets signalled */
+	ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
+
+out:
+	kref_put(&sema->ref, free_semaphore);
+	return ret;
 }
 
 int
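
With the helpers above, nouveau_fence_sync() turns a cross-channel dependency into pure GPU work: the fencing channel pushes a RELEASE that writes 1 into the semaphore word, the waiting channel pushes an ACQUIRE that stalls until it reads 1, and the fences emitted by emit_semaphore() keep the slot alive until both sides are done (semaphore_work() force-writes 1 if a fence completes unsignalled, so a dying channel cannot wedge the waiter). A hypothetical call site, assuming a TTM buffer object whose sync_obj holds a nouveau fence; the field names here are assumptions for illustration, not taken from this patch:

    /* Hypothetical caller sketch: stall `wchan` until rendering fenced
     * on another channel has finished with `nvbo`. */
    static int example_sync_to_buffer(struct nouveau_bo *nvbo,
                                      struct nouveau_channel *wchan)
    {
            struct nouveau_fence *fence = nvbo->bo.sync_obj;

            /* Returns 0 immediately if there is no fence, the fence was
             * emitted on wchan itself, or it has already signalled. */
            return nouveau_fence_sync(fence, wchan);
    }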
@@ -258,6 +377,8 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg)
 int
 nouveau_fence_channel_init(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *obj = NULL;
 	int ret;
 
@@ -277,6 +398,30 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 	BEGIN_RING(chan, NvSubSw, 0, 1);
 	OUT_RING(chan, NvSw);
 
+	/* Create a DMA object for the shared cross-channel sync area. */
+	if (USE_SEMA(dev)) {
+		struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+					     mem->start << PAGE_SHIFT,
+					     mem->size << PAGE_SHIFT,
+					     NV_DMA_ACCESS_RW,
+					     NV_DMA_TARGET_VIDMEM, &obj);
+		if (ret)
+			return ret;
+
+		ret = nouveau_ramht_insert(chan, NvSema, obj);
+		nouveau_gpuobj_ref(NULL, &obj);
+		if (ret)
+			return ret;
+
+		ret = RING_SPACE(chan, 2);
+		if (ret)
+			return ret;
+		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+		OUT_RING(chan, NvSema);
+	}
+
 	FIRE_RING(chan);
 
 	INIT_LIST_HEAD(&chan->fence.pending);
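
The channel init path binds the software class to subchannel NvSubSw and, on semaphore-capable chips, hashes a DMA object (NvSema) into the channel's RAMHT so that NV_SW_SEMAPHORE_OFFSET values resolve into the shared sync buffer. For reference, BEGIN_RING() on this hardware generation packs the method header into a single 32-bit word; the sketch below mirrors the classic NV04-style encoding used by nouveau's DMA macros, shown as standalone code under that assumption rather than as a drop-in replacement:

    #include <stdint.h>

    /* NV04-style FIFO command header, as emitted by BEGIN_RING():
     * bits 28:18 = data word count, 15:13 = subchannel,
     * low 13 bits = method offset. */
    static uint32_t nv04_ring_header(uint32_t subc, uint32_t mthd,
                                     uint32_t count)
    {
            return (count << 18) | (subc << 13) | mthd;
    }

So BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1) would push nv04_ring_header(NvSubSw, NV_SW_DMA_SEMAPHORE, 1), and the following OUT_RING(chan, NvSema) supplies the single data word.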
@@ -302,3 +447,51 @@ nouveau_fence_channel_fini(struct nouveau_channel *chan)
 	}
 }
 
+int
+nouveau_fence_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	/* Create a shared VRAM heap for cross-channel sync. */
+	if (USE_SEMA(dev)) {
+		ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
+				     0, 0, false, true, &dev_priv->fence.bo);
+		if (ret)
+			return ret;
+
+		ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
+		if (ret)
+			goto fail;
+
+		ret = nouveau_bo_map(dev_priv->fence.bo);
+		if (ret)
+			goto fail;
+
+		ret = drm_mm_init(&dev_priv->fence.heap, 0,
+				  dev_priv->fence.bo->bo.mem.size);
+		if (ret)
+			goto fail;
+
+		spin_lock_init(&dev_priv->fence.lock);
+	}
+
+	return 0;
+fail:
+	nouveau_bo_unmap(dev_priv->fence.bo);
+	nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+	return ret;
+}
+
+void
+nouveau_fence_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (USE_SEMA(dev)) {
+		drm_mm_takedown(&dev_priv->fence.heap);
+		nouveau_bo_unmap(dev_priv->fence.bo);
+		nouveau_bo_unpin(dev_priv->fence.bo);
+		nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+	}
+}
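
nouveau_fence_init()/nouveau_fence_fini() bracket the per-device state (the pinned 4 KiB VRAM buffer plus the drm_mm heap that alloc_semaphore() carves 4-byte slots from), while nouveau_fence_channel_init() runs once per channel. A rough ordering sketch at device load, with hypothetical call sites (the real ones live elsewhere in the driver and do considerably more):

    /* Hypothetical ordering sketch, not the driver's actual load path. */
    static int example_load(struct drm_device *dev,
                            struct nouveau_channel *chan)
    {
            int ret;

            ret = nouveau_fence_init(dev);          /* once: heap + pinned bo */
            if (ret)
                    return ret;

            ret = nouveau_fence_channel_init(chan); /* per channel: NvSw/NvSema */
            if (ret)
                    nouveau_fence_fini(dev);        /* unwind on failure */

            return ret;
    }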