path: root/drivers/gpu/drm/nouveau/nouveau_gem.c
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_gem.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  53
1 file changed, 33 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0f417ac1b696..19620a6709f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                 goto out;
 
         ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+        /* drop reference from allocate - handle holds it now */
+        drm_gem_object_unreference_unlocked(nvbo->gem);
 out:
-        drm_gem_object_handle_unreference_unlocked(nvbo->gem);
-
-        if (ret)
-                drm_gem_object_unreference_unlocked(nvbo->gem);
         return ret;
 }
 
@@ -245,7 +243,7 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
                 list_del(&nvbo->entry);
                 nvbo->reserved_by = NULL;
                 ttm_bo_unreserve(&nvbo->bo);
-                drm_gem_object_unreference(nvbo->gem);
+                drm_gem_object_unreference_unlocked(nvbo->gem);
         }
 }
 
@@ -300,7 +298,7 @@ retry:
                         validate_fini(op, NULL);
                         if (ret == -EAGAIN)
                                 ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
-                        drm_gem_object_unreference(gem);
+                        drm_gem_object_unreference_unlocked(gem);
                         if (ret) {
                                 NV_ERROR(dev, "fail reserve\n");
                                 return ret;
@@ -337,7 +335,9 @@ retry:
                                 return -EINVAL;
                         }
 
+                        mutex_unlock(&drm_global_mutex);
                         ret = ttm_bo_wait_cpu(&nvbo->bo, false);
+                        mutex_lock(&drm_global_mutex);
                         if (ret) {
                                 NV_ERROR(dev, "fail wait_cpu\n");
                                 return ret;
@@ -361,16 +361,11 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
         list_for_each_entry(nvbo, list, entry) {
                 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
-                struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
 
-                if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
-                        spin_lock(&nvbo->bo.lock);
-                        ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-                        spin_unlock(&nvbo->bo.lock);
-                        if (unlikely(ret)) {
-                                NV_ERROR(dev, "fail wait other chan\n");
-                                return ret;
-                        }
+                ret = nouveau_bo_sync_gpu(nvbo, chan);
+                if (unlikely(ret)) {
+                        NV_ERROR(dev, "fail pre-validate sync\n");
+                        return ret;
                 }
 
                 ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
@@ -381,7 +376,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                         return ret;
                 }
 
-                nvbo->channel = chan;
+                nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
                 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
                                       false, false, false);
                 nvbo->channel = NULL;
@@ -390,6 +385,12 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                         return ret;
                 }
 
+                ret = nouveau_bo_sync_gpu(nvbo, chan);
+                if (unlikely(ret)) {
+                        NV_ERROR(dev, "fail post-validate sync\n");
+                        return ret;
+                }
+
                 if (nvbo->bo.offset == b->presumed.offset &&
                     ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                       b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -613,7 +614,20 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                 return PTR_ERR(bo);
         }
 
-        mutex_lock(&dev->struct_mutex);
+        /* Mark push buffers as being used on PFIFO, the validation code
+         * will then make sure that if the pushbuf bo moves, that they
+         * happen on the kernel channel, which will in turn cause a sync
+         * to happen before we try and submit the push buffer.
+         */
+        for (i = 0; i < req->nr_push; i++) {
+                if (push[i].bo_index >= req->nr_buffers) {
+                        NV_ERROR(dev, "push %d buffer not in list\n", i);
+                        ret = -EINVAL;
+                        goto out;
+                }
+
+                bo[push[i].bo_index].read_domains |= (1 << 31);
+        }
 
         /* Validate buffer list */
         ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
@@ -647,7 +661,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                                       push[i].length);
                 }
         } else
-        if (dev_priv->card_type >= NV_20) {
+        if (dev_priv->chipset >= 0x25) {
                 ret = RING_SPACE(chan, req->nr_push * 2);
                 if (ret) {
                         NV_ERROR(dev, "cal_space: %d\n", ret);
@@ -713,7 +727,6 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 out:
         validate_fini(&op, fence);
         nouveau_fence_unref((void**)&fence);
-        mutex_unlock(&dev->struct_mutex);
         kfree(bo);
         kfree(push);
 
@@ -722,7 +735,7 @@ out_next:
                 req->suffix0 = 0x00000000;
                 req->suffix1 = 0x00000000;
         } else
-        if (dev_priv->card_type >= NV_20) {
+        if (dev_priv->chipset >= 0x25) {
                 req->suffix0 = 0x00020000;
                 req->suffix1 = 0x00000000;
         } else {