author     Dave Airlie <airlied@redhat.com>   2013-02-20 02:46:25 -0500
committer  Dave Airlie <airlied@redhat.com>   2013-02-20 02:54:13 -0500
commit     1f3a574a4bfe86ebf7d51fac37e0668397372fd8 (patch)
tree       86308b3a63ea03151eff6e6b5c4ea96e2f509993 /drivers/gpu/drm/nouveau/nouveau_gem.c
parent     b81e059ec5a7128622ab5d74d78e9b4f361b54ae (diff)
parent     a91ed42de25e7e81159c0dd59faf8cac9dfa1d32 (diff)
Merge branch 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6 into drm-next
Nothing terribly exciting in here probably:
- reworked thermal stuff from mupuf/I, has a chance of possibly working
  well enough when we get to being able to reclock..
- driver will report mmio access faults on chipsets where it's supported
- will now sleep waiting on fences on nv84+ rather than polling
- some cleanup of the internal fencing, looking towards sli/dmabuf sync
- initial support for anx9805 dp/tmds encoder
- nv50+ display fixes related to the above, and also might fix a few other issues
- nicer error reporting (will log process names with channel errors)
- various other random fixes

* 'drm-nouveau-next' of git://anongit.freedesktop.org/git/nouveau/linux-2.6: (87 commits)
  nouveau: ACPI support depends on X86 and X86_PLATFORM_DEVICES
  drm/nouveau/i2c: add support for ddc/aux, and dp link training on anx9805
  drm/nv50: initial kms support for off-chip TMDS/DP encoders
  drm/nv50-/disp: initial supervisor support for off-chip encoders
  drm/nv50-/disp: initial work towards supporting external encoders
  drm/nv50-/kms: remove unnecessary wait-for-completion points
  drm/nv50-/disp: move DP link training to core and train from supervisor
  drm/nv50-/disp: handle supervisor tasks from workqueue
  drm/nouveau/i2c: create proper chipset-specific class implementations
  drm/nv50-/disp: 0x0000 is a valid udisp config value
  drm/nv50/devinit: reverse the logic for running encoder init scripts
  drm/nouveau/bios: store a type/mask hash in parsed dcb data
  drm/nouveau/i2c: extend type to 16-bits, add lookup-by-type function
  drm/nouveau/i2c: aux channels not necessarily on nvio
  drm/nouveau/i2c: fix a bit of a thinko in nv_wri2cr helper functions
  drm/nouveau/bios: parse external transmitter type if off-chip
  drm/nouveau: store i2c port pointer directly in nouveau_encoder
  drm/nouveau/i2c: handle i2c/aux mux outside of port lookup function
  drm/nv50/graph: avoid touching 400724, it doesn't exist
  drm/nouveau: Fix DPMS 1 on G4 Snowball, from snow white to coal black.
  ...
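Of the items above, the "nicer error reporting" bullet is what the nouveau_gem.c diff below implements: every GEM error message switches from the device-wide NV_ERROR(drm, ...) to a per-client NV_ERROR(cli, ...), so the log can attribute a failing ioctl to the client that issued it. As a rough illustration of the idea only (the names example_cli and EXAMPLE_ERROR are made up for this sketch; the real nouveau_cli and NV_ERROR definitions live elsewhere in the driver and may differ):

#include <linux/printk.h>

/* Sketch only: a client-aware error macro in the spirit of NV_ERROR(cli, ...).
 * 'example_cli' and 'EXAMPLE_ERROR' are illustrative names, not nouveau's. */
struct example_cli {
        char name[32];  /* e.g. comm of the process that opened the device */
};

#define EXAMPLE_ERROR(cli, fmt, ...) \
        printk(KERN_ERR "nouveau: [%s] " fmt, (cli)->name, ##__VA_ARGS__)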
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_gem.c')
-rw-r--r--   drivers/gpu/drm/nouveau/nouveau_gem.c   81
1 file changed, 42 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index d98bee012cab..b4b4d0c1f4af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -203,6 +203,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
 {
         struct nouveau_drm *drm = nouveau_drm(dev);
+        struct nouveau_cli *cli = nouveau_cli(file_priv);
         struct nouveau_fb *pfb = nouveau_fb(drm->device);
         struct drm_nouveau_gem_new *req = data;
         struct nouveau_bo *nvbo = NULL;
@@ -211,7 +212,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
         drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
 
         if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
-                NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
+                NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
                 return -EINVAL;
         }
 
@@ -313,6 +314,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
               struct drm_nouveau_gem_pushbuf_bo *pbbo,
               int nr_buffers, struct validate_op *op)
 {
+        struct nouveau_cli *cli = nouveau_cli(file_priv);
         struct drm_device *dev = chan->drm->dev;
         struct nouveau_drm *drm = nouveau_drm(dev);
         uint32_t sequence;
@@ -323,7 +325,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
         sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
 retry:
         if (++trycnt > 100000) {
-                NV_ERROR(drm, "%s failed and gave up.\n", __func__);
+                NV_ERROR(cli, "%s failed and gave up.\n", __func__);
                 return -EINVAL;
         }
 
@@ -334,7 +336,7 @@ retry:
 
                 gem = drm_gem_object_lookup(dev, file_priv, b->handle);
                 if (!gem) {
-                        NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
+                        NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
                         validate_fini(op, NULL);
                         return -ENOENT;
                 }
@@ -346,7 +348,7 @@ retry:
                 }
 
                 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
-                        NV_ERROR(drm, "multiple instances of buffer %d on "
+                        NV_ERROR(cli, "multiple instances of buffer %d on "
                                       "validation list\n", b->handle);
                         drm_gem_object_unreference_unlocked(gem);
                         validate_fini(op, NULL);
@@ -366,7 +368,7 @@ retry:
                         if (unlikely(ret)) {
                                 drm_gem_object_unreference_unlocked(gem);
                                 if (ret != -ERESTARTSYS)
-                                        NV_ERROR(drm, "fail reserve\n");
+                                        NV_ERROR(cli, "fail reserve\n");
                                 return ret;
                         }
                 }
@@ -384,7 +386,7 @@ retry:
                 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                         list_add_tail(&nvbo->entry, &op->gart_list);
                 else {
-                        NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
+                        NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
                                  b->valid_domains);
                         list_add_tail(&nvbo->entry, &op->both_list);
                         validate_fini(op, NULL);
@@ -417,8 +419,9 @@ validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
 }
 
 static int
-validate_list(struct nouveau_channel *chan, struct list_head *list,
-              struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
+validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
+              struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
+              uint64_t user_pbbo_ptr)
 {
         struct nouveau_drm *drm = chan->drm;
         struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
@@ -431,7 +434,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 
                 ret = validate_sync(chan, nvbo);
                 if (unlikely(ret)) {
-                        NV_ERROR(drm, "fail pre-validate sync\n");
+                        NV_ERROR(cli, "fail pre-validate sync\n");
                         return ret;
                 }
 
@@ -439,20 +442,20 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                                              b->write_domains,
                                              b->valid_domains);
                 if (unlikely(ret)) {
-                        NV_ERROR(drm, "fail set_domain\n");
+                        NV_ERROR(cli, "fail set_domain\n");
                         return ret;
                 }
 
                 ret = nouveau_bo_validate(nvbo, true, false);
                 if (unlikely(ret)) {
                         if (ret != -ERESTARTSYS)
-                                NV_ERROR(drm, "fail ttm_validate\n");
+                                NV_ERROR(cli, "fail ttm_validate\n");
                         return ret;
                 }
 
                 ret = validate_sync(chan, nvbo);
                 if (unlikely(ret)) {
-                        NV_ERROR(drm, "fail post-validate sync\n");
+                        NV_ERROR(cli, "fail post-validate sync\n");
                         return ret;
                 }
 
@@ -488,7 +491,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                              uint64_t user_buffers, int nr_buffers,
                              struct validate_op *op, int *apply_relocs)
 {
-        struct nouveau_drm *drm = chan->drm;
+        struct nouveau_cli *cli = nouveau_cli(file_priv);
         int ret, relocs = 0;
 
         INIT_LIST_HEAD(&op->vram_list);
@@ -501,32 +504,32 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
         ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
         if (unlikely(ret)) {
                 if (ret != -ERESTARTSYS)
-                        NV_ERROR(drm, "validate_init\n");
+                        NV_ERROR(cli, "validate_init\n");
                 return ret;
         }
 
-        ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
+        ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
         if (unlikely(ret < 0)) {
                 if (ret != -ERESTARTSYS)
-                        NV_ERROR(drm, "validate vram_list\n");
+                        NV_ERROR(cli, "validate vram_list\n");
                 validate_fini(op, NULL);
                 return ret;
         }
         relocs += ret;
 
-        ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
+        ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
         if (unlikely(ret < 0)) {
                 if (ret != -ERESTARTSYS)
-                        NV_ERROR(drm, "validate gart_list\n");
+                        NV_ERROR(cli, "validate gart_list\n");
                 validate_fini(op, NULL);
                 return ret;
         }
         relocs += ret;
 
-        ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
+        ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
         if (unlikely(ret < 0)) {
                 if (ret != -ERESTARTSYS)
-                        NV_ERROR(drm, "validate both_list\n");
+                        NV_ERROR(cli, "validate both_list\n");
                 validate_fini(op, NULL);
                 return ret;
         }
@@ -555,11 +558,10 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 }
 
 static int
-nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                                 struct drm_nouveau_gem_pushbuf *req,
                                 struct drm_nouveau_gem_pushbuf_bo *bo)
 {
-        struct nouveau_drm *drm = nouveau_drm(dev);
         struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
         int ret = 0;
         unsigned i;
@@ -575,7 +577,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
                 uint32_t data;
 
                 if (unlikely(r->bo_index > req->nr_buffers)) {
-                        NV_ERROR(drm, "reloc bo index invalid\n");
+                        NV_ERROR(cli, "reloc bo index invalid\n");
                         ret = -EINVAL;
                         break;
                 }
@@ -585,7 +587,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
                         continue;
 
                 if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
-                        NV_ERROR(drm, "reloc container bo index invalid\n");
+                        NV_ERROR(cli, "reloc container bo index invalid\n");
                         ret = -EINVAL;
                         break;
                 }
@@ -593,7 +595,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 
                 if (unlikely(r->reloc_bo_offset + 4 >
                              nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
-                        NV_ERROR(drm, "reloc outside of bo\n");
+                        NV_ERROR(cli, "reloc outside of bo\n");
                         ret = -EINVAL;
                         break;
                 }
@@ -602,7 +604,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
                         ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                           &nvbo->kmap);
                         if (ret) {
-                                NV_ERROR(drm, "failed kmap for reloc\n");
+                                NV_ERROR(cli, "failed kmap for reloc\n");
                                 break;
                         }
                         nvbo->validate_mapped = true;
@@ -627,7 +629,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
                 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
                 spin_unlock(&nvbo->bo.bdev->fence_lock);
                 if (ret) {
-                        NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
+                        NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
                         break;
                 }
 
@@ -643,6 +645,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
 {
         struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+        struct nouveau_cli *cli = nouveau_cli(file_priv);
         struct nouveau_abi16_chan *temp;
         struct nouveau_drm *drm = nouveau_drm(dev);
         struct drm_nouveau_gem_pushbuf *req = data;
@@ -672,19 +675,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                 goto out_next;
 
         if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
-                NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
+                NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
                          req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                 return nouveau_abi16_put(abi16, -EINVAL);
         }
 
         if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
-                NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
+                NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
                          req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                 return nouveau_abi16_put(abi16, -EINVAL);
         }
 
         if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
-                NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
+                NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
                          req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                 return nouveau_abi16_put(abi16, -EINVAL);
         }
@@ -702,7 +705,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
         /* Ensure all push buffers are on validate list */
         for (i = 0; i < req->nr_push; i++) {
                 if (push[i].bo_index >= req->nr_buffers) {
-                        NV_ERROR(drm, "push %d buffer not in list\n", i);
+                        NV_ERROR(cli, "push %d buffer not in list\n", i);
                         ret = -EINVAL;
                         goto out_prevalid;
                 }
@@ -713,15 +716,15 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                                            req->nr_buffers, &op, &do_reloc);
         if (ret) {
                 if (ret != -ERESTARTSYS)
-                        NV_ERROR(drm, "validate: %d\n", ret);
+                        NV_ERROR(cli, "validate: %d\n", ret);
                 goto out_prevalid;
         }
 
         /* Apply any relocations that are required */
         if (do_reloc) {
-                ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
+                ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
                 if (ret) {
-                        NV_ERROR(drm, "reloc apply: %d\n", ret);
+                        NV_ERROR(cli, "reloc apply: %d\n", ret);
                         goto out;
                 }
         }
@@ -729,7 +732,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
         if (chan->dma.ib_max) {
                 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
                 if (ret) {
-                        NV_ERROR(drm, "nv50cal_space: %d\n", ret);
+                        NV_ERROR(cli, "nv50cal_space: %d\n", ret);
                         goto out;
                 }
 
@@ -744,7 +747,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
         if (nv_device(drm->device)->chipset >= 0x25) {
                 ret = RING_SPACE(chan, req->nr_push * 2);
                 if (ret) {
-                        NV_ERROR(drm, "cal_space: %d\n", ret);
+                        NV_ERROR(cli, "cal_space: %d\n", ret);
                         goto out;
                 }
 
@@ -758,7 +761,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
         } else {
                 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                 if (ret) {
-                        NV_ERROR(drm, "jmp_space: %d\n", ret);
+                        NV_ERROR(cli, "jmp_space: %d\n", ret);
                         goto out;
                 }
 
@@ -794,9 +797,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                 }
         }
 
-        ret = nouveau_fence_new(chan, &fence);
+        ret = nouveau_fence_new(chan, false, &fence);
         if (ret) {
-                NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
+                NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
                 WIND_RING(chan);
                 goto out;
         }