Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_gem.c')
 drivers/gpu/drm/nouveau/nouveau_gem.c | 508
 1 file changed, 178 insertions(+), 330 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 70cc30803e3..0d22f66f1c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,12 +167,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 
 	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference(nvbo->gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference_unlocked(nvbo->gem);
 
 	if (ret)
-		drm_gem_object_unreference(nvbo->gem);
+		drm_gem_object_unreference_unlocked(nvbo->gem);
 	return ret;
 }
 
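Note: the *_unlocked GEM helpers take dev->struct_mutex themselves, which is why the explicit lock/unlock pair around the unreference can be dropped here and in the cpu_prep/cpu_fini/info hunks below. A rough sketch of the helper this hunk switches to (the real definition lives in include/drm/drmP.h; the details here are a sketch of the helpers of this era and may differ slightly):

    static inline void
    drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
    {
            struct drm_device *dev = obj->dev;

            /* The helper acquires the lock the caller used to hold. */
            mutex_lock(&dev->struct_mutex);
            kref_put(&obj->refcount, drm_gem_object_free);
            mutex_unlock(&dev->struct_mutex);
    }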
@@ -243,6 +241,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 			nouveau_fence_unref((void *)&prev_fence);
 		}
 
+		if (unlikely(nvbo->validate_mapped)) {
+			ttm_bo_kunmap(&nvbo->kmap);
+			nvbo->validate_mapped = false;
+		}
+
 		list_del(&nvbo->entry);
 		nvbo->reserved_by = NULL;
 		ttm_bo_unreserve(&nvbo->bo);
@@ -302,11 +305,14 @@ retry:
 			if (ret == -EAGAIN)
 				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
 			drm_gem_object_unreference(gem);
-			if (ret)
+			if (ret) {
+				NV_ERROR(dev, "fail reserve\n");
 				return ret;
+			}
 			goto retry;
 		}
 
+		b->user_priv = (uint64_t)(unsigned long)nvbo;
 		nvbo->reserved_by = file_priv;
 		nvbo->pbbo_index = i;
 		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -336,8 +342,10 @@ retry:
 			}
 
 			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-			if (ret)
+			if (ret) {
+				NV_ERROR(dev, "fail wait_cpu\n");
 				return ret;
+			}
 			goto retry;
 		}
 	}
@@ -351,6 +359,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 {
 	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
 				(void __force __user *)(uintptr_t)user_pbbo_ptr;
+	struct drm_device *dev = chan->dev;
 	struct nouveau_bo *nvbo;
 	int ret, relocs = 0;
 
@@ -362,39 +371,46 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			spin_lock(&nvbo->bo.lock);
 			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
 			spin_unlock(&nvbo->bo.lock);
-			if (unlikely(ret))
+			if (unlikely(ret)) {
+				NV_ERROR(dev, "fail wait other chan\n");
 				return ret;
+			}
 		}
 
 		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
 					     b->write_domains,
 					     b->valid_domains);
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail set_domain\n");
 			return ret;
+		}
 
 		nvbo->channel = chan;
 		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
 				      false, false);
 		nvbo->channel = NULL;
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(dev, "fail ttm_validate\n");
 			return ret;
+		}
 
-		if (nvbo->bo.offset == b->presumed_offset &&
+		if (nvbo->bo.offset == b->presumed.offset &&
 		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
 		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
+		      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 			continue;
 
 		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 		else
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
-		b->presumed_offset = nvbo->bo.offset;
-		b->presumed_ok = 0;
+			b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+		b->presumed.offset = nvbo->bo.offset;
+		b->presumed.valid = 0;
 		relocs++;
 
-		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
+		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+				     &b->presumed, sizeof(b->presumed)))
 			return -EFAULT;
 	}
 
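Note: the presumed_offset/presumed_domain/presumed_ok fields were folded into a nested presumed struct in the bumped interface, so the DRM_COPY_TO_USER at the end of the loop can copy just that sub-struct instead of the whole bo entry. For reference, the matching uapi layout (a reader aid, not part of this diff; as in nouveau_drm.h of this interface revision):

    struct drm_nouveau_gem_pushbuf_bo_presumed {
            uint32_t valid;   /* cleared to force userspace to re-read */
            uint32_t domain;  /* NOUVEAU_GEM_DOMAIN_VRAM or _GART */
            uint64_t offset;  /* where the kernel actually placed the bo */
    };

    struct drm_nouveau_gem_pushbuf_bo {
            uint64_t user_priv;  /* kernel scratch, set in validate_init */
            uint32_t handle;
            uint32_t read_domains;
            uint32_t write_domains;
            uint32_t valid_domains;
            struct drm_nouveau_gem_pushbuf_bo_presumed presumed;
    };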
@@ -408,6 +424,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			     uint64_t user_buffers, int nr_buffers,
 			     struct validate_op *op, int *apply_relocs)
 {
+	struct drm_device *dev = chan->dev;
 	int ret, relocs = 0;
 
 	INIT_LIST_HEAD(&op->vram_list);
@@ -418,11 +435,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 		return 0;
 
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		NV_ERROR(dev, "validate_init\n");
 		return ret;
+	}
 
 	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -430,6 +450,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 
 	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -437,6 +458,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 
 	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		NV_ERROR(dev, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -465,59 +487,82 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 }
 
 static int
-nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
-				struct drm_nouveau_gem_pushbuf_bo *bo,
-				unsigned nr_relocs, uint64_t ptr_relocs,
-				unsigned nr_dwords, unsigned first_dword,
-				uint32_t *pushbuf, bool is_iomem)
+nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+				struct drm_nouveau_gem_pushbuf *req,
+				struct drm_nouveau_gem_pushbuf_bo *bo)
 {
 	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
-	struct drm_device *dev = chan->dev;
 	int ret = 0;
 	unsigned i;
 
-	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
 	if (IS_ERR(reloc))
 		return PTR_ERR(reloc);
 
-	for (i = 0; i < nr_relocs; i++) {
+	for (i = 0; i < req->nr_relocs; i++) {
 		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
 		struct drm_nouveau_gem_pushbuf_bo *b;
+		struct nouveau_bo *nvbo;
 		uint32_t data;
 
-		if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
-		    r->reloc_index >= first_dword + nr_dwords) {
-			NV_ERROR(dev, "Bad relocation %d\n", i);
-			NV_ERROR(dev, " bo: %d max %d\n", r->bo_index, nr_bo);
-			NV_ERROR(dev, " id: %d max %d\n", r->reloc_index, nr_dwords);
+		if (unlikely(r->bo_index > req->nr_buffers)) {
+			NV_ERROR(dev, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
 
 		b = &bo[r->bo_index];
-		if (b->presumed_ok)
+		if (b->presumed.valid)
 			continue;
 
+		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+			NV_ERROR(dev, "reloc container bo index invalid\n");
+			ret = -EINVAL;
+			break;
+		}
+		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
+
+		if (unlikely(r->reloc_bo_offset + 4 >
+			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+			NV_ERROR(dev, "reloc outside of bo\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		if (!nvbo->kmap.virtual) {
+			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+					  &nvbo->kmap);
+			if (ret) {
+				NV_ERROR(dev, "failed kmap for reloc\n");
+				break;
+			}
+			nvbo->validate_mapped = true;
+		}
+
 		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
-			data = b->presumed_offset + r->data;
+			data = b->presumed.offset + r->data;
 		else
 		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
-			data = (b->presumed_offset + r->data) >> 32;
+			data = (b->presumed.offset + r->data) >> 32;
 		else
 			data = r->data;
 
 		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
-			if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
 				data |= r->tor;
 			else
 				data |= r->vor;
 		}
 
-		if (is_iomem)
-			iowrite32_native(data, (void __force __iomem *)
-						&pushbuf[r->reloc_index]);
-		else
-			pushbuf[r->reloc_index] = data;
+		spin_lock(&nvbo->bo.lock);
+		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+		spin_unlock(&nvbo->bo.lock);
+		if (ret) {
+			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
+			break;
+		}
+
+		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 	}
 
 	kfree(reloc);
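Note: the rewritten reloc_apply no longer patches a kernel-side copy of the push buffer; it kmaps the bo that contains the dword to fix and writes it in place with nouveau_bo_wr32(), after waiting for the bo to go idle. The reloc entries it consumes look like this in the bumped interface (a reader aid, not part of this diff):

    struct drm_nouveau_gem_pushbuf_reloc {
            uint32_t reloc_bo_index;   /* bo holding the dword to patch */
            uint32_t reloc_bo_offset;  /* byte offset of that dword */
            uint32_t bo_index;         /* bo whose presumed.offset is used */
            uint32_t flags;            /* NOUVEAU_GEM_RELOC_LOW/HIGH/OR */
            uint32_t data;
            uint32_t vor;              /* OR-ed in when bo ended up in VRAM */
            uint32_t tor;              /* OR-ed in when bo ended up in GART */
    };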
@@ -528,127 +573,50 @@ int
 nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_gem_pushbuf *req = data;
-	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+	struct drm_nouveau_gem_pushbuf_push *push;
+	struct drm_nouveau_gem_pushbuf_bo *bo;
 	struct nouveau_channel *chan;
 	struct validate_op op;
-	struct nouveau_fence* fence = 0;
-	uint32_t *pushbuf = NULL;
-	int ret = 0, do_reloc = 0, i;
+	struct nouveau_fence *fence = 0;
+	int i, j, ret = 0, do_reloc = 0;
 
 	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
 	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
 
-	if (req->nr_dwords >= chan->dma.max ||
-	    req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
-	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
-		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
-		NV_ERROR(dev, " dwords : %d max %d\n", req->nr_dwords,
-			 chan->dma.max - 1);
-		NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
-			 NOUVEAU_GEM_MAX_BUFFERS);
-		NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
-			 NOUVEAU_GEM_MAX_RELOCS);
-		return -EINVAL;
-	}
-
-	pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
-	if (IS_ERR(pushbuf))
-		return PTR_ERR(pushbuf);
-
-	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
-	if (IS_ERR(bo)) {
-		kfree(pushbuf);
-		return PTR_ERR(bo);
-	}
-
-	mutex_lock(&dev->struct_mutex);
-
-	/* Validate buffer list */
-	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
-					   req->nr_buffers, &op, &do_reloc);
-	if (ret)
-		goto out;
-
-	/* Apply any relocations that are required */
-	if (do_reloc) {
-		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
-						      bo, req->nr_relocs,
-						      req->relocs,
-						      req->nr_dwords, 0,
-						      pushbuf, false);
-		if (ret)
-			goto out;
-	}
-
-	/* Emit push buffer to the hw
-	 */
-	ret = RING_SPACE(chan, req->nr_dwords);
-	if (ret)
-		goto out;
-
-	OUT_RINGp(chan, pushbuf, req->nr_dwords);
+	req->vram_available = dev_priv->fb_aper_free;
+	req->gart_available = dev_priv->gart_info.aper_free;
+	if (unlikely(req->nr_push == 0))
+		goto out_next;
 
-	ret = nouveau_fence_new(chan, &fence, true);
-	if (ret) {
-		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
-		WIND_RING(chan);
-		goto out;
+	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
+		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
+			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		return -EINVAL;
 	}
 
-	if (nouveau_gem_pushbuf_sync(chan)) {
-		ret = nouveau_fence_wait(fence, NULL, false, false);
-		if (ret) {
-			for (i = 0; i < req->nr_dwords; i++)
-				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
-			NV_ERROR(dev, "^^ above push buffer is fail :(\n");
-		}
+	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
+		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
+			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		return -EINVAL;
 	}
 
-out:
-	validate_fini(&op, fence);
-	nouveau_fence_unref((void**)&fence);
-	mutex_unlock(&dev->struct_mutex);
-	kfree(pushbuf);
-	kfree(bo);
-	return ret;
-}
-
-#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
-
-int
-nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
-			       struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_gem_pushbuf_call *req = data;
-	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
-	struct nouveau_channel *chan;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *pbbo;
-	struct validate_op op;
-	struct nouveau_fence* fence = 0;
-	int i, ret = 0, do_reloc = 0;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
-
-	if (unlikely(req->handle == 0))
-		goto out_next;
-
-	if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
-	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
-		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
-		NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
-			 NOUVEAU_GEM_MAX_BUFFERS);
-		NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
-			 NOUVEAU_GEM_MAX_RELOCS);
+	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
+		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
+			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
 		return -EINVAL;
 	}
 
+	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
+	if (IS_ERR(push))
+		return PTR_ERR(push);
+
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
-	if (IS_ERR(bo))
+	if (IS_ERR(bo)) {
+		kfree(push);
 		return PTR_ERR(bo);
+	}
 
 	mutex_lock(&dev->struct_mutex);
 
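Note: the ioctl now takes a list of push segments (nr_push/push) instead of copying raw dwords through the kernel (nr_dwords/dwords), so command streams stay in userspace bos. The request layout in the bumped interface is roughly as follows (a reader aid, not part of this diff):

    struct drm_nouveau_gem_pushbuf_push {
            uint32_t bo_index;  /* index into the buffers array */
            uint32_t pad;
            uint64_t offset;    /* byte offset of the commands in that bo */
            uint64_t length;    /* byte length of the command stream */
    };

    struct drm_nouveau_gem_pushbuf {
            uint32_t channel;
            uint32_t nr_buffers;
            uint64_t buffers;         /* drm_nouveau_gem_pushbuf_bo array */
            uint32_t nr_relocs;
            uint32_t nr_push;
            uint64_t relocs;          /* drm_nouveau_gem_pushbuf_reloc array */
            uint64_t push;            /* drm_nouveau_gem_pushbuf_push array */
            uint64_t vram_available;  /* written back by the kernel */
            uint64_t gart_available;  /* written back by the kernel */
            uint64_t suffix0;         /* expected terminator, see out_next */
            uint64_t suffix1;
    };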
@@ -660,122 +628,84 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	/* Validate DMA push buffer */
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem) {
-		NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
-		ret = -EINVAL;
-		goto out;
-	}
-	pbbo = nouveau_gem_object(gem);
-
-	if ((req->offset & 3) || req->nr_dwords < 2 ||
-	    (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
-	    (unsigned long)req->nr_dwords >
-	     ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
-		NV_ERROR(dev, "pb call misaligned or out of bounds: "
-			      "%d + %d * 4 > %ld\n",
-			 req->offset, req->nr_dwords, pbbo->bo.mem.size);
-		ret = -EINVAL;
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
-			     chan->fence.sequence);
-	if (ret) {
-		NV_ERROR(dev, "resv pb: %d\n", ret);
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
-	ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
-	if (ret) {
-		NV_ERROR(dev, "validate pb: %d\n", ret);
-		ttm_bo_unreserve(&pbbo->bo);
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	list_add_tail(&pbbo->entry, &op.both_list);
-
-	/* If presumed return address doesn't match, we need to map the
-	 * push buffer and fix it..
-	 */
-	if (!PUSHBUF_CAL) {
-		uint32_t retaddy;
-
-		if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
-			ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
-			if (ret) {
-				NV_ERROR(dev, "jmp_space: %d\n", ret);
-				goto out;
-			}
-		}
-
-		retaddy  = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
-		retaddy |= 0x20000000;
-		if (retaddy != req->suffix0) {
-			req->suffix0 = retaddy;
-			do_reloc = 1;
-		}
-	}
-
 	/* Apply any relocations that are required */
 	if (do_reloc) {
-		void *pbvirt;
-		bool is_iomem;
-		ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
-				  &pbbo->kmap);
+		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
 		if (ret) {
-			NV_ERROR(dev, "kmap pb: %d\n", ret);
+			NV_ERROR(dev, "reloc apply: %d\n", ret);
 			goto out;
 		}
+	}
 
-		pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
-		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
-						      req->nr_relocs,
-						      req->relocs,
-						      req->nr_dwords,
-						      req->offset / 4,
-						      pbvirt, is_iomem);
-
-		if (!PUSHBUF_CAL) {
-			nouveau_bo_wr32(pbbo,
-					req->offset / 4 + req->nr_dwords - 2,
-					req->suffix0);
-		}
-
-		ttm_bo_kunmap(&pbbo->kmap);
+	if (chan->dma.ib_max) {
+		ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
 		if (ret) {
-			NV_ERROR(dev, "reloc apply: %d\n", ret);
+			NV_INFO(dev, "nv50cal_space: %d\n", ret);
 			goto out;
 		}
-	}
 
-	if (PUSHBUF_CAL) {
-		ret = RING_SPACE(chan, 2);
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+
+			nv50_dma_push(chan, nvbo, push[i].offset,
+				      push[i].length);
+		}
+	} else
+	if (dev_priv->card_type >= NV_20) {
+		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
 			NV_ERROR(dev, "cal_space: %d\n", ret);
 			goto out;
 		}
-		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
-				  req->offset) | 2);
-		OUT_RING(chan, 0);
+
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+
+			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+					push[i].offset) | 2);
+			OUT_RING(chan, 0);
+		}
 	} else {
-		ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
+		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 		if (ret) {
 			NV_ERROR(dev, "jmp_space: %d\n", ret);
 			goto out;
 		}
-		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
-				  req->offset) | 0x20000000);
-		OUT_RING(chan, 0);
 
-		/* Space the jumps apart with NOPs. */
-		for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+			struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+			uint32_t cmd;
+
+			cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+			cmd |= 0x20000000;
+			if (unlikely(cmd != req->suffix0)) {
+				if (!nvbo->kmap.virtual) {
+					ret = ttm_bo_kmap(&nvbo->bo, 0,
+							  nvbo->bo.mem.
+							  num_pages,
+							  &nvbo->kmap);
+					if (ret) {
+						WIND_RING(chan);
+						goto out;
+					}
+					nvbo->validate_mapped = true;
+				}
+
+				nouveau_bo_wr32(nvbo, (push[i].offset +
+						push[i].length - 8) / 4, cmd);
+			}
+
+			OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+					push[i].offset) | 0x20000000);
 			OUT_RING(chan, 0);
+			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
+				OUT_RING(chan, 0);
+		}
 	}
 
 	ret = nouveau_fence_new(chan, &fence, true);
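Note: the three submission paths above match what each generation's PFIFO can do: NV50+ channels with an indirect buffer (chan->dma.ib_max) point at the user bo via nv50_dma_push(); NV20+ can CALL into it (address | 2) and return; older parts only have JUMP (address | 0x20000000), so the kernel patches a jump back into the main ring at the tail of every user push. A sketch of the jump-back computation used above (illustrative only; assumes the push reserves two trailing dwords for the terminator):

    /* The return target is the ring position two dwords past the jump
     * we are about to emit; 0x20000000 marks a jump command here. */
    uint32_t cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
    cmd |= 0x20000000;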
@@ -790,9 +720,14 @@ out:
 	nouveau_fence_unref((void**)&fence);
 	mutex_unlock(&dev->struct_mutex);
 	kfree(bo);
+	kfree(push);
 
 out_next:
-	if (PUSHBUF_CAL) {
+	if (chan->dma.ib_max) {
+		req->suffix0 = 0x00000000;
+		req->suffix1 = 0x00000000;
+	} else
+	if (dev_priv->card_type >= NV_20) {
 		req->suffix0 = 0x00020000;
 		req->suffix1 = 0x00000000;
 	} else {
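Note: the suffix0/suffix1 write-back tells userspace which terminator to pre-place at the end of its next push, so the kmap-and-patch fallback above is normally skipped. A hypothetical userspace flow (the ioctl and struct names are real; buffer setup, the pb mapping and error handling are elided):

    struct drm_nouveau_gem_pushbuf req = { 0 };

    req.channel    = chan_id;
    req.nr_buffers = nr_bo;
    req.buffers    = (uintptr_t)bo_array;
    req.nr_push    = 1;
    req.push       = (uintptr_t)&push;  /* length includes 2 spare dwords */

    /* Terminator from the previous submit's write-back. */
    pb[(push.offset + push.length) / 4 - 2] = prev_suffix0;
    pb[(push.offset + push.length) / 4 - 1] = prev_suffix1;

    drmCommandWriteRead(fd, DRM_NOUVEAU_GEM_PUSHBUF, &req, sizeof(req));
    prev_suffix0 = req.suffix0;
    prev_suffix1 = req.suffix1;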
@@ -804,19 +739,6 @@ out_next:
 	return ret;
 }
 
-int
-nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
-			       struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_gem_pushbuf_call *req = data;
-
-	req->vram_available = dev_priv->fb_aper_free;
-	req->gart_available = dev_priv->gart_info.aper_free;
-
-	return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
-}
-
 static inline uint32_t
 domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
 {
@@ -831,74 +753,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
 }
 
-int
-nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_nouveau_gem_pin *req = data;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	int ret = 0;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
-		return -EINVAL;
-	}
-
-	if (!DRM_SUSER(DRM_CURPROC))
-		return -EPERM;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -EINVAL;
-	nvbo = nouveau_gem_object(gem);
-
-	ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
-	if (ret)
-		goto out;
-
-	req->offset = nvbo->bo.offset;
-	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-		req->domain = NOUVEAU_GEM_DOMAIN_GART;
-	else
-		req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
-out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
-nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_nouveau_gem_pin *req = data;
-	struct drm_gem_object *gem;
-	int ret;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -EINVAL;
-
-	ret = nouveau_bo_unpin(nouveau_gem_object(gem));
-
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
 int
 nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
@@ -935,9 +789,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	}
 
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
@@ -965,9 +817,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 	ret = 0;
 
 out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
@@ -986,9 +836,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
 		return -EINVAL;
 
 	ret = nouveau_gem_info(gem, req);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 