Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 129
1 file changed, 104 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6d66314d16bd..6e78b1aaa74d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -36,6 +36,7 @@
 #include "nouveau_mm.h"
 #include "nouveau_vm.h"
 #include "nouveau_fence.h"
+#include "nouveau_ramht.h"
 
 #include <linux/log2.h>
 #include <linux/slab.h>
@@ -511,6 +512,17 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 }
 
 static int
+nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+        int ret = RING_SPACE(chan, 2);
+        if (ret == 0) {
+                BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+                OUT_RING (chan, handle);
+        }
+        return ret;
+}
+
+static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
@@ -528,17 +540,17 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                 if (ret)
                         return ret;
 
-                BEGIN_NVC0(chan, NvSubM2MF, 0x0238, 2);
+                BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
                 OUT_RING (chan, upper_32_bits(dst_offset));
                 OUT_RING (chan, lower_32_bits(dst_offset));
-                BEGIN_NVC0(chan, NvSubM2MF, 0x030c, 6);
+                BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
                 OUT_RING (chan, upper_32_bits(src_offset));
                 OUT_RING (chan, lower_32_bits(src_offset));
                 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
                 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
                 OUT_RING (chan, PAGE_SIZE); /* line_length */
                 OUT_RING (chan, line_count);
-                BEGIN_NVC0(chan, NvSubM2MF, 0x0300, 1);
+                BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
                 OUT_RING (chan, 0x00100110);
 
                 page_count -= line_count;
@@ -550,6 +562,28 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 }
 
 static int
+nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+        int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+                                         &chan->m2mf_ntfy);
+        if (ret == 0) {
+                ret = RING_SPACE(chan, 6);
+                if (ret == 0) {
+                        BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+                        OUT_RING (chan, handle);
+                        BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+                        OUT_RING (chan, NvNotify0);
+                        OUT_RING (chan, NvDmaFB);
+                        OUT_RING (chan, NvDmaFB);
+                } else {
+                        nouveau_ramht_remove(chan, NvNotify0);
+                }
+        }
+
+        return ret;
+}
+
+static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
@@ -573,7 +607,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                         if (ret)
                                 return ret;
 
-                        BEGIN_NV04(chan, NvSubM2MF, 0x0200, 7);
+                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
                         OUT_RING (chan, 0);
                         OUT_RING (chan, 0);
                         OUT_RING (chan, stride);
@@ -586,7 +620,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                         if (ret)
                                 return ret;
 
-                        BEGIN_NV04(chan, NvSubM2MF, 0x0200, 1);
+                        BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
                         OUT_RING (chan, 1);
                 }
                 if (old_mem->mem_type == TTM_PL_VRAM &&
@@ -595,7 +629,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                         if (ret)
                                 return ret;
 
-                        BEGIN_NV04(chan, NvSubM2MF, 0x021c, 7);
+                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
                         OUT_RING (chan, 0);
                         OUT_RING (chan, 0);
                         OUT_RING (chan, stride);
@@ -608,7 +642,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                         if (ret)
                                 return ret;
 
-                        BEGIN_NV04(chan, NvSubM2MF, 0x021c, 1);
+                        BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
                         OUT_RING (chan, 1);
                 }
 
@@ -616,10 +650,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                 if (ret)
                         return ret;
 
-                BEGIN_NV04(chan, NvSubM2MF, 0x0238, 2);
+                BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
                 OUT_RING (chan, upper_32_bits(src_offset));
                 OUT_RING (chan, upper_32_bits(dst_offset));
-                BEGIN_NV04(chan, NvSubM2MF, 0x030c, 8);
+                BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
                 OUT_RING (chan, lower_32_bits(src_offset));
                 OUT_RING (chan, lower_32_bits(dst_offset));
                 OUT_RING (chan, stride);
@@ -628,7 +662,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                 OUT_RING (chan, height);
                 OUT_RING (chan, 0x00000101);
                 OUT_RING (chan, 0x00000000);
-                BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+                BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                 OUT_RING (chan, 0);
 
                 length -= amount;
@@ -639,6 +673,24 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
         return 0;
 }
 
+static int
+nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
+{
+        int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
+                                         &chan->m2mf_ntfy);
+        if (ret == 0) {
+                ret = RING_SPACE(chan, 4);
+                if (ret == 0) {
+                        BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+                        OUT_RING (chan, handle);
+                        BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+                        OUT_RING (chan, NvNotify0);
+                }
+        }
+
+        return ret;
+}
+
 static inline uint32_t
 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                       struct nouveau_channel *chan, struct ttm_mem_reg *mem)
@@ -661,7 +713,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
         if (ret)
                 return ret;
 
-        BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+        BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
         OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
         OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
 
@@ -673,7 +725,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                 if (ret)
                         return ret;
 
-                BEGIN_NV04(chan, NvSubM2MF,
+                BEGIN_NV04(chan, NvSubCopy,
                            NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                 OUT_RING (chan, src_offset);
                 OUT_RING (chan, dst_offset);
@@ -683,7 +735,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                 OUT_RING (chan, line_count);
                 OUT_RING (chan, 0x00000101);
                 OUT_RING (chan, 0x00000000);
-                BEGIN_NV04(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+                BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                 OUT_RING (chan, 0);
 
                 page_count -= line_count;
@@ -743,16 +795,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                 goto out;
         }
 
-        if (dev_priv->card_type < NV_50)
-                ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-        else
-        if (dev_priv->card_type < NV_C0)
-                ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-        else
-        if (dev_priv->card_type < NV_E0)
-                ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-        else
-                ret = nve0_bo_move_copy(chan, bo, &bo->mem, new_mem);
+        ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
         if (ret == 0) {
                 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                                     no_wait_reserve,
@@ -764,6 +807,42 @@ out:
         return ret;
 }
 
+void
+nouveau_bo_move_init(struct nouveau_channel *chan)
+{
+        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+        static const struct {
+                const char *name;
+                u32 oclass;
+                int (*exec)(struct nouveau_channel *,
+                            struct ttm_buffer_object *,
+                            struct ttm_mem_reg *, struct ttm_mem_reg *);
+                int (*init)(struct nouveau_channel *, u32 handle);
+        } _methods[] = {
+                { "COPY", 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+                { "M2MF", 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
+                { "M2MF", 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
+                { "M2MF", 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
+                {}
+        }, *mthd = _methods;
+        const char *name = "CPU";
+        int ret;
+
+        do {
+                ret = nouveau_gpuobj_gr_new(chan, mthd->oclass, mthd->oclass);
+                if (ret == 0) {
+                        ret = mthd->init(chan, mthd->oclass);
+                        if (ret == 0) {
+                                dev_priv->ttm.move = mthd->exec;
+                                name = mthd->name;
+                                break;
+                        }
+                }
+        } while ((++mthd)->exec);
+
+        NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
+}
+
 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                       bool no_wait_reserve, bool no_wait_gpu,
@@ -920,8 +999,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                 goto out;
         }
 
-        /* Software copy if the card isn't up and running yet. */
-        if (!dev_priv->channel) {
+        /* CPU copy if we have no accelerated method available */
+        if (!dev_priv->ttm.move) {
                 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
                 goto out;
         }