author		Ben Skeggs <bskeggs@redhat.com>		2011-06-06 06:54:42 -0400
committer	Ben Skeggs <bskeggs@redhat.com>		2011-06-23 01:59:34 -0400
commit		d2f96666c56a501c5b74c645d81918b7805d46ce (patch)
tree		938297be311c0f02f7fb36bdcefb8776d28b7ba6 /drivers/gpu/drm/nouveau/nouveau_bo.c
parent		f91bac5bf694e8060b7473fb0aefb8de09aa9595 (diff)
drm/nouveau: create temp vmas for both src and dst of bo moves
Greatly simplifies a number of things, particularly once per-client GPU
address spaces are involved.
May add this back later once I know what things'll look like.
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
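To make the commit message concrete, here is the gist of the new flow, condensed from the diff below (error handling and channel locking are in the full patch): a small helper allocates and maps a GPU virtual-address range for a ttm_mem_reg, nouveau_bo_move_m2mf() calls it once per direction and attaches both temporary vmas to the old memory node, and the NV50/NVC0 copy hooks then just read their offsets from those two slots.

/* Condensed illustration only, taken from the diff below; not a
 * complete excerpt.  Both temporary vmas hang off the old node, so a
 * single cleanup path covers source and destination.
 */
if (dev_priv->card_type >= NV_50) {
	struct nouveau_mem *node = old_mem->mm_node;

	ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]); /* src */
	if (ret)
		goto out;
	ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]); /* dst */
	if (ret)
		goto out;
}

/* ...and the per-generation copy hooks simply read: */
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;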
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c	116
1 file changed, 45 insertions, 71 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 71d01ce6598e..a04998492bb9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -496,19 +496,12 @@ static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *old_node = old_mem->mm_node;
-	struct nouveau_mem *new_node = new_mem->mm_node;
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_mem *node = old_mem->mm_node;
+	u64 src_offset = node->vma[0].offset;
+	u64 dst_offset = node->vma[1].offset;
 	u32 page_count = new_mem->num_pages;
-	u64 src_offset, dst_offset;
 	int ret;
 
-	src_offset = old_node->tmp_vma.offset;
-	if (new_node->tmp_vma.node)
-		dst_offset = new_node->tmp_vma.offset;
-	else
-		dst_offset = nvbo->vma.offset;
-
 	page_count = new_mem->num_pages;
 	while (page_count) {
 		int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -542,19 +535,13 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_mem *old_node = old_mem->mm_node;
-	struct nouveau_mem *new_node = new_mem->mm_node;
+	struct nouveau_mem *node = old_mem->mm_node;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	u64 length = (new_mem->num_pages << PAGE_SHIFT);
-	u64 src_offset, dst_offset;
+	u64 src_offset = node->vma[0].offset;
+	u64 dst_offset = node->vma[1].offset;
 	int ret;
 
-	src_offset = old_node->tmp_vma.offset;
-	if (new_node->tmp_vma.node)
-		dst_offset = new_node->tmp_vma.offset;
-	else
-		dst_offset = nvbo->vma.offset;
-
 	while (length) {
 		u32 amount, stride, height;
 
@@ -690,6 +677,27 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 }
 
 static int
+nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
+		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
+{
+	struct nouveau_mem *node = mem->mm_node;
+	int ret;
+
+	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
+			     node->page_shift, NV_MEM_ACCESS_RO, vma);
+	if (ret)
+		return ret;
+
+	if (mem->mem_type == TTM_PL_VRAM)
+		nouveau_vm_map(vma, node);
+	else
+		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
+				  node, node->pages);
+
+	return 0;
+}
+
+static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		     bool no_wait_reserve, bool no_wait_gpu,
 		     struct ttm_mem_reg *new_mem)
@@ -706,31 +714,20 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
 	}
 
-	/* create temporary vma for old memory, this will get cleaned
-	 * up after ttm destroys the ttm_mem_reg
+	/* create temporary vmas for the transfer and attach them to the
+	 * old nouveau_mem node, these will get cleaned up after ttm has
+	 * destroyed the ttm_mem_reg
 	 */
 	if (dev_priv->card_type >= NV_50) {
 		struct nouveau_mem *node = old_mem->mm_node;
-		if (!node->tmp_vma.node) {
-			u32 page_shift = nvbo->vma.node->type;
-			if (old_mem->mem_type == TTM_PL_TT)
-				page_shift = nvbo->vma.vm->spg_shift;
-
-			ret = nouveau_vm_get(chan->vm,
-					     old_mem->num_pages << PAGE_SHIFT,
-					     page_shift, NV_MEM_ACCESS_RO,
-					     &node->tmp_vma);
-			if (ret)
-				goto out;
-		}
 
-		if (old_mem->mem_type == TTM_PL_VRAM)
-			nouveau_vm_map(&node->tmp_vma, node);
-		else {
-			nouveau_vm_map_sg(&node->tmp_vma, 0,
-					  old_mem->num_pages << PAGE_SHIFT,
-					  node, node->pages);
-		}
+		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
+		if (ret)
+			goto out;
+
+		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
+		if (ret)
+			goto out;
 	}
 
 	if (dev_priv->card_type < NV_50)
@@ -757,7 +754,6 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 		      bool no_wait_reserve, bool no_wait_gpu,
 		      struct ttm_mem_reg *new_mem)
 {
-	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	struct ttm_placement placement;
 	struct ttm_mem_reg tmp_mem;
@@ -777,23 +773,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		goto out;
 
-	if (dev_priv->card_type >= NV_50) {
-		struct nouveau_bo *nvbo = nouveau_bo(bo);
-		struct nouveau_mem *node = tmp_mem.mm_node;
-		struct nouveau_vma *vma = &nvbo->vma;
-		if (vma->node->type != vma->vm->spg_shift)
-			vma = &node->tmp_vma;
-		nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
-				  node, node->pages);
-	}
-
 	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
-
-	if (dev_priv->card_type >= NV_50) {
-		struct nouveau_bo *nvbo = nouveau_bo(bo);
-		nouveau_vm_unmap(&nvbo->vma);
-	}
-
 	if (ret)
 		goto out;
 
@@ -846,21 +826,15 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
 	if (!vma->vm)
 		return;
 
-	switch (new_mem->mem_type) {
-	case TTM_PL_VRAM:
-		nouveau_vm_map(vma, node);
-		break;
-	case TTM_PL_TT:
-		if (vma->node->type != vma->vm->spg_shift) {
-			nouveau_vm_unmap(vma);
-			vma = &node->tmp_vma;
-		}
-		nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
-				  node, node->pages);
-		break;
-	default:
+	if (new_mem->mem_type == TTM_PL_VRAM) {
+		nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
+	} else
+	if (new_mem->mem_type == TTM_PL_TT &&
+	    nvbo->page_shift == nvbo->vma.vm->spg_shift) {
+		nouveau_vm_map_sg(&nvbo->vma, 0, new_mem->
+				  num_pages << PAGE_SHIFT, node, node->pages);
+	} else {
 		nouveau_vm_unmap(&nvbo->vma);
-		break;
 	}
 }
 
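The new comment in nouveau_bo_move_m2mf() says the temporary vmas are cleaned up after TTM destroys the ttm_mem_reg; that side of the change lives outside this file (the diffstat above is limited to nouveau_bo.c). Below is a hypothetical sketch of what such cleanup could look like, assuming a helper that walks both slots of node->vma[] when the memory node is released -- the helper name is illustrative, not the actual companion code, though nouveau_vm_unmap() and nouveau_vm_put() are the driver's existing teardown calls.

/* Hypothetical sketch only -- the real cleanup lives in the memory
 * manager code outside this diffstat.  Drops both temporary vmas, if
 * they were created, when the nouveau_mem node goes away.
 */
static void
nouveau_mem_vma_cleanup_sketch(struct nouveau_mem *node)
{
	int i;

	for (i = 0; i < 2; i++) {
		if (node->vma[i].node) {
			nouveau_vm_unmap(&node->vma[i]);
			nouveau_vm_put(&node->vma[i]);
		}
	}
}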