author     Ben Skeggs <bskeggs@redhat.com>    2011-06-06 06:54:42 -0400
committer  Ben Skeggs <bskeggs@redhat.com>    2011-06-23 01:59:34 -0400
commit     d2f96666c56a501c5b74c645d81918b7805d46ce (patch)
tree       938297be311c0f02f7fb36bdcefb8776d28b7ba6 /drivers/gpu/drm/nouveau
parent     f91bac5bf694e8060b7473fb0aefb8de09aa9595 (diff)
drm/nouveau: create temp vmas for both src and dst of bo moves
Greatly simplifies a number of things, particularly once per-client GPU
address spaces are involved.  May add this back later once I know what
things'll look like.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
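For readers skimming the diff below: nouveau_mem loses its single tmp_vma and
gains a vma[2] array, nouveau_bo_move_m2mf() now creates and maps a temporary
VMA for both the source and the destination ttm_mem_reg through the new
nouveau_vma_getmap() helper, and nouveau_mem_node_cleanup() unmaps and releases
both when TTM destroys the memory node.  The sketch below is a minimal,
self-contained C illustration of that ownership pattern only; the names in it
(tmp_mapping, mem_node, mem_node_map, mem_node_cleanup) are hypothetical
stand-ins, not nouveau or TTM API.

/*
 * Both ends of a buffer move get a temporary mapping attached to one
 * memory node; both are torn down together when that node is destroyed.
 */
#include <stdio.h>

struct tmp_mapping {
        int in_use;             /* plays the role of vma->node being set  */
        unsigned long offset;   /* virtual address the copy engine reads  */
};

struct mem_node {
        struct tmp_mapping vma[2];   /* [0] = source, [1] = destination   */
};

/* Roughly the role of nouveau_vma_getmap(): allocate address space and
 * map the backing pages for one side of the transfer. */
static int mem_node_map(struct mem_node *node, int idx, unsigned long offset)
{
        node->vma[idx].offset = offset;
        node->vma[idx].in_use = 1;
        return 0;
}

/* Mirrors nouveau_mem_node_cleanup(): unmap and release whichever of the
 * two temporary mappings was actually created. */
static void mem_node_cleanup(struct mem_node *node)
{
        int i;

        for (i = 0; i < 2; i++) {
                if (node->vma[i].in_use) {
                        /* nouveau_vm_unmap() + nouveau_vm_put() go here */
                        node->vma[i].in_use = 0;
                }
        }
}

int main(void)
{
        struct mem_node node = { { { 0, 0 }, { 0, 0 } } };

        /* nouveau_bo_move_m2mf() maps src and dst up front... */
        mem_node_map(&node, 0, 0x100000UL);  /* old_mem -> vma[0] */
        mem_node_map(&node, 1, 0x200000UL);  /* new_mem -> vma[1] */
        printf("copy %#lx -> %#lx\n", node.vma[0].offset, node.vma[1].offset);

        /* ...and TTM's teardown of the node cleans both up. */
        mem_node_cleanup(&node);
        return 0;
}

The benefit of attaching both mappings to the old node, rather than special
casing the destination, is that every move path tears them down the same way.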
Diffstat (limited to 'drivers/gpu/drm/nouveau')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c   | 116
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h  |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c  |  53
3 files changed, 64 insertions(+), 107 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 71d01ce6598e..a04998492bb9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -496,19 +496,12 @@ static int
 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-        struct nouveau_mem *old_node = old_mem->mm_node;
-        struct nouveau_mem *new_node = new_mem->mm_node;
-        struct nouveau_bo *nvbo = nouveau_bo(bo);
+        struct nouveau_mem *node = old_mem->mm_node;
+        u64 src_offset = node->vma[0].offset;
+        u64 dst_offset = node->vma[1].offset;
         u32 page_count = new_mem->num_pages;
-        u64 src_offset, dst_offset;
         int ret;
 
-        src_offset = old_node->tmp_vma.offset;
-        if (new_node->tmp_vma.node)
-                dst_offset = new_node->tmp_vma.offset;
-        else
-                dst_offset = nvbo->vma.offset;
-
         page_count = new_mem->num_pages;
         while (page_count) {
                 int line_count = (page_count > 2047) ? 2047 : page_count;
@@ -542,19 +535,13 @@ static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                   struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-        struct nouveau_mem *old_node = old_mem->mm_node;
-        struct nouveau_mem *new_node = new_mem->mm_node;
+        struct nouveau_mem *node = old_mem->mm_node;
         struct nouveau_bo *nvbo = nouveau_bo(bo);
         u64 length = (new_mem->num_pages << PAGE_SHIFT);
-        u64 src_offset, dst_offset;
+        u64 src_offset = node->vma[0].offset;
+        u64 dst_offset = node->vma[1].offset;
         int ret;
 
-        src_offset = old_node->tmp_vma.offset;
-        if (new_node->tmp_vma.node)
-                dst_offset = new_node->tmp_vma.offset;
-        else
-                dst_offset = nvbo->vma.offset;
-
         while (length) {
                 u32 amount, stride, height;
 
@@ -690,6 +677,27 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 }
 
 static int
+nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
+                   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
+{
+        struct nouveau_mem *node = mem->mm_node;
+        int ret;
+
+        ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
+                             node->page_shift, NV_MEM_ACCESS_RO, vma);
+        if (ret)
+                return ret;
+
+        if (mem->mem_type == TTM_PL_VRAM)
+                nouveau_vm_map(vma, node);
+        else
+                nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
+                                  node, node->pages);
+
+        return 0;
+}
+
+static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
@@ -706,31 +714,20 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                 mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
         }
 
-        /* create temporary vma for old memory, this will get cleaned
-         * up after ttm destroys the ttm_mem_reg
+        /* create temporary vmas for the transfer and attach them to the
+         * old nouveau_mem node, these will get cleaned up after ttm has
+         * destroyed the ttm_mem_reg
          */
         if (dev_priv->card_type >= NV_50) {
                 struct nouveau_mem *node = old_mem->mm_node;
-                if (!node->tmp_vma.node) {
-                        u32 page_shift = nvbo->vma.node->type;
-                        if (old_mem->mem_type == TTM_PL_TT)
-                                page_shift = nvbo->vma.vm->spg_shift;
-
-                        ret = nouveau_vm_get(chan->vm,
-                                             old_mem->num_pages << PAGE_SHIFT,
-                                             page_shift, NV_MEM_ACCESS_RO,
-                                             &node->tmp_vma);
-                        if (ret)
-                                goto out;
-                }
 
-                if (old_mem->mem_type == TTM_PL_VRAM)
-                        nouveau_vm_map(&node->tmp_vma, node);
-                else {
-                        nouveau_vm_map_sg(&node->tmp_vma, 0,
-                                          old_mem->num_pages << PAGE_SHIFT,
-                                          node, node->pages);
-                }
+                ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
+                if (ret)
+                        goto out;
+
+                ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
+                if (ret)
+                        goto out;
         }
 
         if (dev_priv->card_type < NV_50)
@@ -757,7 +754,6 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                       bool no_wait_reserve, bool no_wait_gpu,
                       struct ttm_mem_reg *new_mem)
 {
-        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
         u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
         struct ttm_placement placement;
         struct ttm_mem_reg tmp_mem;
@@ -777,23 +773,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
         if (ret)
                 goto out;
 
-        if (dev_priv->card_type >= NV_50) {
-                struct nouveau_bo *nvbo = nouveau_bo(bo);
-                struct nouveau_mem *node = tmp_mem.mm_node;
-                struct nouveau_vma *vma = &nvbo->vma;
-                if (vma->node->type != vma->vm->spg_shift)
-                        vma = &node->tmp_vma;
-                nouveau_vm_map_sg(vma, 0, tmp_mem.num_pages << PAGE_SHIFT,
-                                  node, node->pages);
-        }
-
         ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
-
-        if (dev_priv->card_type >= NV_50) {
-                struct nouveau_bo *nvbo = nouveau_bo(bo);
-                nouveau_vm_unmap(&nvbo->vma);
-        }
-
         if (ret)
                 goto out;
 
@@ -846,21 +826,15 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
         if (!vma->vm)
                 return;
 
-        switch (new_mem->mem_type) {
-        case TTM_PL_VRAM:
-                nouveau_vm_map(vma, node);
-                break;
-        case TTM_PL_TT:
-                if (vma->node->type != vma->vm->spg_shift) {
-                        nouveau_vm_unmap(vma);
-                        vma = &node->tmp_vma;
-                }
-                nouveau_vm_map_sg(vma, 0, new_mem->num_pages << PAGE_SHIFT,
-                                  node, node->pages);
-                break;
-        default:
+        if (new_mem->mem_type == TTM_PL_VRAM) {
+                nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
+        } else
+        if (new_mem->mem_type == TTM_PL_TT &&
+            nvbo->page_shift == nvbo->vma.vm->spg_shift) {
+                nouveau_vm_map_sg(&nvbo->vma, 0, new_mem->
+                                  num_pages << PAGE_SHIFT, node, node->pages);
+        } else {
                 nouveau_vm_unmap(&nvbo->vma);
-                break;
         }
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1439188c3422..d7083d5ffd02 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -77,7 +77,7 @@ struct nouveau_mem {
         struct drm_device *dev;
 
         struct nouveau_vma bar_vma;
-        struct nouveau_vma tmp_vma;
+        struct nouveau_vma vma[2];
         u8  page_shift;
 
         struct drm_mm_node *tag;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index f55b51be1bf1..9d9605644175 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -762,20 +762,29 @@ nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
         return 0;
 }
 
+static inline void
+nouveau_mem_node_cleanup(struct nouveau_mem *node)
+{
+        if (node->vma[0].node) {
+                nouveau_vm_unmap(&node->vma[0]);
+                nouveau_vm_put(&node->vma[0]);
+        }
+
+        if (node->vma[1].node) {
+                nouveau_vm_unmap(&node->vma[1]);
+                nouveau_vm_put(&node->vma[1]);
+        }
+}
+
 static void
 nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                          struct ttm_mem_reg *mem)
 {
         struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
         struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-        struct nouveau_mem *node = mem->mm_node;
         struct drm_device *dev = dev_priv->dev;
 
-        if (node->tmp_vma.node) {
-                nouveau_vm_unmap(&node->tmp_vma);
-                nouveau_vm_put(&node->tmp_vma);
-        }
-
+        nouveau_mem_node_cleanup(mem->mm_node);
         vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
 }
 
@@ -860,15 +869,9 @@ static void
 nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
                          struct ttm_mem_reg *mem)
 {
-        struct nouveau_mem *node = mem->mm_node;
-
-        if (node->tmp_vma.node) {
-                nouveau_vm_unmap(&node->tmp_vma);
-                nouveau_vm_put(&node->tmp_vma);
-        }
-
+        nouveau_mem_node_cleanup(mem->mm_node);
         mem->mm_node = NULL;
-        kfree(node);
+        kfree(mem->mm_node);
 }
 
 static int
@@ -878,11 +881,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                          struct ttm_mem_reg *mem)
 {
         struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-        struct nouveau_bo *nvbo = nouveau_bo(bo);
-        struct nouveau_vma *vma = &nvbo->vma;
-        struct nouveau_vm *vm = vma->vm;
         struct nouveau_mem *node;
-        int ret;
 
         if (unlikely((mem->num_pages << PAGE_SHIFT) >=
                      dev_priv->gart_info.aper_size))
@@ -891,24 +890,8 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
         node = kzalloc(sizeof(*node), GFP_KERNEL);
         if (!node)
                 return -ENOMEM;
+        node->page_shift = 12;
 
-        /* This node must be for evicting large-paged VRAM
-         * to system memory.  Due to a nv50 limitation of
-         * not being able to mix large/small pages within
-         * the same PDE, we need to create a temporary
-         * small-paged VMA for the eviction.
-         */
-        if (vma->node->type != vm->spg_shift) {
-                ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
-                                     vm->spg_shift, NV_MEM_ACCESS_RW,
-                                     &node->tmp_vma);
-                if (ret) {
-                        kfree(node);
-                        return ret;
-                }
-        }
-
-        node->page_shift = nvbo->vma.node->type;
         mem->mm_node = node;
         mem->start = 0;
         return 0;