about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/drm/nouveau/nouveau_mem.c
diff options
context:
space:
mode:
author: Ben Skeggs <bskeggs@redhat.com> 2011-02-09 21:59:51 -0500
committer: Ben Skeggs <bskeggs@redhat.com> 2011-02-24 15:46:01 -0500
commit: 26c0c9e33a2eb44b345d22d5928d5c8b7b261226 (patch)
tree: d15305e77bfc4547a36cfa9755aeeffb15dd59ce /drivers/gpu/drm/nouveau/nouveau_mem.c
parent: d5f423947a11103c43ad26ebb680d049c2d8edd6 (diff)
drm/nv50-nvc0: delay GART binding until move_notify time
The immediate benefit of doing this is that on NV50 and up, the GPU virtual address of any buffer is now constant, regardless of which memtype it is placed in.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_mem.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c  81
1 file changed, 81 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index ff5fe28b467b..73f37bd0adfa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -785,3 +785,84 @@ const struct ttm_mem_type_manager_func nouveau_vram_manager = {
785 nouveau_vram_manager_del, 785 nouveau_vram_manager_del,
786 nouveau_vram_manager_debug 786 nouveau_vram_manager_debug
787}; 787};
788
/* TTM init hook for the GART memory type: nothing to set up here, since
 * GART space is not managed by a drm_mm in this manager (binding is
 * deferred to move_notify time; see nouveau_gart_manager_new()).
 */
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}
794
/* TTM takedown hook for the GART memory type: the init hook allocated
 * nothing, so there is nothing to tear down.
 */
static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}
800
801static void
802nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
803 struct ttm_mem_reg *mem)
804{
805 struct nouveau_mem *node = mem->mm_node;
806
807 if (node->tmp_vma.node) {
808 nouveau_vm_unmap(&node->tmp_vma);
809 nouveau_vm_put(&node->tmp_vma);
810 }
811 mem->mm_node = NULL;
812}
813
814static int
815nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
816 struct ttm_buffer_object *bo,
817 struct ttm_placement *placement,
818 struct ttm_mem_reg *mem)
819{
820 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
821 struct nouveau_bo *nvbo = nouveau_bo(bo);
822 struct nouveau_vma *vma = &nvbo->vma;
823 struct nouveau_vm *vm = vma->vm;
824 struct nouveau_mem *node;
825 int ret;
826
827 if (unlikely((mem->num_pages << PAGE_SHIFT) >=
828 dev_priv->gart_info.aper_size))
829 return -ENOMEM;
830
831 node = kzalloc(sizeof(*node), GFP_KERNEL);
832 if (!node)
833 return -ENOMEM;
834
835 /* This node must be for evicting large-paged VRAM
836 * to system memory. Due to a nv50 limitation of
837 * not being able to mix large/small pages within
838 * the same PDE, we need to create a temporary
839 * small-paged VMA for the eviction.
840 */
841 if (vma->node->type != vm->spg_shift) {
842 ret = nouveau_vm_get(vm, (u64)vma->node->length << 12,
843 vm->spg_shift, NV_MEM_ACCESS_RW,
844 &node->tmp_vma);
845 if (ret) {
846 kfree(node);
847 return ret;
848 }
849 }
850
851 node->page_shift = nvbo->vma.node->type;
852 mem->mm_node = node;
853 mem->start = 0;
854 return 0;
855}
856
/* TTM debug hook for the GART memory type: this manager keeps no
 * allocator state, so there is nothing to dump.
 * NOTE(review): unlike its sibling hooks this one is not static —
 * presumably unintentional; confirm no header declares it before
 * narrowing the linkage.
 */
void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}
861
862const struct ttm_mem_type_manager_func nouveau_gart_manager = {
863 nouveau_gart_manager_init,
864 nouveau_gart_manager_fini,
865 nouveau_gart_manager_new,
866 nouveau_gart_manager_del,
867 nouveau_gart_manager_debug
868};