about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorBen Skeggs <bskeggs@redhat.com>2011-06-06 00:07:04 -0400
committerBen Skeggs <bskeggs@redhat.com>2011-06-23 01:59:53 -0400
commitfd2871af3d2dad4e07df84941128b0813b5dd34b (patch)
treeda48aa06eb69e791d78ba10ddb6fdc142e223c44
parenta3fcd0a975c4ae272c3e5db0632479633cef19ef (diff)
drm/nouveau: initial changes to support multiple VMAs per buffer object
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h1
3 files changed, 80 insertions, 26 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index ae1f0e46e481..36f3137b3ae2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,10 +49,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
49 DRM_ERROR("bo %p still attached to GEM object\n", bo); 49 DRM_ERROR("bo %p still attached to GEM object\n", bo);
50 50
51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL); 51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
52 if (nvbo->vma.node) { 52 nouveau_bo_vma_del(nvbo, &nvbo->vma);
53 nouveau_vm_unmap(&nvbo->vma);
54 nouveau_vm_put(&nvbo->vma);
55 }
56 kfree(nvbo); 53 kfree(nvbo);
57} 54}
58 55
@@ -103,6 +100,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
103 return -ENOMEM; 100 return -ENOMEM;
104 INIT_LIST_HEAD(&nvbo->head); 101 INIT_LIST_HEAD(&nvbo->head);
105 INIT_LIST_HEAD(&nvbo->entry); 102 INIT_LIST_HEAD(&nvbo->entry);
103 INIT_LIST_HEAD(&nvbo->vma_list);
106 nvbo->tile_mode = tile_mode; 104 nvbo->tile_mode = tile_mode;
107 nvbo->tile_flags = tile_flags; 105 nvbo->tile_flags = tile_flags;
108 nvbo->bo.bdev = &dev_priv->ttm.bdev; 106 nvbo->bo.bdev = &dev_priv->ttm.bdev;
@@ -114,24 +112,22 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
114 } 112 }
115 113
116 nouveau_bo_fixup_align(nvbo, flags, &align, &size); 114 nouveau_bo_fixup_align(nvbo, flags, &align, &size);
117 align >>= PAGE_SHIFT; 115 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
116 nouveau_bo_placement_set(nvbo, flags, 0);
118 117
119 if (dev_priv->chan_vm) { 118 if (dev_priv->chan_vm) {
120 ret = nouveau_vm_get(dev_priv->chan_vm, size, nvbo->page_shift, 119 ret = nouveau_bo_vma_add(nvbo, dev_priv->chan_vm, &nvbo->vma);
121 NV_MEM_ACCESS_RW, &nvbo->vma);
122 if (ret) { 120 if (ret) {
123 kfree(nvbo); 121 kfree(nvbo);
124 return ret; 122 return ret;
125 } 123 }
126 } 124 }
127 125
128 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
129 nouveau_bo_placement_set(nvbo, flags, 0);
130
131 nvbo->channel = chan; 126 nvbo->channel = chan;
132 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 127 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
133 ttm_bo_type_device, &nvbo->placement, align, 0, 128 ttm_bo_type_device, &nvbo->placement,
134 false, NULL, size, nouveau_bo_del_ttm); 129 align >> PAGE_SHIFT, 0, false, NULL, size,
130 nouveau_bo_del_ttm);
135 if (ret) { 131 if (ret) {
136 /* ttm will call nouveau_bo_del_ttm if it fails.. */ 132 /* ttm will call nouveau_bo_del_ttm if it fails.. */
137 return ret; 133 return ret;
@@ -818,20 +814,20 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
818{ 814{
819 struct nouveau_mem *node = new_mem->mm_node; 815 struct nouveau_mem *node = new_mem->mm_node;
820 struct nouveau_bo *nvbo = nouveau_bo(bo); 816 struct nouveau_bo *nvbo = nouveau_bo(bo);
821 struct nouveau_vma *vma = &nvbo->vma; 817 struct nouveau_vma *vma;
822 818
823 if (!vma->vm) 819 list_for_each_entry(vma, &nvbo->vma_list, head) {
824 return; 820 if (new_mem->mem_type == TTM_PL_VRAM) {
825 821 nouveau_vm_map(vma, new_mem->mm_node);
826 if (new_mem->mem_type == TTM_PL_VRAM) { 822 } else
827 nouveau_vm_map(&nvbo->vma, new_mem->mm_node); 823 if (new_mem->mem_type == TTM_PL_TT &&
828 } else 824 nvbo->page_shift == vma->vm->spg_shift) {
829 if (new_mem->mem_type == TTM_PL_TT && 825 nouveau_vm_map_sg(vma, 0, new_mem->
830 nvbo->page_shift == nvbo->vma.vm->spg_shift) { 826 num_pages << PAGE_SHIFT,
831 nouveau_vm_map_sg(&nvbo->vma, 0, new_mem-> 827 node, node->pages);
832 num_pages << PAGE_SHIFT, node, node->pages); 828 } else {
833 } else { 829 nouveau_vm_unmap(vma);
834 nouveau_vm_unmap(&nvbo->vma); 830 }
835 } 831 }
836} 832}
837 833
@@ -1077,3 +1073,53 @@ struct ttm_bo_driver nouveau_bo_driver = {
1077 .io_mem_free = &nouveau_ttm_io_mem_free, 1073 .io_mem_free = &nouveau_ttm_io_mem_free,
1078}; 1074};
1079 1075
1076struct nouveau_vma *
1077nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
1078{
1079 struct nouveau_vma *vma;
1080 list_for_each_entry(vma, &nvbo->vma_list, head) {
1081 if (vma->vm == vm)
1082 return vma;
1083 }
1084
1085 return NULL;
1086}
1087
1088int
1089nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
1090 struct nouveau_vma *vma)
1091{
1092 const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
1093 struct nouveau_mem *node = nvbo->bo.mem.mm_node;
1094 int ret;
1095
1096 ret = nouveau_vm_get(vm, size, nvbo->page_shift,
1097 NV_MEM_ACCESS_RW, vma);
1098 if (ret)
1099 return ret;
1100
1101 if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
1102 nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
1103 else
1104 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
1105 nouveau_vm_map_sg(vma, 0, size, node, node->pages);
1106
1107 list_add_tail(&vma->head, &nvbo->vma_list);
1108 return 0;
1109}
1110
1111void
1112nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
1113{
1114 if (vma->node) {
1115 if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
1116 spin_lock(&nvbo->bo.bdev->fence_lock);
1117 ttm_bo_wait(&nvbo->bo, false, false, false);
1118 spin_unlock(&nvbo->bo.bdev->fence_lock);
1119 nouveau_vm_unmap(vma);
1120 }
1121
1122 nouveau_vm_put(vma);
1123 list_del(&vma->head);
1124 }
1125}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index d7083d5ffd02..23be8cb8ff54 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -116,6 +116,7 @@ struct nouveau_bo {
116 struct nouveau_channel *channel; 116 struct nouveau_channel *channel;
117 117
118 struct nouveau_vma vma; 118 struct nouveau_vma vma;
119 struct list_head vma_list;
119 unsigned page_shift; 120 unsigned page_shift;
120 121
121 uint32_t tile_mode; 122 uint32_t tile_mode;
@@ -1283,6 +1284,12 @@ extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
1283extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, 1284extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
1284 bool no_wait_reserve, bool no_wait_gpu); 1285 bool no_wait_reserve, bool no_wait_gpu);
1285 1286
1287extern struct nouveau_vma *
1288nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
1289extern int nouveau_bo_vma_add(struct nouveau_bo *, struct nouveau_vm *,
1290 struct nouveau_vma *);
1291extern void nouveau_bo_vma_del(struct nouveau_bo *, struct nouveau_vma *);
1292
1286/* nouveau_fence.c */ 1293/* nouveau_fence.c */
1287struct nouveau_fence; 1294struct nouveau_fence;
1288extern int nouveau_fence_init(struct drm_device *); 1295extern int nouveau_fence_init(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index c48a9fc2b47b..07d07ff9e28b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -41,6 +41,7 @@ struct nouveau_vm_pgd {
41}; 41};
42 42
43struct nouveau_vma { 43struct nouveau_vma {
44 struct list_head head;
44 struct nouveau_vm *vm; 45 struct nouveau_vm *vm;
45 struct nouveau_mm_node *node; 46 struct nouveau_mm_node *node;
46 u64 offset; 47 u64 offset;