author     Ben Skeggs <bskeggs@redhat.com>   2011-06-07 01:25:12 -0400
committer  Ben Skeggs <bskeggs@redhat.com>   2011-06-23 02:00:42 -0400
commit     2fd3db6f1457050bdebf97e45147ce6827e1742a
tree       5e1a6ab1ff18497a63364ee9ca6c6d1e8608ebeb
parent     7375c95b343aa575940704a38482a334ea87ac6c
drm/nouveau: remove implicit mapping of every bo into chan_vm
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c   10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h   1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vm.h    1
4 files changed, 43 insertions(+), 11 deletions(-)
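In short, this drops the single nvbo->vma that nouveau_bo_new() used to map into dev_priv->chan_vm for every buffer object, and instead creates a mapping into a client's VM when the GEM object is opened. The mapping is shared across openers via a new refcount field in struct nouveau_vma and torn down when the last reference is dropped on close. The following is only a condensed sketch of that lifecycle, pulled from the nouveau_gem.c hunks below; the ttm_bo_reserve()/ttm_bo_unreserve() locking of the real patch is trimmed here for readability.

/*
 * Condensed sketch of the per-client mapping lifecycle introduced by this
 * patch (buffer reservation omitted; see the actual hunks below).
 */
int nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vma *vma;
        int ret;

        if (!fpriv->vm)
                return 0;                       /* client has no VM: nothing to map */

        vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
        if (vma) {
                vma->refcount++;                /* bo already mapped into this VM */
                return 0;
        }

        vma = kzalloc(sizeof(*vma), GFP_KERNEL);
        if (!vma)
                return -ENOMEM;

        ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma); /* maps bo, sets refcount = 1 */
        if (ret)
                kfree(vma);
        return ret;
}

void nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vma *vma;

        if (!fpriv->vm)
                return;

        vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
        if (vma && --vma->refcount == 0)
                nouveau_bo_vma_del(nvbo, vma);  /* last opener gone: unmap from this VM */
}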
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 49af4072c0f6..890d50e4d682 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,6 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
                 DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
         nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
-        nouveau_bo_vma_del(nvbo, &nvbo->vma);
         kfree(nvbo);
 }
 
@@ -115,14 +114,6 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
         nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
         nouveau_bo_placement_set(nvbo, flags, 0);
 
-        if (dev_priv->chan_vm) {
-                ret = nouveau_bo_vma_add(nvbo, dev_priv->chan_vm, &nvbo->vma);
-                if (ret) {
-                        kfree(nvbo);
-                        return ret;
-                }
-        }
-
         ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                           ttm_bo_type_device, &nvbo->placement,
                           align >> PAGE_SHIFT, 0, false, NULL, size,
@@ -1103,6 +1094,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
                 nouveau_vm_map_sg(vma, 0, size, node, node->pages);
 
         list_add_tail(&vma->head, &nvbo->vma_list);
+        vma->refcount = 1;
         return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index bdb682d613d3..bbea0452dca7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -115,7 +115,6 @@ struct nouveau_bo {
 
         struct nouveau_channel *channel;
 
-        struct nouveau_vma vma;
         struct list_head vma_list;
         unsigned page_shift;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 3e1c7010e076..022393777805 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -63,20 +63,60 @@ int
 nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
         struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+        struct nouveau_vma *vma;
+        int ret;
 
         if (!fpriv->vm)
                 return 0;
 
-        return 0;
+        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+        if (ret)
+                return ret;
+
+        vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+        if (!vma) {
+                vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+                if (!vma) {
+                        ret = -ENOMEM;
+                        goto out;
+                }
+
+                ret = nouveau_bo_vma_add(nvbo, fpriv->vm, vma);
+                if (ret) {
+                        kfree(vma);
+                        goto out;
+                }
+        } else {
+                vma->refcount++;
+        }
+
+out:
+        ttm_bo_unreserve(&nvbo->bo);
+        return ret;
 }
 
 void
 nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
         struct nouveau_fpriv *fpriv = nouveau_fpriv(file_priv);
+        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+        struct nouveau_vma *vma;
+        int ret;
 
         if (!fpriv->vm)
                 return;
+
+        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+        if (ret)
+                return;
+
+        vma = nouveau_bo_vma_find(nvbo, fpriv->vm);
+        if (vma) {
+                if (--vma->refcount == 0)
+                        nouveau_bo_vma_del(nvbo, vma);
+        }
+        ttm_bo_unreserve(&nvbo->bo);
 }
 
 int
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 07d07ff9e28b..579ca8cc223c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -42,6 +42,7 @@ struct nouveau_vm_pgd {
 
 struct nouveau_vma {
         struct list_head head;
+        int refcount;
         struct nouveau_vm *vm;
         struct nouveau_mm_node *node;
         u64 offset;