author    Aaron Plattner <aplattner@nvidia.com>	2013-01-15 15:47:43 -0500
committer Dave Airlie <airlied@redhat.com>	2013-02-07 22:39:09 -0500
commit    ab9ccb96a6e6f95bcde6b8b2a524370efdbfdcd6 (patch)
tree      132cc3a4707a5a7661dc1fb86d6b7758ab7f0156 /drivers
parent    89177644a7b6306e6084a89eab7e290f4bfef397 (diff)
drm/nouveau: use prime helpers
Simplify the Nouveau prime implementation by using the default behavior
provided by drm_gem_prime_import and drm_gem_prime_export.

v2: Rename functions to nouveau_gem_prime_get_sg_table and
    nouveau_gem_prime_import_sg_table.

Signed-off-by: Aaron Plattner <aplattner@nvidia.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Airlie <airlied@linux.ie>
Signed-off-by: Dave Airlie <airlied@redhat.com>
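For context, the export side of the common helpers this patch switches to (added by the parent commit in drm_prime.c) pins the object through the driver's new gem_prime_pin hook and then wraps it in a dma-buf backed by the helpers' shared dma_buf_ops, whose map_dma_buf callback in turn calls the driver's gem_prime_get_sg_table hook. A simplified sketch, not the helper's exact source (the ops struct name is approximate):

struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
				     struct drm_gem_object *obj, int flags)
{
	int ret;

	/* let the driver pin the backing storage for the export's lifetime */
	if (dev->driver->gem_prime_pin) {
		ret = dev->driver->gem_prime_pin(obj);
		if (ret)
			return ERR_PTR(ret);
	}

	/* shared ops fetch pages via driver->gem_prime_get_sg_table() */
	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
}

This is why nouveau only needs to supply pin/get_sg_table/vmap/vunmap callbacks below, rather than a full dma_buf_ops implementation.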
Diffstat (limited to 'drivers')
 drivers/gpu/drm/nouveau/nouveau_bo.h    |   1 -
 drivers/gpu/drm/nouveau/nouveau_drm.c   |   9 ++-
 drivers/gpu/drm/nouveau/nouveau_gem.c   |   2 --
 drivers/gpu/drm/nouveau/nouveau_gem.h   |  10 ++--
 drivers/gpu/drm/nouveau/nouveau_prime.c | 173 ++++--------------------
 5 files changed, 34 insertions(+), 161 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 81d00fe03b56..653dbbbd4fa1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -33,7 +33,6 @@ struct nouveau_bo {
 	int pin_refcnt;
 
 	struct ttm_bo_kmap_obj dma_buf_vmap;
-	int vmapping_count;
 };
 
 static inline struct nouveau_bo *
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8b090f1eb51d..8e8e8ce75528 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -650,8 +650,13 @@ driver = {
 
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_prime_export = nouveau_gem_prime_export,
-	.gem_prime_import = nouveau_gem_prime_import,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_pin = nouveau_gem_prime_pin,
+	.gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
+	.gem_prime_vmap = nouveau_gem_prime_vmap,
+	.gem_prime_vunmap = nouveau_gem_prime_vunmap,
 
 	.gem_init_object = nouveau_gem_object_new,
 	.gem_free_object = nouveau_gem_object_del,
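With .prime_handle_to_fd and .prime_fd_to_handle still pointing at the common entry points, the userspace-visible flow is unchanged by this patch. A minimal sketch of that flow using libdrm's drmPrimeHandleToFD/drmPrimeFDToHandle wrappers (share_bo is a hypothetical helper name; error handling trimmed):

#include <stdint.h>
#include <xf86drm.h>

/* Export a GEM handle from one DRM device fd and import it into another. */
int share_bo(int exporter_fd, int importer_fd, uint32_t handle)
{
	int prime_fd;
	uint32_t imported;

	/* DRM_IOCTL_PRIME_HANDLE_TO_FD -> drm_gem_prime_handle_to_fd() */
	if (drmPrimeHandleToFD(exporter_fd, handle, DRM_CLOEXEC, &prime_fd))
		return -1;

	/* DRM_IOCTL_PRIME_FD_TO_HANDLE -> drm_gem_prime_fd_to_handle() */
	if (drmPrimeFDToHandle(importer_fd, prime_fd, &imported))
		return -1;

	return 0;	/* 'imported' is now a handle valid on importer_fd */
}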
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 8bf695c52f95..24e0aabda03c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -24,8 +24,6 @@
  *
  */
 
-#include <linux/dma-buf.h>
-
 #include <subdev/fb.h>
 
 #include "nouveau_drm.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 5c1049236d22..8d7a3f0aeb86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -35,9 +35,11 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
 extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
 				  struct drm_file *);
 
-extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
-				struct drm_gem_object *obj, int flags);
-extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
-				struct dma_buf *dma_buf);
+extern int nouveau_gem_prime_pin(struct drm_gem_object *);
+extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
+extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
+	struct drm_device *, size_t size, struct sg_table *);
+extern void *nouveau_gem_prime_vmap(struct drm_gem_object *);
+extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index b8e05ae38212..f53e10874cae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -22,126 +22,42 @@
  * Authors: Dave Airlie
  */
 
-#include <linux/dma-buf.h>
-
 #include <drm/drmP.h>
 
 #include "nouveau_drm.h"
 #include "nouveau_gem.h"
 
-static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
-						enum dma_data_direction dir)
+struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
-	struct nouveau_bo *nvbo = attachment->dmabuf->priv;
-	struct drm_device *dev = nvbo->gem->dev;
+	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
 	int npages = nvbo->bo.num_pages;
-	struct sg_table *sg;
-	int nents;
-
-	mutex_lock(&dev->struct_mutex);
-	sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
-	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
-	mutex_unlock(&dev->struct_mutex);
-	return sg;
-}
-
-static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
-				      struct sg_table *sg, enum dma_data_direction dir)
-{
-	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
-	sg_free_table(sg);
-	kfree(sg);
-}
-
-static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
-	struct nouveau_bo *nvbo = dma_buf->priv;
-
-	if (nvbo->gem->export_dma_buf == dma_buf) {
-		nvbo->gem->export_dma_buf = NULL;
-		drm_gem_object_unreference_unlocked(nvbo->gem);
-	}
-}
-
-static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
-{
-	return NULL;
-}
-
-static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
-{
 
-}
-static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
-{
-	return NULL;
+	return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
 }
 
-static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
 {
-
-}
-
-static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
-{
-	return -EINVAL;
-}
-
-static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
-{
-	struct nouveau_bo *nvbo = dma_buf->priv;
-	struct drm_device *dev = nvbo->gem->dev;
+	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
-	if (nvbo->vmapping_count) {
-		nvbo->vmapping_count++;
-		goto out_unlock;
-	}
-
 	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
 			  &nvbo->dma_buf_vmap);
-	if (ret) {
-		mutex_unlock(&dev->struct_mutex);
+	if (ret)
 		return ERR_PTR(ret);
-	}
-	nvbo->vmapping_count = 1;
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
+
 	return nvbo->dma_buf_vmap.virtual;
 }
 
-static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 {
-	struct nouveau_bo *nvbo = dma_buf->priv;
-	struct drm_device *dev = nvbo->gem->dev;
+	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
 
-	mutex_lock(&dev->struct_mutex);
-	nvbo->vmapping_count--;
-	if (nvbo->vmapping_count == 0) {
-		ttm_bo_kunmap(&nvbo->dma_buf_vmap);
-	}
-	mutex_unlock(&dev->struct_mutex);
+	ttm_bo_kunmap(&nvbo->dma_buf_vmap);
 }
 
-static const struct dma_buf_ops nouveau_dmabuf_ops = {
-	.map_dma_buf = nouveau_gem_map_dma_buf,
-	.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
-	.release = nouveau_gem_dmabuf_release,
-	.kmap = nouveau_gem_kmap,
-	.kmap_atomic = nouveau_gem_kmap_atomic,
-	.kunmap = nouveau_gem_kunmap,
-	.kunmap_atomic = nouveau_gem_kunmap_atomic,
-	.mmap = nouveau_gem_prime_mmap,
-	.vmap = nouveau_gem_prime_vmap,
-	.vunmap = nouveau_gem_prime_vunmap,
-};
-
-static int
-nouveau_prime_new(struct drm_device *dev,
-		  size_t size,
-		  struct sg_table *sg,
-		  struct nouveau_bo **pnvbo)
+struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
+							 size_t size,
+							 struct sg_table *sg)
 {
 	struct nouveau_bo *nvbo;
 	u32 flags = 0;
@@ -150,24 +66,22 @@ nouveau_prime_new(struct drm_device *dev,
 	flags = TTM_PL_FLAG_TT;
 
 	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
-			     sg, pnvbo);
+			     sg, &nvbo);
 	if (ret)
-		return ret;
-	nvbo = *pnvbo;
+		return ERR_PTR(ret);
 
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
 	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
 	if (!nvbo->gem) {
-		nouveau_bo_ref(NULL, pnvbo);
-		return -ENOMEM;
+		nouveau_bo_ref(NULL, &nvbo);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	nvbo->gem->driver_private = nvbo;
-	return 0;
+	return nvbo->gem;
 }
 
-struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
-					 struct drm_gem_object *obj, int flags)
+int nouveau_gem_prime_pin(struct drm_gem_object *obj)
 {
 	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
 	int ret = 0;
@@ -175,52 +89,7 @@ struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
 	/* pin buffer into GTT */
 	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
 	if (ret)
-		return ERR_PTR(-EINVAL);
-
-	return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
-}
-
-struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
-				struct dma_buf *dma_buf)
-{
-	struct dma_buf_attachment *attach;
-	struct sg_table *sg;
-	struct nouveau_bo *nvbo;
-	int ret;
-
-	if (dma_buf->ops == &nouveau_dmabuf_ops) {
-		nvbo = dma_buf->priv;
-		if (nvbo->gem) {
-			if (nvbo->gem->dev == dev) {
-				drm_gem_object_reference(nvbo->gem);
-				dma_buf_put(dma_buf);
-				return nvbo->gem;
-			}
-		}
-	}
-	/* need to attach */
-	attach = dma_buf_attach(dma_buf, dev->dev);
-	if (IS_ERR(attach))
-		return ERR_PTR(PTR_ERR(attach));
-
-	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
-	if (IS_ERR(sg)) {
-		ret = PTR_ERR(sg);
-		goto fail_detach;
-	}
-
-	ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
-	if (ret)
-		goto fail_unmap;
-
-	nvbo->gem->import_attach = attach;
-
-	return nvbo->gem;
+		return -EINVAL;
 
-fail_unmap:
-	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
-fail_detach:
-	dma_buf_detach(dma_buf, attach);
-	return ERR_PTR(ret);
+	return 0;
 }
-
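The driver-specific import path can be deleted outright because the generic drm_gem_prime_import helper already performs the same attach/map/self-import dance and only calls back into the driver through the gem_prime_import_sg_table hook. Roughly, simplified from the helper added by the parent commit (not part of this patch; the ops struct name is approximate):

#include <linux/dma-buf.h>
#include <drm/drmP.h>

struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	/* self-import shortcut: a buffer we exported ourselves just gets
	 * an extra GEM reference instead of a new attachment */
	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(PTR_ERR(attach));

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}

	/* the only driver-specific step: wrap the sg_table in a GEM object,
	 * which nouveau_gem_prime_import_sg_table() above now provides */
	obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}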