Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_dmabuf.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 99
1 file changed, 66 insertions(+), 33 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index eca4726f414d..4bb1b94df5c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -28,33 +28,57 @@
 #include <linux/dma-buf.h>
 
 static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                              enum dma_data_direction dir)
 {
         struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
-        struct drm_device *dev = obj->base.dev;
-        int npages = obj->base.size / PAGE_SIZE;
-        struct sg_table *sg;
-        int ret;
-        int nents;
+        struct sg_table *st;
+        struct scatterlist *src, *dst;
+        int ret, i;
 
-        ret = i915_mutex_lock_interruptible(dev);
+        ret = i915_mutex_lock_interruptible(obj->base.dev);
         if (ret)
                 return ERR_PTR(ret);
 
         ret = i915_gem_object_get_pages(obj);
         if (ret) {
-                sg = ERR_PTR(ret);
+                st = ERR_PTR(ret);
+                goto out;
+        }
+
+        /* Copy sg so that we make an independent mapping */
+        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+        if (st == NULL) {
+                st = ERR_PTR(-ENOMEM);
+                goto out;
+        }
+
+        ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
+        if (ret) {
+                kfree(st);
+                st = ERR_PTR(ret);
+                goto out;
+        }
+
+        src = obj->pages->sgl;
+        dst = st->sgl;
+        for (i = 0; i < obj->pages->nents; i++) {
+                sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+                dst = sg_next(dst);
+                src = sg_next(src);
+        }
+
+        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+                sg_free_table(st);
+                kfree(st);
+                st = ERR_PTR(-ENOMEM);
                 goto out;
         }
 
-        /* link the pages into an SG then map the sg */
-        sg = drm_prime_pages_to_sg(obj->pages, npages);
-        nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
         i915_gem_object_pin_pages(obj);
 
 out:
-        mutex_unlock(&dev->struct_mutex);
-        return sg;
+        mutex_unlock(&obj->base.dev->struct_mutex);
+        return st;
 }
 
 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
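
The hunk above stops building a throwaway scatterlist with drm_prime_pages_to_sg(); instead it copies the object's own sg_table entry by entry, so every attachment gets an independent mapping (hence the "Copy sg so that we make an independent mapping" comment), and the previously ignored dma_map_sg() return value now turns mapping failure into -ENOMEM. A minimal sketch of the same copy pattern, assuming one PAGE_SIZE page per entry as the patch does; the helper name clone_sg_table is hypothetical:

/* Hypothetical helper, mirroring the copy pattern in the hunk above */
static struct sg_table *clone_sg_table(struct sg_table *orig)
{
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int i;

        /* Allocate a table with the same number of entries as the original */
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
                return ERR_PTR(-ENOMEM);

        if (sg_alloc_table(st, orig->nents, GFP_KERNEL)) {
                kfree(st);
                return ERR_PTR(-ENOMEM);
        }

        /* Point each new entry at the same backing page as the original */
        src = orig->sgl;
        dst = st->sgl;
        for (i = 0; i < orig->nents; i++) {
                sg_set_page(dst, sg_page(src), PAGE_SIZE, 0); /* assumes page-sized segments */
                dst = sg_next(dst);
                src = sg_next(src);
        }

        return st;
}

The copy shares the backing pages but owns its scatterlist, so the caller can dma_map_sg() it for one device without disturbing mappings made for another.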
@@ -80,7 +104,9 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
         struct drm_i915_gem_object *obj = dma_buf->priv;
         struct drm_device *dev = obj->base.dev;
-        int ret;
+        struct scatterlist *sg;
+        struct page **pages;
+        int ret, i;
 
         ret = i915_mutex_lock_interruptible(dev);
         if (ret)
@@ -92,22 +118,33 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
         }
 
         ret = i915_gem_object_get_pages(obj);
-        if (ret) {
-                mutex_unlock(&dev->struct_mutex);
-                return ERR_PTR(ret);
-        }
+        if (ret)
+                goto error;
 
-        obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
-        if (!obj->dma_buf_vmapping) {
-                DRM_ERROR("failed to vmap object\n");
-                goto out_unlock;
-        }
+        ret = -ENOMEM;
+
+        pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+        if (pages == NULL)
+                goto error;
+
+        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
+                pages[i] = sg_page(sg);
+
+        obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+        drm_free_large(pages);
+
+        if (!obj->dma_buf_vmapping)
+                goto error;
 
         obj->vmapping_count = 1;
         i915_gem_object_pin_pages(obj);
 out_unlock:
         mutex_unlock(&dev->struct_mutex);
         return obj->dma_buf_vmapping;
+
+error:
+        mutex_unlock(&dev->struct_mutex);
+        return ERR_PTR(ret);
 }
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
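
The vmap path changes because obj->pages is now a struct sg_table * rather than a flat page array: vmap() still needs struct page pointers, so the code gathers them into a temporary array with for_each_sg(), maps that, and frees the array at once (vmap() copies the pointers into the new mapping's page tables, so the array is not needed afterwards). The two inline error returns are also folded into a single error: label. A sketch of the gather-then-vmap pattern; the helper name vmap_sg_pages is hypothetical:

/* Hypothetical helper showing the gather-then-vmap pattern used above */
static void *vmap_sg_pages(struct sg_table *st)
{
        struct scatterlist *sg;
        struct page **pages;
        void *addr;
        int i;

        /* Temporary array of page pointers, one per sg entry */
        pages = drm_malloc_ab(st->nents, sizeof(struct page *));
        if (pages == NULL)
                return NULL;

        for_each_sg(st->sgl, sg, st->nents, i)
                pages[i] = sg_page(sg); /* assumes one page per entry */

        addr = vmap(pages, st->nents, 0, PAGE_KERNEL);
        drm_free_large(pages); /* vmap() no longer needs the array */

        return addr;
}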
@@ -184,22 +221,19 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                       struct drm_gem_object *gem_obj, int flags)
 {
         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 
-        return dma_buf_export(obj, &i915_dmabuf_ops,
-                              obj->base.size, 0600);
+        return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
 }
 
 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                              struct dma_buf *dma_buf)
 {
         struct dma_buf_attachment *attach;
         struct sg_table *sg;
         struct drm_i915_gem_object *obj;
-        int npages;
-        int size;
         int ret;
 
         /* is this one of own objects? */
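
In the hunk above the export hook is merely reflowed onto one line; note it is the old four-argument dma_buf_export(priv, ops, size, flags) form of this kernel generation, not the later dma_buf_export_info variant. On the import side the npages/size locals become unused (see the next hunk) and are dropped. The "is this one of own objects?" check itself lies outside the hunk; a sketch of that usual self-import pattern, reconstructed from i915 code of this era and hypothetical in its details:

/* Hypothetical sketch of the self-import check referenced above */
static struct drm_gem_object *self_import_check(struct drm_device *dev,
                                                struct dma_buf *dma_buf)
{
        if (dma_buf->ops == &i915_dmabuf_ops) {
                struct drm_i915_gem_object *obj = dma_buf->priv;

                /* Importing our own export: just take another reference */
                if (obj->base.dev == dev) {
                        drm_gem_object_reference(&obj->base);
                        return &obj->base;
                }
        }

        return NULL; /* a foreign buffer: fall through to dma_buf_attach() */
}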
@@ -223,21 +257,19 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                 goto fail_detach;
         }
 
-        size = dma_buf->size;
-        npages = size / PAGE_SIZE;
-
         obj = kzalloc(sizeof(*obj), GFP_KERNEL);
         if (obj == NULL) {
                 ret = -ENOMEM;
                 goto fail_unmap;
         }
 
-        ret = drm_gem_private_object_init(dev, &obj->base, size);
+        ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
         if (ret) {
                 kfree(obj);
                 goto fail_unmap;
         }
 
+        obj->has_dma_mapping = true;
         obj->sg_table = sg;
         obj->base.import_attach = attach;
 
@@ -249,3 +281,4 @@ fail_detach:
         dma_buf_detach(dma_buf, attach);
         return ERR_PTR(ret);
 }
+
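
The import path now passes dma_buf->size straight to drm_gem_private_object_init() and, more importantly, sets obj->has_dma_mapping = true: the sg_table returned by dma_buf_map_attachment() arrives already DMA-mapped by the exporter, and the flag tells the rest of i915 not to map those pages again. A hypothetical end-to-end sketch of how an importer would exercise the paths touched by this patch (importer_dev is an assumed struct device):

/* Hypothetical usage sketch; importer_dev and the function name are assumptions */
static int example_export_and_map(struct drm_device *drm_dev,
                                  struct drm_gem_object *gem_obj,
                                  struct device *importer_dev)
{
        struct dma_buf *buf;
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        /* Export the GEM object as a dma-buf (old 4-argument export path) */
        buf = i915_gem_prime_export(drm_dev, gem_obj, 0);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        attach = dma_buf_attach(buf, importer_dev);
        if (IS_ERR(attach))
                return PTR_ERR(attach);

        /* After this patch, each map yields an independent sg_table copy */
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt))
                return PTR_ERR(sgt);

        /* ... use sgt for DMA, then unmap and detach ... */
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        dma_buf_detach(buf, attach);

        return 0;
}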