about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/drm/i915/i915_gem_dmabuf.c
diff options
context:
space:
mode:
author: Chris Wilson <chris@chris-wilson.co.uk> 2012-09-04 16:02:58 -0400
committer: Daniel Vetter <daniel.vetter@ffwll.ch> 2012-09-20 08:23:10 -0400
commit: 2f745ad3d3ce96cb72d3981570d9e9988442bce8 (patch)
tree: c885743dde9b8c1862490e97ff8f086ad4055a30 /drivers/gpu/drm/i915/i915_gem_dmabuf.c
parent: 68d3472047a572936551f8ff0b6f4016c5a1fdef (diff)
drm/i915: Convert the dmabuf object to use the new i915_gem_object_ops
By providing a callback for when we need to bind the pages, and then release them again later, we can shorten the amount of time we hold the foreign pages mapped and pinned, and importantly the dmabuf objects then behave as any other normal object with respect to the shrinker and memory management. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Ben Widawsky <ben@bwidawsk.net> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_dmabuf.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c  44
1 file changed, 30 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 4bb1b94df5c4..ca3497e1108c 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -82,7 +82,8 @@ out:
82} 82}
83 83
84static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, 84static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
85 struct sg_table *sg, enum dma_data_direction dir) 85 struct sg_table *sg,
86 enum dma_data_direction dir)
86{ 87{
87 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); 88 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
88 sg_free_table(sg); 89 sg_free_table(sg);
@@ -228,11 +229,35 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
228 return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600); 229 return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
229} 230}
230 231
232static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
233{
234 struct sg_table *sg;
235
236 sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
237 if (IS_ERR(sg))
238 return PTR_ERR(sg);
239
240 obj->pages = sg;
241 obj->has_dma_mapping = true;
242 return 0;
243}
244
245static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
246{
247 dma_buf_unmap_attachment(obj->base.import_attach,
248 obj->pages, DMA_BIDIRECTIONAL);
249 obj->has_dma_mapping = false;
250}
251
252static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
253 .get_pages = i915_gem_object_get_pages_dmabuf,
254 .put_pages = i915_gem_object_put_pages_dmabuf,
255};
256
231struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 257struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
232 struct dma_buf *dma_buf) 258 struct dma_buf *dma_buf)
233{ 259{
234 struct dma_buf_attachment *attach; 260 struct dma_buf_attachment *attach;
235 struct sg_table *sg;
236 struct drm_i915_gem_object *obj; 261 struct drm_i915_gem_object *obj;
237 int ret; 262 int ret;
238 263
@@ -251,34 +276,25 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
251 if (IS_ERR(attach)) 276 if (IS_ERR(attach))
252 return ERR_CAST(attach); 277 return ERR_CAST(attach);
253 278
254 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
255 if (IS_ERR(sg)) {
256 ret = PTR_ERR(sg);
257 goto fail_detach;
258 }
259 279
260 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 280 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
261 if (obj == NULL) { 281 if (obj == NULL) {
262 ret = -ENOMEM; 282 ret = -ENOMEM;
263 goto fail_unmap; 283 goto fail_detach;
264 } 284 }
265 285
266 ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size); 286 ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
267 if (ret) { 287 if (ret) {
268 kfree(obj); 288 kfree(obj);
269 goto fail_unmap; 289 goto fail_detach;
270 } 290 }
271 291
272 obj->has_dma_mapping = true; 292 i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
273 obj->sg_table = sg;
274 obj->base.import_attach = attach; 293 obj->base.import_attach = attach;
275 294
276 return &obj->base; 295 return &obj->base;
277 296
278fail_unmap:
279 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
280fail_detach: 297fail_detach:
281 dma_buf_detach(dma_buf, attach); 298 dma_buf_detach(dma_buf, attach);
282 return ERR_PTR(ret); 299 return ERR_PTR(ret);
283} 300}
284