aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2012-09-04 16:02:58 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2012-09-20 08:23:10 -0400
commit2f745ad3d3ce96cb72d3981570d9e9988442bce8 (patch)
treec885743dde9b8c1862490e97ff8f086ad4055a30 /drivers/gpu
parent68d3472047a572936551f8ff0b6f4016c5a1fdef (diff)
drm/i915: Convert the dmabuf object to use the new i915_gem_object_ops
By providing a callback for when we need to bind the pages, and then
release them again later, we can shorten the amount of time we hold the
foreign pages mapped and pinned, and importantly the dmabuf objects then
behave as any other normal object with respect to the shrinker and
memory management.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c44
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c4
4 files changed, 37 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a04c567ade63..af5ceb46e7bd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1012,7 +1012,6 @@ struct drm_i915_gem_object {
1012 int pages_pin_count; 1012 int pages_pin_count;
1013 1013
1014 /* prime dma-buf support */ 1014 /* prime dma-buf support */
1015 struct sg_table *sg_table;
1016 void *dma_buf_vmapping; 1015 void *dma_buf_vmapping;
1017 int vmapping_count; 1016 int vmapping_count;
1018 1017
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8f001fa155a1..61c3640188bb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1692,7 +1692,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1692{ 1692{
1693 const struct drm_i915_gem_object_ops *ops = obj->ops; 1693 const struct drm_i915_gem_object_ops *ops = obj->ops;
1694 1694
1695 if (obj->sg_table || obj->pages == NULL) 1695 if (obj->pages == NULL)
1696 return 0; 1696 return 0;
1697 1697
1698 BUG_ON(obj->gtt_space); 1698 BUG_ON(obj->gtt_space);
@@ -1845,7 +1845,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1845 const struct drm_i915_gem_object_ops *ops = obj->ops; 1845 const struct drm_i915_gem_object_ops *ops = obj->ops;
1846 int ret; 1846 int ret;
1847 1847
1848 if (obj->sg_table || obj->pages) 1848 if (obj->pages)
1849 return 0; 1849 return 0;
1850 1850
1851 BUG_ON(obj->pages_pin_count); 1851 BUG_ON(obj->pages_pin_count);
@@ -3738,9 +3738,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3738 3738
3739 trace_i915_gem_object_destroy(obj); 3739 trace_i915_gem_object_destroy(obj);
3740 3740
3741 if (gem_obj->import_attach)
3742 drm_prime_gem_destroy(gem_obj, obj->sg_table);
3743
3744 if (obj->phys_obj) 3741 if (obj->phys_obj)
3745 i915_gem_detach_phys_object(dev, obj); 3742 i915_gem_detach_phys_object(dev, obj);
3746 3743
@@ -3762,6 +3759,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3762 3759
3763 BUG_ON(obj->pages); 3760 BUG_ON(obj->pages);
3764 3761
3762 if (obj->base.import_attach)
3763 drm_prime_gem_destroy(&obj->base, NULL);
3764
3765 drm_gem_object_release(&obj->base); 3765 drm_gem_object_release(&obj->base);
3766 i915_gem_info_remove_obj(dev_priv, obj->base.size); 3766 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3767 3767
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 4bb1b94df5c4..ca3497e1108c 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -82,7 +82,8 @@ out:
82} 82}
83 83
84static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment, 84static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
85 struct sg_table *sg, enum dma_data_direction dir) 85 struct sg_table *sg,
86 enum dma_data_direction dir)
86{ 87{
87 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); 88 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
88 sg_free_table(sg); 89 sg_free_table(sg);
@@ -228,11 +229,35 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
228 return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600); 229 return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600);
229} 230}
230 231
232static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
233{
234 struct sg_table *sg;
235
236 sg = dma_buf_map_attachment(obj->base.import_attach, DMA_BIDIRECTIONAL);
237 if (IS_ERR(sg))
238 return PTR_ERR(sg);
239
240 obj->pages = sg;
241 obj->has_dma_mapping = true;
242 return 0;
243}
244
245static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj)
246{
247 dma_buf_unmap_attachment(obj->base.import_attach,
248 obj->pages, DMA_BIDIRECTIONAL);
249 obj->has_dma_mapping = false;
250}
251
252static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
253 .get_pages = i915_gem_object_get_pages_dmabuf,
254 .put_pages = i915_gem_object_put_pages_dmabuf,
255};
256
231struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, 257struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
232 struct dma_buf *dma_buf) 258 struct dma_buf *dma_buf)
233{ 259{
234 struct dma_buf_attachment *attach; 260 struct dma_buf_attachment *attach;
235 struct sg_table *sg;
236 struct drm_i915_gem_object *obj; 261 struct drm_i915_gem_object *obj;
237 int ret; 262 int ret;
238 263
@@ -251,34 +276,25 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
251 if (IS_ERR(attach)) 276 if (IS_ERR(attach))
252 return ERR_CAST(attach); 277 return ERR_CAST(attach);
253 278
254 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
255 if (IS_ERR(sg)) {
256 ret = PTR_ERR(sg);
257 goto fail_detach;
258 }
259 279
260 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 280 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
261 if (obj == NULL) { 281 if (obj == NULL) {
262 ret = -ENOMEM; 282 ret = -ENOMEM;
263 goto fail_unmap; 283 goto fail_detach;
264 } 284 }
265 285
266 ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size); 286 ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
267 if (ret) { 287 if (ret) {
268 kfree(obj); 288 kfree(obj);
269 goto fail_unmap; 289 goto fail_detach;
270 } 290 }
271 291
272 obj->has_dma_mapping = true; 292 i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
273 obj->sg_table = sg;
274 obj->base.import_attach = attach; 293 obj->base.import_attach = attach;
275 294
276 return &obj->base; 295 return &obj->base;
277 296
278fail_unmap:
279 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
280fail_detach: 297fail_detach:
281 dma_buf_detach(dma_buf, attach); 298 dma_buf_detach(dma_buf, attach);
282 return ERR_PTR(ret); 299 return ERR_PTR(ret);
283} 300}
284
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e0c9bddb7d92..1b1bc0025339 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -234,7 +234,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
234 } 234 }
235 235
236 i915_ppgtt_insert_sg_entries(ppgtt, 236 i915_ppgtt_insert_sg_entries(ppgtt,
237 obj->sg_table ?: obj->pages, 237 obj->pages,
238 obj->gtt_space->start >> PAGE_SHIFT, 238 obj->gtt_space->start >> PAGE_SHIFT,
239 pte_flags); 239 pte_flags);
240} 240}
@@ -325,7 +325,7 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
325 struct drm_device *dev = obj->base.dev; 325 struct drm_device *dev = obj->base.dev;
326 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); 326 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
327 327
328 intel_gtt_insert_sg_entries(obj->sg_table ?: obj->pages, 328 intel_gtt_insert_sg_entries(obj->pages,
329 obj->gtt_space->start >> PAGE_SHIFT, 329 obj->gtt_space->start >> PAGE_SHIFT,
330 agp_type); 330 agp_type);
331 obj->has_global_gtt_mapping = 1; 331 obj->has_global_gtt_mapping = 1;