author     Daniel Vetter <daniel.vetter@ffwll.ch>    2013-08-08 03:10:38 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2013-08-22 07:31:49 -0400
commit     608806a549c656c925eeb253cbed768535f26e41
tree       7f5ce2a18024022008a19b43d676529eb0b475e7
parent     f214266c0d147c0a2608caafc43c832f1738f0a9
drm/i915: explicit store base gem object in dma_buf->priv
This makes the trick we play by reusing the drm prime release helper more obviously correct.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--    drivers/gpu/drm/i915/i915_gem_dmabuf.c    21
1 file changed, 12 insertions(+), 9 deletions(-)
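For context, a minimal illustrative sketch of why the change helps, not code from this commit: once the base struct drm_gem_object is what sits in dma_buf->priv, a generic release path can treat priv as a plain GEM object, while driver code converts back with to_intel_bo() as the new dma_buf_to_obj() helper below does. The helper name example_gem_dmabuf_release() and the unreference call are assumptions modelled on the DRM prime helpers of this era.

/*
 * Sketch only (assumes DRM core helpers of this era): a generic release
 * hook never needs to know about struct drm_i915_gem_object when priv
 * holds the base GEM object.
 */
static void example_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	/* priv is the base object, per the convention this patch adopts */
	struct drm_gem_object *obj = dma_buf->priv;

	/* drop the reference taken when the buffer was exported */
	drm_gem_object_unreference_unlocked(obj);
}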
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 9e6578330801..938eb341054c 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -27,10 +27,15 @@
 #include "i915_drv.h"
 #include <linux/dma-buf.h>
 
+static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
+{
+	return to_intel_bo(buf->priv);
+}
+
 static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
 					     enum dma_data_direction dir)
 {
-	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
 	struct sg_table *st;
 	struct scatterlist *src, *dst;
 	int ret, i;
@@ -85,7 +90,7 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
 				   struct sg_table *sg,
 				   enum dma_data_direction dir)
 {
-	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
 
 	mutex_lock(&obj->base.dev->struct_mutex);
 
@@ -111,7 +116,7 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
 
 static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
-	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
 	struct sg_page_iter sg_iter;
 	struct page **pages;
@@ -159,7 +164,7 @@ error:
 
 static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 {
-	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
 	int ret;
 
@@ -202,7 +207,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
 
 static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
 {
-	struct drm_i915_gem_object *obj = dma_buf->priv;
+	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	struct drm_device *dev = obj->base.dev;
 	int ret;
 	bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
@@ -233,9 +238,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags)
 {
-	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
-	return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
+	return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -272,7 +275,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 
 	/* is this one of own objects? */
 	if (dma_buf->ops == &i915_dmabuf_ops) {
-		obj = dma_buf->priv;
+		obj = dma_buf_to_obj(dma_buf);
 		/* is it from our device? */
 		if (obj->base.dev == dev) {
 			/*