aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/drm_prime.c
diff options
context:
space:
mode:
authorJoonyoung Shim <jy0922.shim@samsung.com>2013-06-19 02:03:05 -0400
committerDave Airlie <airlied@redhat.com>2013-06-27 22:47:32 -0400
commit538d6661f5d8ad9dcf4ab66c9a99407464111e7a (patch)
treefd0ba59b8a4fdb2b05aeba16a1ac9c430f2db638 /drivers/gpu/drm/drm_prime.c
parent7e3d88f9cce3ea3350fa25b89393a6dd2b8e5ed4 (diff)
drm/prime: support to cache mapping
The drm prime code can also support cached mappings, in the same way GEM CMA supports mapping caching. It doesn't allow multiple mappings for one attachment. [airlied: rebased on top of other prime changes] Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com> Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/drm_prime.c')
-rw-r--r--drivers/gpu/drm/drm_prime.c52
1 file changed, 49 insertions, 3 deletions
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 4ad2c45090b3..b1cd47438850 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -62,15 +62,29 @@ struct drm_prime_member {
62 struct dma_buf *dma_buf; 62 struct dma_buf *dma_buf;
63 uint32_t handle; 63 uint32_t handle;
64}; 64};
65
66struct drm_prime_attachment {
67 struct sg_table *sgt;
68 enum dma_data_direction dir;
69};
70
65static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle); 71static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
66 72
67static int drm_gem_map_attach(struct dma_buf *dma_buf, 73static int drm_gem_map_attach(struct dma_buf *dma_buf,
68 struct device *target_dev, 74 struct device *target_dev,
69 struct dma_buf_attachment *attach) 75 struct dma_buf_attachment *attach)
70{ 76{
77 struct drm_prime_attachment *prime_attach;
71 struct drm_gem_object *obj = dma_buf->priv; 78 struct drm_gem_object *obj = dma_buf->priv;
72 struct drm_device *dev = obj->dev; 79 struct drm_device *dev = obj->dev;
73 80
81 prime_attach = kzalloc(sizeof(*prime_attach), GFP_KERNEL);
82 if (!prime_attach)
83 return -ENOMEM;
84
85 prime_attach->dir = DMA_NONE;
86 attach->priv = prime_attach;
87
74 if (!dev->driver->gem_prime_pin) 88 if (!dev->driver->gem_prime_pin)
75 return 0; 89 return 0;
76 90
@@ -80,19 +94,50 @@ static int drm_gem_map_attach(struct dma_buf *dma_buf,
80static void drm_gem_map_detach(struct dma_buf *dma_buf, 94static void drm_gem_map_detach(struct dma_buf *dma_buf,
81 struct dma_buf_attachment *attach) 95 struct dma_buf_attachment *attach)
82{ 96{
97 struct drm_prime_attachment *prime_attach = attach->priv;
83 struct drm_gem_object *obj = dma_buf->priv; 98 struct drm_gem_object *obj = dma_buf->priv;
84 struct drm_device *dev = obj->dev; 99 struct drm_device *dev = obj->dev;
100 struct sg_table *sgt;
85 101
86 if (dev->driver->gem_prime_unpin) 102 if (dev->driver->gem_prime_unpin)
87 dev->driver->gem_prime_unpin(obj); 103 dev->driver->gem_prime_unpin(obj);
104
105 if (!prime_attach)
106 return;
107
108 sgt = prime_attach->sgt;
109
110 if (prime_attach->dir != DMA_NONE)
111 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
112 prime_attach->dir);
113
114 sg_free_table(sgt);
115 kfree(sgt);
116 kfree(prime_attach);
117 attach->priv = NULL;
88} 118}
89 119
90static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach, 120static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
91 enum dma_data_direction dir) 121 enum dma_data_direction dir)
92{ 122{
123 struct drm_prime_attachment *prime_attach = attach->priv;
93 struct drm_gem_object *obj = attach->dmabuf->priv; 124 struct drm_gem_object *obj = attach->dmabuf->priv;
94 struct sg_table *sgt; 125 struct sg_table *sgt;
95 126
127 if (WARN_ON(dir == DMA_NONE || !prime_attach))
128 return ERR_PTR(-EINVAL);
129
130 /* return the cached mapping when possible */
131 if (prime_attach->dir == dir)
132 return prime_attach->sgt;
133
134 /*
135 * two mappings with different directions for the same attachment are
136 * not allowed
137 */
138 if (WARN_ON(prime_attach->dir != DMA_NONE))
139 return ERR_PTR(-EBUSY);
140
96 mutex_lock(&obj->dev->struct_mutex); 141 mutex_lock(&obj->dev->struct_mutex);
97 142
98 sgt = obj->dev->driver->gem_prime_get_sg_table(obj); 143 sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
@@ -102,6 +147,9 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
102 sg_free_table(sgt); 147 sg_free_table(sgt);
103 kfree(sgt); 148 kfree(sgt);
104 sgt = ERR_PTR(-ENOMEM); 149 sgt = ERR_PTR(-ENOMEM);
150 } else {
151 prime_attach->sgt = sgt;
152 prime_attach->dir = dir;
105 } 153 }
106 } 154 }
107 155
@@ -112,9 +160,7 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
112static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach, 160static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
113 struct sg_table *sgt, enum dma_data_direction dir) 161 struct sg_table *sgt, enum dma_data_direction dir)
114{ 162{
115 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); 163 /* nothing to be done here */
116 sg_free_table(sgt);
117 kfree(sgt);
118} 164}
119 165
120static void drm_gem_dmabuf_release(struct dma_buf *dma_buf) 166static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)