author     Dave Airlie <airlied@redhat.com>    2012-03-26 09:36:56 -0400
committer  Dave Airlie <airlied@redhat.com>    2012-05-31 09:13:59 -0400
commit     e8aa1d1ebcbcf98fbb20cad83098f25c7d52753f (patch)
tree       a4b4cb36871ac3174407c4baafef93c7fdf23f5e /drivers/gpu/drm
parent     9a70cc2a7882dfc0d44a623b4a84f279714a6372 (diff)
udl: support vmapping imported dma-bufs
This allows udl to get a vmapping of an imported buffer for scanout.

Signed-off-by: Dave Airlie <airlied@redhat.com>
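For context, a minimal sketch of the CPU-access pairing the patch relies on for imported dma-bufs, assuming the 3.x-era dma-buf interfaces visible in the diff below (dma_buf_begin_cpu_access()/dma_buf_end_cpu_access() still take a start/len range). The helper names are hypothetical and are not part of the patch:

/*
 * Hypothetical helpers illustrating the vmap/vunmap pairing for an
 * imported dma-buf; a sketch only, not code from this patch.
 */
#include <linux/dma-buf.h>

static void *sketch_vmap_imported(struct dma_buf *buf, size_t size)
{
	void *vaddr;

	/* Let the exporter prepare the buffer for CPU access. */
	if (dma_buf_begin_cpu_access(buf, 0, size, DMA_BIDIRECTIONAL))
		return NULL;

	/* Ask the exporter for a kernel virtual mapping of its pages. */
	vaddr = dma_buf_vmap(buf);
	if (!vaddr)
		dma_buf_end_cpu_access(buf, 0, size, DMA_BIDIRECTIONAL);
	return vaddr;
}

static void sketch_vunmap_imported(struct dma_buf *buf, size_t size,
				   void *vaddr)
{
	/* Drop the mapping, then tell the exporter CPU access is done. */
	dma_buf_vunmap(buf, vaddr);
	dma_buf_end_cpu_access(buf, 0, size, DMA_BIDIRECTIONAL);
}

In the patch itself this pattern lives in udl_gem_vmap()/udl_gem_vunmap(), so udl_handle_damage() can read the imported scanout buffer through obj->vmapping.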
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c  | 13
-rw-r--r--  drivers/gpu/drm/udl/udl_gem.c | 25
2 files changed, 33 insertions, 5 deletions
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index a029ee39b0c5..ce9a61179925 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -156,8 +156,17 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 	if (!fb->active_16)
 		return 0;
 
-	if (!fb->obj->vmapping)
-		udl_gem_vmap(fb->obj);
+	if (!fb->obj->vmapping) {
+		ret = udl_gem_vmap(fb->obj);
+		if (ret == -ENOMEM) {
+			DRM_ERROR("failed to vmap fb\n");
+			return 0;
+		}
+		if (!fb->obj->vmapping) {
+			DRM_ERROR("failed to vmapping\n");
+			return 0;
+		}
+	}
 
 	start_cycles = get_cycles();
 
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 97acc9c6c95b..7bd65bdd15a8 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -180,6 +180,18 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 	int page_count = obj->base.size / PAGE_SIZE;
 	int ret;
 
+	if (obj->base.import_attach) {
+		ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
+					       0, obj->base.size, DMA_BIDIRECTIONAL);
+		if (ret)
+			return -EINVAL;
+
+		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
+		if (!obj->vmapping)
+			return -ENOMEM;
+		return 0;
+	}
+
 	ret = udl_gem_get_pages(obj, GFP_KERNEL);
 	if (ret)
 		return ret;
@@ -192,6 +204,13 @@ int udl_gem_vmap(struct udl_gem_object *obj)
 
 void udl_gem_vunmap(struct udl_gem_object *obj)
 {
+	if (obj->base.import_attach) {
+		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
+		dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
+				       obj->base.size, DMA_BIDIRECTIONAL);
+		return;
+	}
+
 	if (obj->vmapping)
 		vunmap(obj->vmapping);
 
@@ -202,12 +221,12 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
 {
 	struct udl_gem_object *obj = to_udl_bo(gem_obj);
 
-	if (gem_obj->import_attach)
-		drm_prime_gem_destroy(gem_obj, obj->sg);
-
 	if (obj->vmapping)
 		udl_gem_vunmap(obj);
 
+	if (gem_obj->import_attach)
+		drm_prime_gem_destroy(gem_obj, obj->sg);
+
 	if (obj->pages)
 		udl_gem_put_pages(obj);
 