author    Inki Dae <inki.dae@samsung.com>    2014-09-18 01:19:01 -0400
committer Inki Dae <daeinki@gmail.com>       2014-09-19 12:00:13 -0400
commit    832316c704fe3d15ae6ca9a552ae80411d1bbbcd (patch)
tree      ec320b5b3efa49532e31ee9e5d1b77a190da2799 /drivers/gpu
parent    d931589c01a20595d67192f075f9c84093c43c45 (diff)
drm/exynos: use drm generic mmap interface
This patch removes the DRM_EXYNOS_GEM_MMAP ioctl specific to the Exynos DRM driver and uses the DRM generic mmap interface instead.

We had used the Exynos-specific interface to mmap a buffer directly rather than rely on demand paging, which maps each page to physical memory in the page fault handler. The driver-specific mmap interface is no longer needed because the DRM generic mmap path, which uses the vm offset manager, can also map the buffer directly. With this patch, the userspace region is mapped to the whole physical memory region allocated for the buffer when the mmap system call is made.

Changelog v2:
- do not set VM_IO, VM_DONTEXPAND and VM_DONTDUMP; these flags are already set by drm_gem_mmap
- do not include <linux/anon_inodes.h>, which is not needed anymore

Signed-off-by: Inki Dae <inki.dae@samsung.com>
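For context, the generic path this patch switches to looks like this from userspace: the kernel hands out a fake mmap offset through the vm offset manager, and userspace passes that offset to mmap() on the DRM fd instead of calling a driver-specific ioctl. Below is a minimal sketch of that usage, assuming the buffer is allocated as a dumb buffer on /dev/dri/card0; the device path and buffer parameters are illustrative and not taken from this patch.

/*
 * Sketch: map a GEM buffer through the generic DRM mmap path.
 * Assumes a dumb buffer and /dev/dri/card0 (illustrative values).
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>

int main(void)
{
	struct drm_mode_create_dumb create = { .width = 64, .height = 64, .bpp = 32 };
	struct drm_mode_map_dumb map = { 0 };
	void *ptr;
	int fd;

	fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0)
		return 1;

	/* allocate a GEM buffer object */
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	/* ask the kernel for the fake offset managed by the vm offset manager */
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return 1;

	/* map the whole buffer through the DRM fd; no driver-specific ioctl needed */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);
	if (ptr == MAP_FAILED)
		return 1;

	memset(ptr, 0, create.size);
	munmap(ptr, create.size);
	close(fd);
	return 0;
}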
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c  | 26
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h  |  1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c  | 89
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h  | 10
4 files changed, 16 insertions(+), 110 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index d69bd9723805..513ba940bae0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -15,7 +15,6 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 
-#include <linux/anon_inodes.h>
 #include <linux/component.h>
 
 #include <drm/exynos_drm.h>
@@ -166,10 +165,6 @@ static int exynos_drm_unload(struct drm_device *dev)
 	return 0;
 }
 
-static const struct file_operations exynos_drm_gem_fops = {
-	.mmap = exynos_drm_gem_mmap_buffer,
-};
-
 static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
 {
 	struct drm_connector *connector;
@@ -208,7 +203,6 @@ static int exynos_drm_resume(struct drm_device *dev)
 static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 {
 	struct drm_exynos_file_private *file_priv;
-	struct file *anon_filp;
 	int ret;
 
 	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
@@ -221,21 +215,8 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
 	if (ret)
 		goto err_file_priv_free;
 
-	anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
-					NULL, 0);
-	if (IS_ERR(anon_filp)) {
-		ret = PTR_ERR(anon_filp);
-		goto err_subdrv_close;
-	}
-
-	anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
-	file_priv->anon_filp = anon_filp;
-
 	return ret;
 
-err_subdrv_close:
-	exynos_drm_subdrv_close(dev, file);
-
 err_file_priv_free:
 	kfree(file_priv);
 	file->driver_priv = NULL;
@@ -251,7 +232,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct exynos_drm_private *private = dev->dev_private;
-	struct drm_exynos_file_private *file_priv;
 	struct drm_pending_vblank_event *v, *vt;
 	struct drm_pending_event *e, *et;
 	unsigned long flags;
@@ -277,10 +257,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	file_priv = file->driver_priv;
-	if (file_priv->anon_filp)
-		fput(file_priv->anon_filp);
-
 	kfree(file->driver_priv);
 	file->driver_priv = NULL;
 }
@@ -299,8 +275,6 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
 static const struct drm_ioctl_desc exynos_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
 			DRM_UNLOCKED | DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
-			exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
 	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_GET,
 			exynos_drm_gem_get_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 69a6fa397d75..d22e640f59a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -240,7 +240,6 @@ struct exynos_drm_g2d_private {
 struct drm_exynos_file_private {
 	struct exynos_drm_g2d_private *g2d_priv;
 	struct device *ipp_dev;
-	struct file *anon_filp;
 };
 
 /*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 2f3665de2d60..0d5b9698d384 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -318,23 +318,16 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 	drm_gem_object_unreference_unlocked(obj);
 }
 
-int exynos_drm_gem_mmap_buffer(struct file *filp,
+int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
 				      struct vm_area_struct *vma)
 {
-	struct drm_gem_object *obj = filp->private_data;
-	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
-	struct drm_device *drm_dev = obj->dev;
+	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
 	struct exynos_drm_gem_buf *buffer;
 	unsigned long vm_size;
 	int ret;
 
-	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
-
-	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_private_data = obj;
-	vma->vm_ops = drm_dev->driver->gem_vm_ops;
-
-	update_vm_cache_attr(exynos_gem_obj, vma);
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_pgoff = 0;
 
 	vm_size = vma->vm_end - vma->vm_start;
 
@@ -356,60 +349,6 @@ int exynos_drm_gem_mmap_buffer(struct file *filp,
 		return ret;
 	}
 
-	/*
-	 * take a reference to this mapping of the object. And this reference
-	 * is unreferenced by the corresponding vm_close call.
-	 */
-	drm_gem_object_reference(obj);
-
-	drm_vm_open_locked(drm_dev, vma);
-
-	return 0;
-}
-
-int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv)
-{
-	struct drm_exynos_file_private *exynos_file_priv;
-	struct drm_exynos_gem_mmap *args = data;
-	struct drm_gem_object *obj;
-	struct file *anon_filp;
-	unsigned long addr;
-
-	if (!(dev->driver->driver_features & DRIVER_GEM)) {
-		DRM_ERROR("does not support GEM.\n");
-		return -ENODEV;
-	}
-
-	mutex_lock(&dev->struct_mutex);
-
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
-	if (!obj) {
-		DRM_ERROR("failed to lookup gem object.\n");
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-
-	exynos_file_priv = file_priv->driver_priv;
-	anon_filp = exynos_file_priv->anon_filp;
-	anon_filp->private_data = obj;
-
-	addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
-			MAP_SHARED, 0);
-
-	drm_gem_object_unreference(obj);
-
-	if (IS_ERR_VALUE(addr)) {
-		mutex_unlock(&dev->struct_mutex);
-		return (int)addr;
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-	args->mapped = addr;
-
-	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
-
 	return 0;
 }
 
@@ -693,16 +632,20 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	exynos_gem_obj = to_exynos_gem_obj(obj);
 
 	ret = check_gem_flags(exynos_gem_obj->flags);
-	if (ret) {
-		drm_gem_vm_close(vma);
-		drm_gem_free_mmap_offset(obj);
-		return ret;
-	}
-
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_flags |= VM_MIXEDMAP;
+	if (ret)
+		goto err_close_vm;
 
 	update_vm_cache_attr(exynos_gem_obj, vma);
 
+	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
+	if (ret)
+		goto err_close_vm;
+
+	return ret;
+
+err_close_vm:
+	drm_gem_vm_close(vma);
+	drm_gem_free_mmap_offset(obj);
+
 	return ret;
 }
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 8e460940d118..09d021bbccf5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -111,16 +111,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
 					struct drm_file *filp);
 
-/*
- * mmap the physically continuous memory that a gem object contains
- * to user space.
- */
-int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
-			      struct drm_file *file_priv);
-
-int exynos_drm_gem_mmap_buffer(struct file *filp,
-			       struct vm_area_struct *vma);
-
 /* map user space allocated by malloc to pages. */
 int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
 				 struct drm_file *file_priv);