author     Jan Kara <jack@suse.cz>                          2015-07-20 04:03:35 -0400
committer  Mauro Carvalho Chehab <mchehab@osg.samsung.com>  2015-08-16 12:15:58 -0400
commit     63540f01917c0d8b03b9813a0d6539469b163139
tree       40db746f15285aa194bdbf832d65d1f00468bac5
parent     6690c8c78c745239bb1f22b23f3889a0a14c249b
[media] drm/exynos: Convert g2d_userptr_get_dma_addr() to use get_vaddr_frames()
Convert g2d_userptr_get_dma_addr() to pin pages using get_vaddr_frames().
This removes the knowledge about VMAs and mmap_sem locking from the exynos
driver. It also fixes a problem where the function mapped a user-provided
address without holding mmap_sem.
Acked-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
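
For readers who have not used the frame-vector API before, here is a minimal
sketch of the pin/unpin lifecycle the driver switches to. The helper names
below are hypothetical and not part of the patch; only the get_vaddr_frames(),
put_vaddr_frames() and frame_vector_*() calls, with the same arguments as in
the hunks that follow, are taken from the patch (the two boolean write/force
arguments match the kernel API of this era).

/*
 * Illustrative sketch only: the pin/unpin pattern this patch adopts.
 * Helper names are hypothetical; the frame-vector calls mirror the diff below.
 */
#include <linux/err.h>
#include <linux/mm.h>

static struct frame_vector *pin_userptr_range(unsigned long start,
                                              unsigned int npages)
{
        struct frame_vector *vec;
        int ret;

        vec = frame_vector_create(npages);
        if (!vec)
                return ERR_PTR(-ENOMEM);

        /* Pins the pages; mmap_sem is taken and released internally. */
        ret = get_vaddr_frames(start, npages, true, true, vec);
        if (ret < 0)
                goto err_destroy;
        if (ret != npages || frame_vector_to_pages(vec) < 0) {
                ret = -EFAULT;
                goto err_put;
        }
        return vec;

err_put:
        put_vaddr_frames(vec);
err_destroy:
        frame_vector_destroy(vec);
        return ERR_PTR(ret);
}

static void unpin_userptr_range(struct frame_vector *vec)
{
        struct page **pages = frame_vector_pages(vec);

        /* Mark the pages dirty before dropping the pin references. */
        if (!IS_ERR(pages)) {
                int i;

                for (i = 0; i < frame_vector_count(vec); i++)
                        set_page_dirty_lock(pages[i]);
        }
        put_vaddr_frames(vec);
        frame_vector_destroy(vec);
}

The split between put_vaddr_frames() (drop the page references) and
frame_vector_destroy() (free the vector itself) is why the patch below needs
two error labels, err_put_framevec and err_destroy_framevec.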
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig          |  1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 89
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 97
3 files changed, 30 insertions, 157 deletions
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 43003c4ad80b..b364562dc6c1 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -77,6 +77,7 @@ config DRM_EXYNOS_VIDI
 config DRM_EXYNOS_G2D
         bool "Exynos DRM G2D"
         depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
+        select FRAME_VECTOR
         help
           Choose this option if you want to use Exynos G2D for DRM.
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 81a250830808..7584834a53c9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -190,10 +190,8 @@ struct g2d_cmdlist_userptr {
         dma_addr_t dma_addr;
         unsigned long userptr;
         unsigned long size;
-        struct page **pages;
-        unsigned int npages;
+        struct frame_vector *vec;
         struct sg_table *sgt;
-        struct vm_area_struct *vma;
         atomic_t refcount;
         bool in_pool;
         bool out_of_list;
@@ -363,6 +361,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
 {
         struct g2d_cmdlist_userptr *g2d_userptr =
                         (struct g2d_cmdlist_userptr *)obj;
+        struct page **pages;
 
         if (!obj)
                 return;
@@ -382,19 +381,21 @@ out:
         exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
                                         DMA_BIDIRECTIONAL);
 
-        exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
-                                        g2d_userptr->npages,
-                                        g2d_userptr->vma);
+        pages = frame_vector_pages(g2d_userptr->vec);
+        if (!IS_ERR(pages)) {
+                int i;
 
-        exynos_gem_put_vma(g2d_userptr->vma);
+                for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
+                        set_page_dirty_lock(pages[i]);
+        }
+        put_vaddr_frames(g2d_userptr->vec);
+        frame_vector_destroy(g2d_userptr->vec);
 
         if (!g2d_userptr->out_of_list)
                 list_del_init(&g2d_userptr->list);
 
         sg_free_table(g2d_userptr->sgt);
         kfree(g2d_userptr->sgt);
-
-        drm_free_large(g2d_userptr->pages);
         kfree(g2d_userptr);
 }
 
@@ -408,9 +409,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
         struct g2d_cmdlist_userptr *g2d_userptr;
         struct g2d_data *g2d;
-        struct page **pages;
         struct sg_table *sgt;
-        struct vm_area_struct *vma;
         unsigned long start, end;
         unsigned int npages, offset;
         int ret;
@@ -456,65 +455,40 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
                 return ERR_PTR(-ENOMEM);
 
         atomic_set(&g2d_userptr->refcount, 1);
+        g2d_userptr->size = size;
 
         start = userptr & PAGE_MASK;
         offset = userptr & ~PAGE_MASK;
         end = PAGE_ALIGN(userptr + size);
         npages = (end - start) >> PAGE_SHIFT;
-        g2d_userptr->npages = npages;
-
-        pages = drm_calloc_large(npages, sizeof(struct page *));
-        if (!pages) {
-                DRM_ERROR("failed to allocate pages.\n");
+        g2d_userptr->vec = frame_vector_create(npages);
+        if (!g2d_userptr->vec) {
                 ret = -ENOMEM;
                 goto err_free;
         }
 
-        down_read(&current->mm->mmap_sem);
-        vma = find_vma(current->mm, userptr);
-        if (!vma) {
-                up_read(&current->mm->mmap_sem);
-                DRM_ERROR("failed to get vm region.\n");
+        ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+        if (ret != npages) {
+                DRM_ERROR("failed to get user pages from userptr.\n");
+                if (ret < 0)
+                        goto err_destroy_framevec;
                 ret = -EFAULT;
-                goto err_free_pages;
+                goto err_put_framevec;
         }
-
-        if (vma->vm_end < userptr + size) {
-                up_read(&current->mm->mmap_sem);
-                DRM_ERROR("vma is too small.\n");
+        if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
                 ret = -EFAULT;
-                goto err_free_pages;
-        }
-
-        g2d_userptr->vma = exynos_gem_get_vma(vma);
-        if (!g2d_userptr->vma) {
-                up_read(&current->mm->mmap_sem);
-                DRM_ERROR("failed to copy vma.\n");
-                ret = -ENOMEM;
-                goto err_free_pages;
-        }
-
-        g2d_userptr->size = size;
-
-        ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
-                                                npages, pages, vma);
-        if (ret < 0) {
-                up_read(&current->mm->mmap_sem);
-                DRM_ERROR("failed to get user pages from userptr.\n");
-                goto err_put_vma;
+                goto err_put_framevec;
         }
 
-        up_read(&current->mm->mmap_sem);
-        g2d_userptr->pages = pages;
-
         sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
         if (!sgt) {
                 ret = -ENOMEM;
-                goto err_free_userptr;
+                goto err_put_framevec;
         }
 
-        ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
-                                        size, GFP_KERNEL);
+        ret = sg_alloc_table_from_pages(sgt,
+                                        frame_vector_pages(g2d_userptr->vec),
+                                        npages, offset, size, GFP_KERNEL);
         if (ret < 0) {
                 DRM_ERROR("failed to get sgt from pages.\n");
                 goto err_free_sgt;
@@ -549,16 +523,11 @@ err_sg_free_table:
 err_free_sgt:
         kfree(sgt);
 
-err_free_userptr:
-        exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
-                                        g2d_userptr->npages,
-                                        g2d_userptr->vma);
-
-err_put_vma:
-        exynos_gem_put_vma(g2d_userptr->vma);
+err_put_framevec:
+        put_vaddr_frames(g2d_userptr->vec);
 
-err_free_pages:
-        drm_free_large(pages);
+err_destroy_framevec:
+        frame_vector_destroy(g2d_userptr->vec);
 
 err_free:
         kfree(g2d_userptr);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 0d5b9698d384..47068ae44ced 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -378,103 +378,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
         return 0;
 }
 
-struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
-{
-        struct vm_area_struct *vma_copy;
-
-        vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
-        if (!vma_copy)
-                return NULL;
-
-        if (vma->vm_ops && vma->vm_ops->open)
-                vma->vm_ops->open(vma);
-
-        if (vma->vm_file)
-                get_file(vma->vm_file);
-
-        memcpy(vma_copy, vma, sizeof(*vma));
-
-        vma_copy->vm_mm = NULL;
-        vma_copy->vm_next = NULL;
-        vma_copy->vm_prev = NULL;
-
-        return vma_copy;
-}
-
-void exynos_gem_put_vma(struct vm_area_struct *vma)
-{
-        if (!vma)
-                return;
-
-        if (vma->vm_ops && vma->vm_ops->close)
-                vma->vm_ops->close(vma);
-
-        if (vma->vm_file)
-                fput(vma->vm_file);
-
-        kfree(vma);
-}
-
-int exynos_gem_get_pages_from_userptr(unsigned long start,
-                                      unsigned int npages,
-                                      struct page **pages,
-                                      struct vm_area_struct *vma)
-{
-        int get_npages;
-
-        /* the memory region mmaped with VM_PFNMAP. */
-        if (vma_is_io(vma)) {
-                unsigned int i;
-
-                for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
-                        unsigned long pfn;
-                        int ret = follow_pfn(vma, start, &pfn);
-                        if (ret)
-                                return ret;
-
-                        pages[i] = pfn_to_page(pfn);
-                }
-
-                if (i != npages) {
-                        DRM_ERROR("failed to get user_pages.\n");
-                        return -EINVAL;
-                }
-
-                return 0;
-        }
-
-        get_npages = get_user_pages(current, current->mm, start,
-                                    npages, 1, 1, pages, NULL);
-        get_npages = max(get_npages, 0);
-        if (get_npages != npages) {
-                DRM_ERROR("failed to get user_pages.\n");
-                while (get_npages)
-                        put_page(pages[--get_npages]);
-                return -EFAULT;
-        }
-
-        return 0;
-}
-
-void exynos_gem_put_pages_to_userptr(struct page **pages,
-                                     unsigned int npages,
-                                     struct vm_area_struct *vma)
-{
-        if (!vma_is_io(vma)) {
-                unsigned int i;
-
-                for (i = 0; i < npages; i++) {
-                        set_page_dirty_lock(pages[i]);
-
-                        /*
-                         * undo the reference we took when populating
-                         * the table.
-                         */
-                        put_page(pages[i]);
-                }
-        }
-}
-
 int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
                                 struct sg_table *sgt,
                                 enum dma_data_direction dir)