Diffstat (limited to 'drivers/gpu'):

 drivers/gpu/drm/exynos/Kconfig          |  1
 drivers/gpu/drm/exynos/exynos_drm_g2d.c | 89
 drivers/gpu/drm/exynos/exynos_drm_gem.c | 97
 3 files changed, 30 insertions(+), 157 deletions(-)
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index df0b61a60501..bd1a4156f647 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -77,6 +77,7 @@ config DRM_EXYNOS_VIDI
 config DRM_EXYNOS_G2D
         bool "Exynos DRM G2D"
         depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
+        select FRAME_VECTOR
         help
           Choose this option if you want to use Exynos G2D for DRM.
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 535b4ad6c4b1..3734c34aed16 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -194,10 +194,8 @@ struct g2d_cmdlist_userptr {
         dma_addr_t              dma_addr;
         unsigned long           userptr;
         unsigned long           size;
-        struct page             **pages;
-        unsigned int            npages;
+        struct frame_vector     *vec;
         struct sg_table         *sgt;
-        struct vm_area_struct   *vma;
         atomic_t                refcount;
         bool                    in_pool;
         bool                    out_of_list;
@@ -367,6 +365,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
 {
         struct g2d_cmdlist_userptr *g2d_userptr =
                                         (struct g2d_cmdlist_userptr *)obj;
+        struct page **pages;
 
         if (!obj)
                 return;
@@ -386,19 +385,21 @@ out:
         exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
                                         DMA_BIDIRECTIONAL);
 
-        exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
-                                        g2d_userptr->npages,
-                                        g2d_userptr->vma);
-
-        exynos_gem_put_vma(g2d_userptr->vma);
+        pages = frame_vector_pages(g2d_userptr->vec);
+        if (!IS_ERR(pages)) {
+                int i;
+
+                for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
+                        set_page_dirty_lock(pages[i]);
+        }
+        put_vaddr_frames(g2d_userptr->vec);
+        frame_vector_destroy(g2d_userptr->vec);
 
         if (!g2d_userptr->out_of_list)
                 list_del_init(&g2d_userptr->list);
 
         sg_free_table(g2d_userptr->sgt);
         kfree(g2d_userptr->sgt);
-
-        drm_free_large(g2d_userptr->pages);
         kfree(g2d_userptr);
 }
 
@@ -412,9 +413,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
         struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
         struct g2d_cmdlist_userptr *g2d_userptr;
         struct g2d_data *g2d;
-        struct page **pages;
         struct sg_table *sgt;
-        struct vm_area_struct *vma;
         unsigned long start, end;
         unsigned int npages, offset;
         int ret;
@@ -460,65 +459,40 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
                 return ERR_PTR(-ENOMEM);
 
         atomic_set(&g2d_userptr->refcount, 1);
+        g2d_userptr->size = size;
 
         start = userptr & PAGE_MASK;
         offset = userptr & ~PAGE_MASK;
         end = PAGE_ALIGN(userptr + size);
         npages = (end - start) >> PAGE_SHIFT;
-        g2d_userptr->npages = npages;
-
-        pages = drm_calloc_large(npages, sizeof(struct page *));
-        if (!pages) {
-                DRM_ERROR("failed to allocate pages.\n");
+        g2d_userptr->vec = frame_vector_create(npages);
+        if (!g2d_userptr->vec) {
                 ret = -ENOMEM;
                 goto err_free;
         }
 
-        down_read(&current->mm->mmap_sem);
-        vma = find_vma(current->mm, userptr);
-        if (!vma) {
-                up_read(&current->mm->mmap_sem);
-                DRM_ERROR("failed to get vm region.\n");
+        ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+        if (ret != npages) {
+                DRM_ERROR("failed to get user pages from userptr.\n");
+                if (ret < 0)
+                        goto err_destroy_framevec;
                 ret = -EFAULT;
-                goto err_free_pages;
+                goto err_put_framevec;
         }
-
-        if (vma->vm_end < userptr + size) {
-                up_read(&current->mm->mmap_sem);
-                DRM_ERROR("vma is too small.\n");
+        if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
                 ret = -EFAULT;
-                goto err_free_pages;
-        }
-
-        g2d_userptr->vma = exynos_gem_get_vma(vma);
-        if (!g2d_userptr->vma) {
-                up_read(&current->mm->mmap_sem);
-                DRM_ERROR("failed to copy vma.\n");
-                ret = -ENOMEM;
-                goto err_free_pages;
-        }
-
-        g2d_userptr->size = size;
-
-        ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
-                                                npages, pages, vma);
-        if (ret < 0) {
-                up_read(&current->mm->mmap_sem);
-                DRM_ERROR("failed to get user pages from userptr.\n");
-                goto err_put_vma;
+                goto err_put_framevec;
         }
 
-        up_read(&current->mm->mmap_sem);
-        g2d_userptr->pages = pages;
-
         sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
         if (!sgt) {
                 ret = -ENOMEM;
-                goto err_free_userptr;
+                goto err_put_framevec;
         }
 
-        ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
-                        size, GFP_KERNEL);
+        ret = sg_alloc_table_from_pages(sgt,
+                                        frame_vector_pages(g2d_userptr->vec),
+                                        npages, offset, size, GFP_KERNEL);
         if (ret < 0) {
                 DRM_ERROR("failed to get sgt from pages.\n");
                 goto err_free_sgt;
@@ -553,16 +527,11 @@ err_sg_free_table:
 err_free_sgt:
         kfree(sgt);
 
-err_free_userptr:
-        exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
-                                        g2d_userptr->npages,
-                                        g2d_userptr->vma);
-
-err_put_vma:
-        exynos_gem_put_vma(g2d_userptr->vma);
+err_put_framevec:
+        put_vaddr_frames(g2d_userptr->vec);
 
-err_free_pages:
-        drm_free_large(pages);
+err_destroy_framevec:
+        frame_vector_destroy(g2d_userptr->vec);
 
 err_free:
         kfree(g2d_userptr);
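
The converted g2d userptr path above follows the generic frame_vector lifecycle: create a vector sized for the pinned range, pin it with get_vaddr_frames(), turn it into a page array with frame_vector_to_pages(), and on teardown dirty the pages, drop the pins with put_vaddr_frames() and free the vector with frame_vector_destroy(). The sketch below condenses that pattern into one hypothetical helper (pin_userptr_example() is not part of the driver); it assumes the get_vaddr_frames() signature used by this patch (start, nr_frames, write, force, vec) and abbreviates the driver's error reporting.

/*
 * Minimal sketch of the frame_vector pin/use/unpin pattern used by the
 * patched g2d_userptr code.  Illustrative only: the helper name is made
 * up and the "use the pages" step is left as a comment.
 */
#include <linux/errno.h>
#include <linux/mm.h>

static int pin_userptr_example(unsigned long userptr, unsigned long size)
{
        unsigned long start = userptr & PAGE_MASK;
        unsigned int npages = (PAGE_ALIGN(userptr + size) - start) >> PAGE_SHIFT;
        struct frame_vector *vec;
        struct page **pages;
        unsigned int i;
        int ret;

        vec = frame_vector_create(npages);      /* bookkeeping for npages frames */
        if (!vec)
                return -ENOMEM;

        /* pin the user range; write = true, force = true, as in the patch */
        ret = get_vaddr_frames(start, npages, true, true, vec);
        if (ret < 0)
                goto out_destroy;
        if (ret != npages) {                    /* partial pin: unpin and fail */
                ret = -EFAULT;
                goto out_put;
        }

        /* the range must be backed by struct pages, not raw PFNs */
        if (frame_vector_to_pages(vec) < 0) {
                ret = -EFAULT;
                goto out_put;
        }

        pages = frame_vector_pages(vec);
        /* ... build an sg_table from 'pages' and map it for DMA ... */

        /* teardown mirrors g2d_userptr_put_dma_addr(): dirty, unpin, destroy */
        for (i = 0; i < frame_vector_count(vec); i++)
                set_page_dirty_lock(pages[i]);
        ret = 0;
out_put:
        put_vaddr_frames(vec);
out_destroy:
        frame_vector_destroy(vec);
        return ret;
}
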
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 62b9ea1b07fb..f12fbc36b120 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -366,103 +366,6 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
         return 0;
 }
 
-struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
-{
-        struct vm_area_struct *vma_copy;
-
-        vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
-        if (!vma_copy)
-                return NULL;
-
-        if (vma->vm_ops && vma->vm_ops->open)
-                vma->vm_ops->open(vma);
-
-        if (vma->vm_file)
-                get_file(vma->vm_file);
-
-        memcpy(vma_copy, vma, sizeof(*vma));
-
-        vma_copy->vm_mm = NULL;
-        vma_copy->vm_next = NULL;
-        vma_copy->vm_prev = NULL;
-
-        return vma_copy;
-}
-
-void exynos_gem_put_vma(struct vm_area_struct *vma)
-{
-        if (!vma)
-                return;
-
-        if (vma->vm_ops && vma->vm_ops->close)
-                vma->vm_ops->close(vma);
-
-        if (vma->vm_file)
-                fput(vma->vm_file);
-
-        kfree(vma);
-}
-
-int exynos_gem_get_pages_from_userptr(unsigned long start,
-                                                unsigned int npages,
-                                                struct page **pages,
-                                                struct vm_area_struct *vma)
-{
-        int get_npages;
-
-        /* the memory region mmaped with VM_PFNMAP. */
-        if (vma_is_io(vma)) {
-                unsigned int i;
-
-                for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
-                        unsigned long pfn;
-                        int ret = follow_pfn(vma, start, &pfn);
-                        if (ret)
-                                return ret;
-
-                        pages[i] = pfn_to_page(pfn);
-                }
-
-                if (i != npages) {
-                        DRM_ERROR("failed to get user_pages.\n");
-                        return -EINVAL;
-                }
-
-                return 0;
-        }
-
-        get_npages = get_user_pages(current, current->mm, start,
-                        npages, 1, 1, pages, NULL);
-        get_npages = max(get_npages, 0);
-        if (get_npages != npages) {
-                DRM_ERROR("failed to get user_pages.\n");
-                while (get_npages)
-                        put_page(pages[--get_npages]);
-                return -EFAULT;
-        }
-
-        return 0;
-}
-
-void exynos_gem_put_pages_to_userptr(struct page **pages,
-                                        unsigned int npages,
-                                        struct vm_area_struct *vma)
-{
-        if (!vma_is_io(vma)) {
-                unsigned int i;
-
-                for (i = 0; i < npages; i++) {
-                        set_page_dirty_lock(pages[i]);
-
-                        /*
-                         * undo the reference we took when populating
-                         * the table.
-                         */
-                        put_page(pages[i]);
-                }
-        }
-}
-
 int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
                                 struct sg_table *sgt,
                                 enum dma_data_direction dir)
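
The helpers removed above open-coded two pinning cases, VM_PFNMAP regions via follow_pfn() and ordinary mappings via get_user_pages(); get_vaddr_frames() covers both internally, so what remains in the driver is wrapping the pinned pages in a scatter-gather table, as g2d_userptr_get_dma_addr() now does with sg_alloc_table_from_pages(). A minimal sketch of that remaining step, assuming a hypothetical helper name (example_sgt_from_pages() is not part of the driver):

/*
 * Wrap an already-pinned page array in an sg_table, the way the patched
 * g2d code wraps frame_vector_pages().  Illustrative helper, not driver code.
 */
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct sg_table *example_sgt_from_pages(struct page **pages,
                                               unsigned int npages,
                                               unsigned int offset,
                                               unsigned long size)
{
        struct sg_table *sgt;
        int ret;

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return ERR_PTR(-ENOMEM);

        /* merges physically contiguous pages into as few entries as possible */
        ret = sg_alloc_table_from_pages(sgt, pages, npages, offset, size,
                                        GFP_KERNEL);
        if (ret < 0) {
                kfree(sgt);
                return ERR_PTR(ret);
        }

        /*
         * The caller maps the table for DMA (the driver uses
         * exynos_gem_map_sgt_with_dma()) and later releases it with
         * sg_free_table() followed by kfree().
         */
        return sgt;
}
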