 drivers/gpu/drm/exynos/exynos_drm_drv.h |   3
 drivers/gpu/drm/exynos/exynos_drm_g2d.c | 342
 drivers/gpu/drm/exynos/exynos_drm_gem.c | 123
 drivers/gpu/drm/exynos/exynos_drm_gem.h |  45
 include/uapi/drm/exynos_drm.h           |  13
 5 files changed, 499 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 8c9f4b05fc17..9c9c2dc75828 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -231,8 +231,7 @@ struct exynos_drm_g2d_private {
 	struct device		*dev;
 	struct list_head	inuse_cmdlist;
 	struct list_head	event_list;
-	struct list_head	gem_list;
-	unsigned int		gem_nr;
+	struct list_head	userptr_list;
 };
 
 struct drm_exynos_file_private {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index bac2399857d1..a9002adc4694 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -97,11 +97,19 @@
 
 #define MAX_BUF_ADDR_NR		6
 
+/* maximum buffer pool size of userptr is 64MB as default */
+#define MAX_POOL		(64 * 1024 * 1024)
+
+enum {
+	BUF_TYPE_GEM = 1,
+	BUF_TYPE_USERPTR,
+};
+
 /* cmdlist data structure */
 struct g2d_cmdlist {
 	u32		head;
-	u32		data[G2D_CMDLIST_DATA_NUM];
+	unsigned long	data[G2D_CMDLIST_DATA_NUM];
 	u32		last;	/* last data offset */
 };
 
 struct drm_exynos_pending_g2d_event {
@@ -109,11 +117,26 @@ struct drm_exynos_pending_g2d_event {
 	struct drm_exynos_g2d_event	event;
 };
 
+struct g2d_cmdlist_userptr {
+	struct list_head	list;
+	dma_addr_t		dma_addr;
+	unsigned long		userptr;
+	unsigned long		size;
+	struct page		**pages;
+	unsigned int		npages;
+	struct sg_table		*sgt;
+	struct vm_area_struct	*vma;
+	atomic_t		refcount;
+	bool			in_pool;
+	bool			out_of_list;
+};
+
 struct g2d_cmdlist_node {
 	struct list_head	list;
 	struct g2d_cmdlist	*cmdlist;
 	unsigned int		map_nr;
-	unsigned int		handles[MAX_BUF_ADDR_NR];
+	unsigned long		handles[MAX_BUF_ADDR_NR];
+	unsigned int		obj_type[MAX_BUF_ADDR_NR];
 	dma_addr_t		dma_addr;
 
 	struct drm_exynos_pending_g2d_event	*event;
@@ -152,6 +175,9 @@ struct g2d_data {
 	struct list_head	runqueue;
 	struct mutex		runqueue_mutex;
 	struct kmem_cache	*runqueue_slab;
+
+	unsigned long		current_pool;
+	unsigned long		max_pool;
 };
 
 static int g2d_init_cmdlist(struct g2d_data *g2d)
@@ -256,6 +282,229 @@ add_to_list:
 	list_add_tail(&node->event->base.link, &g2d_priv->event_list);
 }
 
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+					unsigned long obj,
+					bool force)
+{
+	struct g2d_cmdlist_userptr *g2d_userptr =
+					(struct g2d_cmdlist_userptr *)obj;
+
+	if (!obj)
+		return;
+
+	if (force)
+		goto out;
+
+	atomic_dec(&g2d_userptr->refcount);
+
+	if (atomic_read(&g2d_userptr->refcount) > 0)
+		return;
+
+	if (g2d_userptr->in_pool)
+		return;
+
+out:
+	exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
+					DMA_BIDIRECTIONAL);
+
+	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+					g2d_userptr->npages,
+					g2d_userptr->vma);
+
+	if (!g2d_userptr->out_of_list)
+		list_del_init(&g2d_userptr->list);
+
+	sg_free_table(g2d_userptr->sgt);
+	kfree(g2d_userptr->sgt);
+	g2d_userptr->sgt = NULL;
+
+	kfree(g2d_userptr->pages);
+	g2d_userptr->pages = NULL;
+	kfree(g2d_userptr);
+	g2d_userptr = NULL;
+}
+
+dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+					unsigned long userptr,
+					unsigned long size,
+					struct drm_file *filp,
+					unsigned long *obj)
+{
+	struct drm_exynos_file_private *file_priv = filp->driver_priv;
+	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+	struct g2d_cmdlist_userptr *g2d_userptr;
+	struct g2d_data *g2d;
+	struct page **pages;
+	struct sg_table *sgt;
+	struct vm_area_struct *vma;
+	unsigned long start, end;
+	unsigned int npages, offset;
+	int ret;
+
+	if (!size) {
+		DRM_ERROR("invalid userptr size.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	g2d = dev_get_drvdata(g2d_priv->dev);
+
+	/* check if userptr already exists in userptr_list. */
+	list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+		if (g2d_userptr->userptr == userptr) {
+			/*
+			 * also check the size, because the same address
+			 * could be registered with a different size.
+			 */
+			if (g2d_userptr->size == size) {
+				atomic_inc(&g2d_userptr->refcount);
+				*obj = (unsigned long)g2d_userptr;
+
+				return &g2d_userptr->dma_addr;
+			}
+
+			/*
+			 * at this moment, the g2d dma engine may still be
+			 * accessing this g2d_userptr memory region, so just
+			 * remove the g2d_userptr object from userptr_list
+			 * so it cannot be looked up again, and exclude it
+			 * from the userptr pool so it is released once the
+			 * dma access has completed.
+			 */
+			g2d_userptr->out_of_list = true;
+			g2d_userptr->in_pool = false;
+			list_del_init(&g2d_userptr->list);
+
+			break;
+		}
+	}
+
+	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
+	if (!g2d_userptr) {
+		DRM_ERROR("failed to allocate g2d_userptr.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	atomic_set(&g2d_userptr->refcount, 1);
+
+	start = userptr & PAGE_MASK;
+	offset = userptr & ~PAGE_MASK;
+	end = PAGE_ALIGN(userptr + size);
+	npages = (end - start) >> PAGE_SHIFT;
+	g2d_userptr->npages = npages;
+
+	pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
+	if (!pages) {
+		DRM_ERROR("failed to allocate pages.\n");
+		kfree(g2d_userptr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	vma = find_vma(current->mm, userptr);
+	if (!vma) {
+		DRM_ERROR("failed to get vm region.\n");
+		ret = -EFAULT;
+		goto err_free_pages;
+	}
+
+	if (vma->vm_end < userptr + size) {
+		DRM_ERROR("vma is too small.\n");
+		ret = -EFAULT;
+		goto err_free_pages;
+	}
+
+	g2d_userptr->vma = exynos_gem_get_vma(vma);
+	if (!g2d_userptr->vma) {
+		DRM_ERROR("failed to copy vma.\n");
+		ret = -ENOMEM;
+		goto err_free_pages;
+	}
+
+	g2d_userptr->size = size;
+
+	ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
+						npages, pages, vma);
+	if (ret < 0) {
+		DRM_ERROR("failed to get user pages from userptr.\n");
+		goto err_put_vma;
+	}
+
+	g2d_userptr->pages = pages;
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		DRM_ERROR("failed to allocate sg table.\n");
+		ret = -ENOMEM;
+		goto err_free_userptr;
+	}
+
+	ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
+					size, GFP_KERNEL);
+	if (ret < 0) {
+		DRM_ERROR("failed to get sgt from pages.\n");
+		goto err_free_sgt;
+	}
+
+	g2d_userptr->sgt = sgt;
+
+	ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
+						DMA_BIDIRECTIONAL);
+	if (ret < 0) {
+		DRM_ERROR("failed to map sgt with dma region.\n");
+		goto err_free_sgt;
+	}
+
+	g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
+	g2d_userptr->userptr = userptr;
+
+	list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+
+	if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
+		g2d->current_pool += npages << PAGE_SHIFT;
+		g2d_userptr->in_pool = true;
+	}
+
+	*obj = (unsigned long)g2d_userptr;
+
+	return &g2d_userptr->dma_addr;
+
+err_free_sgt:
+	sg_free_table(sgt);
+	kfree(sgt);
+	sgt = NULL;
+
+err_free_userptr:
+	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+					g2d_userptr->npages,
+					g2d_userptr->vma);
+
+err_put_vma:
+	exynos_gem_put_vma(g2d_userptr->vma);
+
+err_free_pages:
+	kfree(pages);
+	kfree(g2d_userptr);
+	pages = NULL;
+	g2d_userptr = NULL;
+
+	return ERR_PTR(ret);
+}
+
+static void g2d_userptr_free_all(struct drm_device *drm_dev,
+					struct g2d_data *g2d,
+					struct drm_file *filp)
+{
+	struct drm_exynos_file_private *file_priv = filp->driver_priv;
+	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+	struct g2d_cmdlist_userptr *g2d_userptr, *n;
+
+	list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+		if (g2d_userptr->in_pool)
+			g2d_userptr_put_dma_addr(drm_dev,
+						(unsigned long)g2d_userptr,
+						true);
+
+	g2d->current_pool = 0;
+}
+
 static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
 			       struct g2d_cmdlist_node *node,
 			       struct drm_device *drm_dev,
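
Editor's note: the address arithmetic in g2d_userptr_get_dma_addr() above is worth a worked example. A standalone, runnable userspace sketch, assuming 4 KiB pages (the pointer and length values are hypothetical):

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_SHIFT	12
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		/* hypothetical userptr: 100 bytes into a page, 8000 bytes long */
		unsigned long userptr = 0x10000064UL, size = 8000;

		unsigned long start  = userptr & PAGE_MASK;		/* 0x10000000 */
		unsigned long offset = userptr & ~PAGE_MASK;		/* 100 */
		unsigned long end    = PAGE_ALIGN(userptr + size);	/* 0x10002000 */
		unsigned long npages = (end - start) >> PAGE_SHIFT;	/* 2 */

		printf("start=%#lx offset=%lu npages=%lu\n", start, offset, npages);
		return 0;
	}

The in-page offset is passed to sg_alloc_table_from_pages() so that sgl[0].dma_address points at the first byte of the user buffer rather than at the page boundary, and each successful pin charges npages << PAGE_SHIFT against the 64 MiB MAX_POOL budget.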
@@ -272,10 +521,31 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
 		offset = cmdlist->last - (i * 2 + 1);
 		handle = cmdlist->data[offset];
 
-		addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file);
-		if (IS_ERR(addr)) {
-			node->map_nr = i;
-			return -EFAULT;
+		if (node->obj_type[i] == BUF_TYPE_GEM) {
+			addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+								file);
+			if (IS_ERR(addr)) {
+				node->map_nr = i;
+				return -EFAULT;
+			}
+		} else {
+			struct drm_exynos_g2d_userptr g2d_userptr;
+
+			if (copy_from_user(&g2d_userptr, (void __user *)handle,
+				sizeof(struct drm_exynos_g2d_userptr))) {
+				node->map_nr = i;
+				return -EFAULT;
+			}
+
+			addr = g2d_userptr_get_dma_addr(drm_dev,
+							g2d_userptr.userptr,
+							g2d_userptr.size,
+							file,
+							&handle);
+			if (IS_ERR(addr)) {
+				node->map_nr = i;
+				return -EFAULT;
+			}
 		}
 
 		cmdlist->data[offset] = *addr;
@@ -293,9 +563,14 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
 	int i;
 
 	for (i = 0; i < node->map_nr; i++) {
-		unsigned int handle = node->handles[i];
+		unsigned long handle = node->handles[i];
 
-		exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle, filp);
+		if (node->obj_type[i] == BUF_TYPE_GEM)
+			exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+							filp);
+		else
+			g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+							false);
 
 		node->handles[i] = 0;
 	}
@@ -438,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+static int g2d_check_reg_offset(struct device *dev,
+				struct g2d_cmdlist_node *node,
 				int nr, bool for_addr)
 {
+	struct g2d_cmdlist *cmdlist = node->cmdlist;
 	int reg_offset;
 	int index;
 	int i;
 
 	for (i = 0; i < nr; i++) {
 		index = cmdlist->last - 2 * (i + 1);
+
+		if (for_addr) {
+			/* check userptr buffer type. */
+			reg_offset = (cmdlist->data[index] &
+					~0x7fffffff) >> 31;
+			if (reg_offset) {
+				node->obj_type[i] = BUF_TYPE_USERPTR;
+				cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+			}
+		}
+
 		reg_offset = cmdlist->data[index] & ~0xfffff000;
 
 		if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -463,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
 	case G2D_MSK_BASE_ADDR:
 		if (!for_addr)
 			goto err;
+
+		if (node->obj_type[i] != BUF_TYPE_USERPTR)
+			node->obj_type[i] = BUF_TYPE_GEM;
 		break;
 	default:
 		if (for_addr)
@@ -474,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
 	return 0;
 
 err:
-	dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+	dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
 	return -EINVAL;
 }
 
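
Editor's note: the (cmdlist->data[index] & ~0x7fffffff) >> 31 expression in g2d_check_reg_offset() above is a roundabout test of bit 31, i.e. the G2D_BUF_USERPTR flag that userspace ORs into the register-offset word of a command. Equivalent, more direct logic (a sketch, not the patch's code):

	if (for_addr && (cmdlist->data[index] & G2D_BUF_USERPTR)) {
		node->obj_type[i] = BUF_TYPE_USERPTR;
		cmdlist->data[index] &= ~G2D_BUF_USERPTR;
	}

When a slot is tagged this way, the matching data word carries a user-space pointer to a struct drm_exynos_g2d_userptr instead of a GEM handle, which g2d_map_cmdlist_gem() then fetches with copy_from_user().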
@@ -574,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 	}
 
 	/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
-	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
 	if (size > G2D_CMDLIST_DATA_NUM) {
 		dev_err(dev, "cmdlist size is too big\n");
 		ret = -EINVAL;
@@ -591,25 +882,25 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 	}
 	cmdlist->last += req->cmd_nr * 2;
 
-	ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+	ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
 	if (ret < 0)
 		goto err_free_event;
 
-	node->map_nr = req->cmd_gem_nr;
-	if (req->cmd_gem_nr) {
-		struct drm_exynos_g2d_cmd *cmd_gem;
+	node->map_nr = req->cmd_buf_nr;
+	if (req->cmd_buf_nr) {
+		struct drm_exynos_g2d_cmd *cmd_buf;
 
-		cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+		cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
 
 		if (copy_from_user(cmdlist->data + cmdlist->last,
-					(void __user *)cmd_gem,
-					sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+					(void __user *)cmd_buf,
+					sizeof(*cmd_buf) * req->cmd_buf_nr)) {
 			ret = -EFAULT;
 			goto err_free_event;
 		}
-		cmdlist->last += req->cmd_gem_nr * 2;
+		cmdlist->last += req->cmd_buf_nr * 2;
 
-		ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+		ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
 		if (ret < 0)
 			goto err_free_event;
 
@@ -759,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
 
 	INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
 	INIT_LIST_HEAD(&g2d_priv->event_list);
-	INIT_LIST_HEAD(&g2d_priv->gem_list);
+	INIT_LIST_HEAD(&g2d_priv->userptr_list);
 
 	return 0;
 }
@@ -793,6 +1084,9 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
 	}
 	mutex_unlock(&g2d->cmdlist_mutex);
 
+	/* release all g2d_userptr in pool. */
+	g2d_userptr_free_all(drm_dev, g2d, file);
+
 	kfree(file_priv->g2d_priv);
 }
 
@@ -863,6 +1157,8 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 		goto err_put_clk;
 	}
 
+	g2d->max_pool = MAX_POOL;
+
 	platform_set_drvdata(pdev, g2d);
 
 	subdrv = &g2d->subdrv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 5fdfb8f51a41..2cc6b3ae4e07 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -448,6 +448,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *vma_copy;
+
+	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+	if (!vma_copy)
+		return NULL;
+
+	if (vma->vm_ops && vma->vm_ops->open)
+		vma->vm_ops->open(vma);
+
+	if (vma->vm_file)
+		get_file(vma->vm_file);
+
+	memcpy(vma_copy, vma, sizeof(*vma));
+
+	vma_copy->vm_mm = NULL;
+	vma_copy->vm_next = NULL;
+	vma_copy->vm_prev = NULL;
+
+	return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+	if (!vma)
+		return;
+
+	if (vma->vm_ops && vma->vm_ops->close)
+		vma->vm_ops->close(vma);
+
+	if (vma->vm_file)
+		fput(vma->vm_file);
+
+	kfree(vma);
+}
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+					unsigned int npages,
+					struct page **pages,
+					struct vm_area_struct *vma)
+{
+	int get_npages;
+
+	/* the memory region mmaped with VM_PFNMAP. */
+	if (vma_is_io(vma)) {
+		unsigned int i;
+
+		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+			unsigned long pfn;
+			int ret = follow_pfn(vma, start, &pfn);
+			if (ret)
+				return ret;
+
+			pages[i] = pfn_to_page(pfn);
+		}
+
+		if (i != npages) {
+			DRM_ERROR("failed to get user_pages.\n");
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	get_npages = get_user_pages(current, current->mm, start,
+					npages, 1, 1, pages, NULL);
+	get_npages = max(get_npages, 0);
+	if (get_npages != npages) {
+		DRM_ERROR("failed to get user_pages.\n");
+		while (get_npages)
+			put_page(pages[--get_npages]);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+					unsigned int npages,
+					struct vm_area_struct *vma)
+{
+	if (!vma_is_io(vma)) {
+		unsigned int i;
+
+		for (i = 0; i < npages; i++) {
+			set_page_dirty_lock(pages[i]);
+
+			/*
+			 * undo the reference we took when populating
+			 * the table.
+			 */
+			put_page(pages[i]);
+		}
+	}
+}
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir)
+{
+	int nents;
+
+	mutex_lock(&drm_dev->struct_mutex);
+
+	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with dma.\n");
+		mutex_unlock(&drm_dev->struct_mutex);
+		return -EIO;
+	}
+
+	mutex_unlock(&drm_dev->struct_mutex);
+	return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir)
+{
+	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
+
 int exynos_drm_gem_init_object(struct drm_gem_object *obj)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
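
Editor's note: the helpers added above form one pin-and-map pipeline. exynos_gem_get_pages_from_userptr() branches on vma_is_io() because VM_IO/VM_PFNMAP mappings have no refcountable struct page and must be resolved with follow_pfn(), while ordinary anonymous or file-backed memory goes through get_user_pages(). The intended call order, condensed from g2d_userptr_get_dma_addr() above (a fragment for orientation only; declarations and error handling elided, names as in the patch):

	struct vm_area_struct *vma_copy = exynos_gem_get_vma(vma);

	exynos_gem_get_pages_from_userptr(start, npages, pages, vma_copy);
	sg_alloc_table_from_pages(sgt, pages, npages, offset, size, GFP_KERNEL);
	exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);

	/* the engine is programmed with the first segment's bus address */
	dma_addr = sgt->sgl[0].dma_address;

	/* teardown mirrors setup, in reverse */
	exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
	exynos_gem_put_pages_to_userptr(pages, npages, vma_copy);
	exynos_gem_put_vma(vma_copy);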
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 83d21ef1d1e9..4248d7fb698f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -35,22 +35,29 @@
  * exynos drm gem buffer structure.
  *
  * @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *	- this address could be physical address without IOMMU and
  *	device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
  * @sgt: sg table to transfer page data.
  * @pages: contain all pages to allocated memory region.
  * @page_size: could be 4K, 64K or 1MB.
  * @size: size of allocated memory region.
+ * @pfnmap: indicate whether memory region from userptr is mmaped with
+ *	VM_PFNMAP or not.
  */
 struct exynos_drm_gem_buf {
 	void __iomem		*kvaddr;
+	unsigned long		userptr;
 	dma_addr_t		dma_addr;
 	struct dma_attrs	dma_attrs;
+	unsigned int		write;
 	struct sg_table		*sgt;
 	struct page		**pages;
 	unsigned long		page_size;
 	unsigned long		size;
+	bool			pfnmap;
 };
 
 /*
@@ -66,6 +73,7 @@ struct exynos_drm_gem_buf {
  *	or at framebuffer creation.
  * @size: size requested from user, in bytes and this size is aligned
  *	in page unit.
+ * @vma: a pointer to vm_area.
  * @flags: indicate memory type to allocated buffer and cache attruibute.
  *
  * P.S. this object would be transfered to user as kms_bo.handle so
@@ -75,6 +83,7 @@ struct exynos_drm_gem_obj {
 	struct drm_gem_object		base;
 	struct exynos_drm_gem_buf	*buffer;
 	unsigned long			size;
+	struct vm_area_struct		*vma;
 	unsigned int			flags;
 };
 
@@ -129,6 +138,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 
+/* map user space allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+
 /* get buffer information to memory region allocated by gem. */
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
@@ -164,4 +177,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+					unsigned int npages,
+					struct page **pages,
+					struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+					unsigned int npages,
+					struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir);
+
 #endif
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index c0494d586e23..49f010f2b27f 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -133,17 +133,26 @@ struct drm_exynos_g2d_cmd {
 	__u32	data;
 };
 
+enum drm_exynos_g2d_buf_type {
+	G2D_BUF_USERPTR = 1 << 31,
+};
+
 enum drm_exynos_g2d_event_type {
 	G2D_EVENT_NOT,
 	G2D_EVENT_NONSTOP,
 	G2D_EVENT_STOP,		/* not yet */
 };
 
+struct drm_exynos_g2d_userptr {
+	unsigned long userptr;
+	unsigned long size;
+};
+
 struct drm_exynos_g2d_set_cmdlist {
 	__u64					cmd;
-	__u64					cmd_gem;
+	__u64					cmd_buf;
 	__u32					cmd_nr;
-	__u32					cmd_gem_nr;
+	__u32					cmd_buf_nr;
 
 	/* for g2d event */
 	__u64					event_type;
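
Editor's note: putting the uapi pieces together, a caller tags the register-offset word of a drm_exynos_g2d_cmd with G2D_BUF_USERPTR and stores a pointer to a struct drm_exynos_g2d_userptr in the data word. A hedged userspace sketch: DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST is the real ioctl, but the register value below is a hypothetical stand-in for whichever base-address register is being programmed, the 32-bit pointer casts mirror the (uint32_t) casts in the driver, and the rest of the command list plus error handling are elided:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/exynos_drm.h>

	/* hypothetical G2D source base-address register offset */
	#define G2D_SRC_BASE_ADDR	0x0304

	int g2d_use_malloc_buffer(int drm_fd, void *src, unsigned long len)
	{
		struct drm_exynos_g2d_userptr up = {
			.userptr = (unsigned long)src,
			.size	 = len,
		};
		struct drm_exynos_g2d_cmd buf_cmd = {
			/* bit 31 marks this slot as a userptr buffer */
			.offset	= G2D_SRC_BASE_ADDR | G2D_BUF_USERPTR,
			/* not a GEM handle: the kernel copy_from_user()s it */
			.data	= (uint32_t)(unsigned long)&up,
		};
		struct drm_exynos_g2d_set_cmdlist req = {
			.cmd_buf    = (uint64_t)(unsigned long)&buf_cmd,
			.cmd_buf_nr = 1,
		};

		return ioctl(drm_fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, &req);
	}

On the kernel side this is the path traced above: g2d_check_reg_offset() spots the bit-31 tag, g2d_map_cmdlist_gem() copies the descriptor in, and g2d_userptr_get_dma_addr() pins the pages and patches the command's data word with the resulting dma address.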