author     Inki Dae <daeinki@gmail.com>       2012-11-04 00:53:24 -0400
committer  Inki Dae <inki.dae@samsung.com>    2012-12-04 00:45:58 -0500
commit     d87342c10de68d75ecd976556299c68a300c3834
tree       6fa9624a0e36b17044ed9ee0264e238271339b15  /drivers/gpu/drm/exynos
parent     1055b39facd1bf8f84031a07385f84b46a20540f
drm/exynos: add iommu support for g2d
Changelog v2:
removed unnecessary structure, struct g2d_gem_node.

Changelog v1:
This patch adds iommu support for the g2d driver. For this, it adds
subdrv probe/remove callbacks to enable or disable the g2d iommu. With
this patch, when the g2d iommu is used, we can get or put the device
address for a gem handle passed from user space through
exynos_drm_gem_get/put_dma_addr(). These functions take a reference on
the gem object so that a gem object in use by g2d dma is released
properly. In addition, runqueue_node now keeps a pointer to the
drm_file object of the current process so that gem handles can be
managed per owner.

This patch is based on the patch set below, "drm/exynos: add iommu
support for -next":
http://www.spinics.net/lists/dri-devel/msg29041.html
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
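
For reference, the get/put pattern described above looks roughly like the
sketch below. This is an illustrative summary only (the hypothetical
'handle' stands in for a gem handle read out of a submitted cmdlist);
g2d_map_cmdlist_gem()/g2d_unmap_cmdlist_gem() in the diff are the actual
implementation.

	/* map: look up the gem handle, take a reference and get its device address */
	dma_addr_t *addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	cmdlist->data[offset] = *addr;	/* patch the device address into the cmdlist */

	/* unmap: drop the reference once the g2d dma using the buffer has completed */
	exynos_drm_gem_put_dma_addr(drm_dev, handle, file);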
Diffstat (limited to 'drivers/gpu/drm/exynos')
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 171
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c |  10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h |   6
3 files changed, 118 insertions, 69 deletions
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f7aab24ea46c..bac2399857d1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,11 +17,14 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-attrs.h>
 
 #include <drm/drmP.h>
 #include <drm/exynos_drm.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
 
 #define G2D_HW_MAJOR_VER		4
 #define G2D_HW_MINOR_VER		1
@@ -92,6 +95,8 @@
 #define G2D_CMDLIST_POOL_SIZE		(G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
 #define G2D_CMDLIST_DATA_NUM		(G2D_CMDLIST_SIZE / sizeof(u32) - 2)
 
+#define MAX_BUF_ADDR_NR			6
+
 /* cmdlist data structure */
 struct g2d_cmdlist {
 	u32	head;
@@ -104,15 +109,11 @@ struct drm_exynos_pending_g2d_event {
 	struct drm_exynos_g2d_event	event;
 };
 
-struct g2d_gem_node {
-	struct list_head	list;
-	unsigned int		handle;
-};
-
 struct g2d_cmdlist_node {
 	struct list_head	list;
 	struct g2d_cmdlist	*cmdlist;
-	unsigned int		gem_nr;
+	unsigned int		map_nr;
+	unsigned int		handles[MAX_BUF_ADDR_NR];
 	dma_addr_t		dma_addr;
 
 	struct drm_exynos_pending_g2d_event	*event;
@@ -122,6 +123,7 @@ struct g2d_runqueue_node {
 	struct list_head	list;
 	struct list_head	run_cmdlist;
 	struct list_head	event_list;
+	struct drm_file		*filp;
 	pid_t			pid;
 	struct completion	complete;
 	int			async;
@@ -143,6 +145,7 @@ struct g2d_data {
 	struct mutex			cmdlist_mutex;
 	dma_addr_t			cmdlist_pool;
 	void				*cmdlist_pool_virt;
+	struct dma_attrs		cmdlist_dma_attrs;
 
 	/* runqueue*/
 	struct g2d_runqueue_node	*runqueue_node;
@@ -155,11 +158,17 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
 {
 	struct device *dev = g2d->dev;
 	struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
 	int nr;
 	int ret;
 
-	g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE,
-						&g2d->cmdlist_pool, GFP_KERNEL);
+	init_dma_attrs(&g2d->cmdlist_dma_attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+
+	g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
+						G2D_CMDLIST_POOL_SIZE,
+						&g2d->cmdlist_pool, GFP_KERNEL,
+						&g2d->cmdlist_dma_attrs);
 	if (!g2d->cmdlist_pool_virt) {
 		dev_err(dev, "failed to allocate dma memory\n");
 		return -ENOMEM;
@@ -184,18 +193,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
 	return 0;
 
 err:
-	dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
-			g2d->cmdlist_pool);
+	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+			g2d->cmdlist_pool_virt,
+			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
 	return ret;
 }
 
 static void g2d_fini_cmdlist(struct g2d_data *g2d)
 {
-	struct device *dev = g2d->dev;
+	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
 
 	kfree(g2d->cmdlist_node);
-	dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt,
-			g2d->cmdlist_pool);
+	dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
+			g2d->cmdlist_pool_virt,
+			g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
 }
 
 static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -245,62 +256,51 @@ add_to_list:
 		list_add_tail(&node->event->base.link, &g2d_priv->event_list);
 }
 
-static int g2d_get_cmdlist_gem(struct drm_device *drm_dev,
-				struct drm_file *file,
-				struct g2d_cmdlist_node *node)
+static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
+				struct g2d_cmdlist_node *node,
+				struct drm_device *drm_dev,
+				struct drm_file *file)
 {
-	struct drm_exynos_file_private *file_priv = file->driver_priv;
-	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
 	struct g2d_cmdlist *cmdlist = node->cmdlist;
-	dma_addr_t *addr;
 	int offset;
 	int i;
 
-	for (i = 0; i < node->gem_nr; i++) {
-		struct g2d_gem_node *gem_node;
-
-		gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
-		if (!gem_node) {
-			dev_err(g2d_priv->dev, "failed to allocate gem node\n");
-			return -ENOMEM;
-		}
+	for (i = 0; i < node->map_nr; i++) {
+		unsigned long handle;
+		dma_addr_t *addr;
 
 		offset = cmdlist->last - (i * 2 + 1);
-		gem_node->handle = cmdlist->data[offset];
+		handle = cmdlist->data[offset];
 
-		addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle,
-						file);
+		addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file);
 		if (IS_ERR(addr)) {
-			node->gem_nr = i;
-			kfree(gem_node);
-			return PTR_ERR(addr);
+			node->map_nr = i;
+			return -EFAULT;
 		}
 
 		cmdlist->data[offset] = *addr;
-		list_add_tail(&gem_node->list, &g2d_priv->gem_list);
-		g2d_priv->gem_nr++;
+		node->handles[i] = handle;
 	}
 
 	return 0;
 }
 
-static void g2d_put_cmdlist_gem(struct drm_device *drm_dev,
-				struct drm_file *file,
-				unsigned int nr)
+static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
+				struct g2d_cmdlist_node *node,
+				struct drm_file *filp)
 {
-	struct drm_exynos_file_private *file_priv = file->driver_priv;
-	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
-	struct g2d_gem_node *node, *n;
+	struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
+	int i;
 
-	list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) {
-		if (!nr)
-			break;
+	for (i = 0; i < node->map_nr; i++) {
+		unsigned int handle = node->handles[i];
 
-		exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file);
-		list_del_init(&node->list);
-		kfree(node);
-		nr--;
+		exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle, filp);
+
+		node->handles[i] = 0;
 	}
+
+	node->map_nr = 0;
 }
 
 static void g2d_dma_start(struct g2d_data *g2d,
@@ -337,10 +337,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
 static void g2d_free_runqueue_node(struct g2d_data *g2d,
 				struct g2d_runqueue_node *runqueue_node)
 {
+	struct g2d_cmdlist_node *node;
+
 	if (!runqueue_node)
 		return;
 
 	mutex_lock(&g2d->cmdlist_mutex);
+	/*
+	 * commands in run_cmdlist have been completed so unmap all gem
+	 * objects in each command node so that they are unreferenced.
+	 */
+	list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
+		g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
 	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
 	mutex_unlock(&g2d->cmdlist_mutex);
 
@@ -587,7 +595,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 	if (ret < 0)
 		goto err_free_event;
 
-	node->gem_nr = req->cmd_gem_nr;
+	node->map_nr = req->cmd_gem_nr;
 	if (req->cmd_gem_nr) {
 		struct drm_exynos_g2d_cmd *cmd_gem;
 
@@ -605,7 +613,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 		if (ret < 0)
 			goto err_free_event;
 
-		ret = g2d_get_cmdlist_gem(drm_dev, file, node);
+		ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
 		if (ret < 0)
 			goto err_unmap;
 	}
@@ -624,7 +632,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 	return 0;
 
 err_unmap:
-	g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr);
+	g2d_unmap_cmdlist_gem(g2d, node, file);
 err_free_event:
 	if (node->event) {
 		spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +688,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
 
 	mutex_lock(&g2d->runqueue_mutex);
 	runqueue_node->pid = current->pid;
+	runqueue_node->filp = file;
 	list_add_tail(&runqueue_node->list, &g2d->runqueue);
 	if (!g2d->runqueue_node)
 		g2d_exec_runqueue(g2d);
@@ -696,6 +705,43 @@ out:
 }
 EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
 
+static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+	struct g2d_data *g2d;
+	int ret;
+
+	g2d = dev_get_drvdata(dev);
+	if (!g2d)
+		return -EFAULT;
+
+	/* allocate dma-aware cmdlist buffer. */
+	ret = g2d_init_cmdlist(g2d);
+	if (ret < 0) {
+		dev_err(dev, "cmdlist init failed\n");
+		return ret;
+	}
+
+	if (!is_drm_iommu_supported(drm_dev))
+		return 0;
+
+	ret = drm_iommu_attach_device(drm_dev, dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable iommu.\n");
+		g2d_fini_cmdlist(g2d);
+	}
+
+	return ret;
+
+}
+
+static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	if (!is_drm_iommu_supported(drm_dev))
+		return;
+
+	drm_iommu_detach_device(drm_dev, dev);
+}
+
 static int g2d_open(struct drm_device *drm_dev, struct device *dev,
 		struct drm_file *file)
 {
@@ -734,12 +780,19 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
 		return;
 
 	mutex_lock(&g2d->cmdlist_mutex);
-	list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list)
+	list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+		/*
+		 * unmap all gem objects not completed.
+		 *
+		 * P.S. if current process was terminated forcely then
+		 * there may be some commands in inuse_cmdlist so unmap
+		 * them.
+		 */
+		g2d_unmap_cmdlist_gem(g2d, node, file);
 		list_move_tail(&node->list, &g2d->free_cmdlist);
+	}
 	mutex_unlock(&g2d->cmdlist_mutex);
 
-	g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr);
-
 	kfree(file_priv->g2d_priv);
 }
 
@@ -778,15 +831,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 	mutex_init(&g2d->cmdlist_mutex);
 	mutex_init(&g2d->runqueue_mutex);
 
-	ret = g2d_init_cmdlist(g2d);
-	if (ret < 0)
-		goto err_destroy_workqueue;
-
 	g2d->gate_clk = clk_get(dev, "fimg2d");
 	if (IS_ERR(g2d->gate_clk)) {
 		dev_err(dev, "failed to get gate clock\n");
 		ret = PTR_ERR(g2d->gate_clk);
-		goto err_fini_cmdlist;
+		goto err_destroy_workqueue;
 	}
 
 	pm_runtime_enable(dev);
@@ -818,6 +867,8 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 
 	subdrv = &g2d->subdrv;
 	subdrv->dev = dev;
+	subdrv->probe = g2d_subdrv_probe;
+	subdrv->remove = g2d_subdrv_remove;
 	subdrv->open = g2d_open;
 	subdrv->close = g2d_close;
 
@@ -835,8 +886,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 err_put_clk:
 	pm_runtime_disable(dev);
 	clk_put(g2d->gate_clk);
-err_fini_cmdlist:
-	g2d_fini_cmdlist(g2d);
 err_destroy_workqueue:
 	destroy_workqueue(g2d->g2d_workq);
 err_destroy_slab:
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 08d0218d5ba6..8cb6824923c8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -266,14 +266,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv)
+					struct drm_file *filp)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_gem_object *obj;
 
-	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+	obj = drm_gem_object_lookup(dev, filp, gem_handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
 		return ERR_PTR(-EINVAL);
@@ -294,12 +294,12 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv)
+					struct drm_file *filp)
 {
 	struct exynos_drm_gem_obj *exynos_gem_obj;
 	struct drm_gem_object *obj;
 
-	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
+	obj = drm_gem_object_lookup(dev, filp, gem_handle);
 	if (!obj) {
 		DRM_ERROR("failed to lookup gem object.\n");
 		return;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 0236321521a1..83d21ef1d1e9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -105,9 +105,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
  * other drivers such as 2d/3d acceleration drivers.
  * with this function call, gem object reference count would be increased.
  */
-void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
+dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv);
+					struct drm_file *filp);
 
 /*
  * put dma address from gem handle and this function could be used for
@@ -116,7 +116,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
  */
 void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 					unsigned int gem_handle,
-					struct drm_file *file_priv);
+					struct drm_file *filp);
 
 /* get buffer offset to map to user space. */
 int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,