diff options
author | Inki Dae <inki.dae@samsung.com> | 2012-11-28 05:09:31 -0500 |
---|---|---|
committer | Inki Dae <daeinki@gmail.com> | 2012-12-13 09:05:43 -0500 |
commit | a7b362fb3bba310426b90e7bac83a2206808e425 (patch) | |
tree | 37f66c99508fae68baea57ca24237557891a9f31 /drivers/gpu/drm | |
parent | ae9dace2903db86b27f19d40c1d1b21a6f712895 (diff) |
drm/exynos: add dmabuf attach/detach callbacks.
With this patch, when dma_buf_unmap_attachment is called,
the pages of the sgt aren't unmapped from the iommu table.
Instead, that is done when dma_buf_detach is called.
This patch also removes the exynos_get_sgt function, which was
used to get a cloned sgt, and uses the attachment's sgt instead.
This resolves a performance deterioration issue seen when a
v4l2-based driver uses a buffer imported from gem.
This change is derived from videobuf2-dma-contig.c
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- | drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 104 |
1 file changed, 73 insertions, 31 deletions
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index 539da9f4eb97..61d5a8402eb8 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | |||
@@ -30,63 +30,107 @@ | |||
30 | 30 | ||
31 | #include <linux/dma-buf.h> | 31 | #include <linux/dma-buf.h> |
32 | 32 | ||
33 | static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev, | 33 | struct exynos_drm_dmabuf_attachment { |
34 | struct exynos_drm_gem_buf *buf) | 34 | struct sg_table sgt; |
35 | enum dma_data_direction dir; | ||
36 | }; | ||
37 | |||
38 | static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf, | ||
39 | struct device *dev, | ||
40 | struct dma_buf_attachment *attach) | ||
35 | { | 41 | { |
36 | struct sg_table *sgt = NULL; | 42 | struct exynos_drm_dmabuf_attachment *exynos_attach; |
37 | int ret; | ||
38 | 43 | ||
39 | sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); | 44 | exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL); |
40 | if (!sgt) | 45 | if (!exynos_attach) |
41 | goto out; | 46 | return -ENOMEM; |
42 | 47 | ||
43 | ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr, | 48 | exynos_attach->dir = DMA_NONE; |
44 | buf->dma_addr, buf->size); | 49 | attach->priv = exynos_attach; |
45 | if (ret < 0) { | ||
46 | DRM_ERROR("failed to get sgtable.\n"); | ||
47 | goto err_free_sgt; | ||
48 | } | ||
49 | 50 | ||
50 | return sgt; | 51 | return 0; |
52 | } | ||
51 | 53 | ||
52 | err_free_sgt: | 54 | static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf, |
53 | kfree(sgt); | 55 | struct dma_buf_attachment *attach) |
54 | sgt = NULL; | 56 | { |
55 | out: | 57 | struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; |
56 | return NULL; | 58 | struct sg_table *sgt; |
59 | |||
60 | if (!exynos_attach) | ||
61 | return; | ||
62 | |||
63 | sgt = &exynos_attach->sgt; | ||
64 | |||
65 | if (exynos_attach->dir != DMA_NONE) | ||
66 | dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, | ||
67 | exynos_attach->dir); | ||
68 | |||
69 | sg_free_table(sgt); | ||
70 | kfree(exynos_attach); | ||
71 | attach->priv = NULL; | ||
57 | } | 72 | } |
58 | 73 | ||
59 | static struct sg_table * | 74 | static struct sg_table * |
60 | exynos_gem_map_dma_buf(struct dma_buf_attachment *attach, | 75 | exynos_gem_map_dma_buf(struct dma_buf_attachment *attach, |
61 | enum dma_data_direction dir) | 76 | enum dma_data_direction dir) |
62 | { | 77 | { |
78 | struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv; | ||
63 | struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv; | 79 | struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv; |
64 | struct drm_device *dev = gem_obj->base.dev; | 80 | struct drm_device *dev = gem_obj->base.dev; |
65 | struct exynos_drm_gem_buf *buf; | 81 | struct exynos_drm_gem_buf *buf; |
82 | struct scatterlist *rd, *wr; | ||
66 | struct sg_table *sgt = NULL; | 83 | struct sg_table *sgt = NULL; |
67 | int nents; | 84 | unsigned int i; |
85 | int nents, ret; | ||
68 | 86 | ||
69 | DRM_DEBUG_PRIME("%s\n", __FILE__); | 87 | DRM_DEBUG_PRIME("%s\n", __FILE__); |
70 | 88 | ||
89 | if (WARN_ON(dir == DMA_NONE)) | ||
90 | return ERR_PTR(-EINVAL); | ||
91 | |||
92 | /* just return current sgt if already requested. */ | ||
93 | if (exynos_attach->dir == dir) | ||
94 | return &exynos_attach->sgt; | ||
95 | |||
96 | /* reattaching is not allowed. */ | ||
97 | if (WARN_ON(exynos_attach->dir != DMA_NONE)) | ||
98 | return ERR_PTR(-EBUSY); | ||
99 | |||
71 | buf = gem_obj->buffer; | 100 | buf = gem_obj->buffer; |
72 | if (!buf) { | 101 | if (!buf) { |
73 | DRM_ERROR("buffer is null.\n"); | 102 | DRM_ERROR("buffer is null.\n"); |
74 | return sgt; | 103 | return ERR_PTR(-ENOMEM); |
104 | } | ||
105 | |||
106 | sgt = &exynos_attach->sgt; | ||
107 | |||
108 | ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL); | ||
109 | if (ret) { | ||
110 | DRM_ERROR("failed to alloc sgt.\n"); | ||
111 | return ERR_PTR(-ENOMEM); | ||
75 | } | 112 | } |
76 | 113 | ||
77 | mutex_lock(&dev->struct_mutex); | 114 | mutex_lock(&dev->struct_mutex); |
78 | 115 | ||
79 | sgt = exynos_get_sgt(dev, buf); | 116 | rd = buf->sgt->sgl; |
80 | if (!sgt) | 117 | wr = sgt->sgl; |
81 | goto err_unlock; | 118 | for (i = 0; i < sgt->orig_nents; ++i) { |
119 | sg_set_page(wr, sg_page(rd), rd->length, rd->offset); | ||
120 | rd = sg_next(rd); | ||
121 | wr = sg_next(wr); | ||
122 | } | ||
82 | 123 | ||
83 | nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir); | 124 | nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); |
84 | if (!nents) { | 125 | if (!nents) { |
85 | DRM_ERROR("failed to map sgl with iommu.\n"); | 126 | DRM_ERROR("failed to map sgl with iommu.\n"); |
86 | sgt = NULL; | 127 | sgt = ERR_PTR(-EIO); |
87 | goto err_unlock; | 128 | goto err_unlock; |
88 | } | 129 | } |
89 | 130 | ||
131 | exynos_attach->dir = dir; | ||
132 | attach->priv = exynos_attach; | ||
133 | |||
90 | DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size); | 134 | DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size); |
91 | 135 | ||
92 | err_unlock: | 136 | err_unlock: |
@@ -98,11 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach, | |||
98 | struct sg_table *sgt, | 142 | struct sg_table *sgt, |
99 | enum dma_data_direction dir) | 143 | enum dma_data_direction dir) |
100 | { | 144 | { |
101 | dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); | 145 | /* Nothing to do. */ |
102 | |||
103 | sg_free_table(sgt); | ||
104 | kfree(sgt); | ||
105 | sgt = NULL; | ||
106 | } | 146 | } |
107 | 147 | ||
108 | static void exynos_dmabuf_release(struct dma_buf *dmabuf) | 148 | static void exynos_dmabuf_release(struct dma_buf *dmabuf) |
@@ -164,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf, | |||
164 | } | 204 | } |
165 | 205 | ||
166 | static struct dma_buf_ops exynos_dmabuf_ops = { | 206 | static struct dma_buf_ops exynos_dmabuf_ops = { |
207 | .attach = exynos_gem_attach_dma_buf, | ||
208 | .detach = exynos_gem_detach_dma_buf, | ||
167 | .map_dma_buf = exynos_gem_map_dma_buf, | 209 | .map_dma_buf = exynos_gem_map_dma_buf, |
168 | .unmap_dma_buf = exynos_gem_unmap_dma_buf, | 210 | .unmap_dma_buf = exynos_gem_unmap_dma_buf, |
169 | .kmap = exynos_gem_dmabuf_kmap, | 211 | .kmap = exynos_gem_dmabuf_kmap, |