Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/dmabuf.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/dmabuf.c  247
1 file changed, 247 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/dmabuf.c b/drivers/gpu/nvgpu/common/linux/dmabuf.c
new file mode 100644
index 00000000..0b07b255
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/dmabuf.c
@@ -0,0 +1,247 @@
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>

#include <nvgpu/comptags.h>
#include <nvgpu/enabled.h>

#include <nvgpu/linux/vidmem.h>

#include "gk20a/gk20a.h"
#include "gk20a/platform_gk20a.h"

#include "dmabuf.h"
#include "vm_priv.h"
#include "os_linux.h"

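/*
 * Release the per-dmabuf private data: free any comptag lines allocated
 * for the buffer and drop all cached buffer states along with their
 * fences. Registered as the drvdata destructor in
 * gk20a_dmabuf_alloc_drvdata().
 */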
static void gk20a_mm_delete_priv(void *_priv)
{
	struct gk20a_buffer_state *s, *s_tmp;
	struct gk20a_dmabuf_priv *priv = _priv;
	struct gk20a *g;

	if (!priv)
		return;

	g = priv->g;

	if (priv->comptags.lines) {
		BUG_ON(!priv->comptag_allocator);
		gk20a_comptaglines_free(priv->comptag_allocator,
					priv->comptags.offset,
					priv->comptags.allocated_lines);
	}

	/* Free buffer states */
	nvgpu_list_for_each_entry_safe(s, s_tmp, &priv->states,
				       gk20a_buffer_state, list) {
		gk20a_fence_put(s->fence);
		nvgpu_list_del(&s->list);
		nvgpu_kfree(g, s);
	}

	nvgpu_kfree(g, priv);
}

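/*
 * Classify which aperture a dmabuf lives in: SYSMEM for buffers not
 * allocated by nvgpu, VIDMEM for this GPU's own vidmem buffers, and
 * INVALID for vidmem owned by another GPU or for vidmem on a
 * unified-memory GPU.
 */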
enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
					  struct dma_buf *dmabuf)
{
	struct gk20a *buf_owner = nvgpu_vidmem_buf_owner(dmabuf);
	bool unified_memory = nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY);

	if (buf_owner == NULL) {
		/* Not nvgpu-allocated, assume system memory */
		return APERTURE_SYSMEM;
	} else if (WARN_ON(buf_owner == g && unified_memory)) {
		/* Looks like our video memory, but this gpu doesn't support
		 * it. Warn about a bug and bail out */
		nvgpu_warn(g,
			"dmabuf is our vidmem but we don't have local vidmem");
		return APERTURE_INVALID;
	} else if (buf_owner != g) {
		/* Someone else's vidmem */
		return APERTURE_INVALID;
	} else {
		/* Yay, buf_owner == g */
		return APERTURE_VIDMEM;
	}
}

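/*
 * Pin a dmabuf for GPU use. The first pin attaches the buffer to the
 * device and maps the attachment to obtain an sg_table; subsequent pins
 * only bump the pin count and return the cached sg_table.
 */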
struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf)
{
	struct gk20a_dmabuf_priv *priv;

	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (WARN_ON(!priv))
		return ERR_PTR(-EINVAL);

	nvgpu_mutex_acquire(&priv->lock);

	if (priv->pin_count == 0) {
		priv->attach = dma_buf_attach(dmabuf, dev);
		if (IS_ERR(priv->attach)) {
			nvgpu_mutex_release(&priv->lock);
			return (struct sg_table *)priv->attach;
		}

		priv->sgt = dma_buf_map_attachment(priv->attach,
						   DMA_BIDIRECTIONAL);
		if (IS_ERR(priv->sgt)) {
			dma_buf_detach(dmabuf, priv->attach);
			nvgpu_mutex_release(&priv->lock);
			return priv->sgt;
		}
	}

	priv->pin_count++;
	nvgpu_mutex_release(&priv->lock);
	return priv->sgt;
}

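/*
 * Drop one pin reference. When the last reference is released the
 * sg_table is unmapped and the attachment is detached from the device.
 */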
void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
		    struct sg_table *sgt)
{
	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
	dma_addr_t dma_addr;

	if (IS_ERR(priv) || !priv)
		return;

	nvgpu_mutex_acquire(&priv->lock);
	WARN_ON(priv->sgt != sgt);
	priv->pin_count--;
	WARN_ON(priv->pin_count < 0);
	dma_addr = sg_dma_address(priv->sgt->sgl);
	if (priv->pin_count == 0) {
		dma_buf_unmap_attachment(priv->attach, priv->sgt,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(dmabuf, priv->attach);
	}
	nvgpu_mutex_release(&priv->lock);
}

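/*
 * Allocate and attach nvgpu private data to a dmabuf if it does not have
 * any yet. The allocation is done under g->mm.priv_lock, with a recheck
 * after taking the lock, so two threads racing on the same buffer cannot
 * both attach drvdata.
 */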
int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev)
{
	struct gk20a *g = gk20a_get_platform(dev)->g;
	struct gk20a_dmabuf_priv *priv;
	static u64 priv_count = 0;

	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (likely(priv))
		return 0;

	nvgpu_mutex_acquire(&g->mm.priv_lock);
	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (priv)
		goto priv_exist_or_err;

	priv = nvgpu_kzalloc(g, sizeof(*priv));
	if (!priv) {
		priv = ERR_PTR(-ENOMEM);
		goto priv_exist_or_err;
	}

	nvgpu_mutex_init(&priv->lock);
	nvgpu_init_list_node(&priv->states);
	priv->buffer_id = ++priv_count;
	priv->g = g;
	dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv);

priv_exist_or_err:
	nvgpu_mutex_release(&g->mm.priv_lock);
	if (IS_ERR(priv))
		return -ENOMEM;

	return 0;
}

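/*
 * Look up the buffer state tracked for the given offset within a dmabuf,
 * creating and linking a new zero-initialized state if none exists yet.
 */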
int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct gk20a *g,
			   u64 offset, struct gk20a_buffer_state **state)
{
	int err = 0;
	struct gk20a_dmabuf_priv *priv;
	struct gk20a_buffer_state *s;
	struct device *dev = dev_from_gk20a(g);

	if (WARN_ON(offset >= (u64)dmabuf->size))
		return -EINVAL;

	err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev);
	if (err)
		return err;

	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (WARN_ON(!priv))
		return -ENOSYS;

	nvgpu_mutex_acquire(&priv->lock);

	nvgpu_list_for_each_entry(s, &priv->states, gk20a_buffer_state, list)
		if (s->offset == offset)
			goto out;

	/* State not found, create state. */
	s = nvgpu_kzalloc(g, sizeof(*s));
	if (!s) {
		err = -ENOMEM;
		goto out;
	}

	s->offset = offset;
	nvgpu_init_list_node(&s->list);
	nvgpu_mutex_init(&s->lock);
	nvgpu_list_add_tail(&s->list, &priv->states);

out:
	nvgpu_mutex_release(&priv->lock);
	if (!err)
		*state = s;
	return err;
}

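/*
 * Resolve a dmabuf fd to the nvgpu buffer id and buffer length, attaching
 * drvdata on demand. The dmabuf reference taken here is dropped before
 * returning.
 */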
int gk20a_mm_get_buffer_info(struct device *dev, int dmabuf_fd,
			     u64 *buffer_id, u64 *buffer_len)
{
	struct dma_buf *dmabuf;
	struct gk20a_dmabuf_priv *priv;
	int err = 0;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf)) {
		dev_warn(dev, "%s: fd %d is not a dmabuf", __func__, dmabuf_fd);
		return PTR_ERR(dmabuf);
	}

	err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev);
	if (err) {
		dev_warn(dev, "Failed to allocate dmabuf drvdata (err = %d)",
			 err);
		goto clean_up;
	}

	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (likely(priv)) {
		*buffer_id = priv->buffer_id;
		*buffer_len = dmabuf->size;
	}

clean_up:
	dma_buf_put(dmabuf);
	return err;
}