Diffstat (limited to 'include/os/linux/dmabuf.c')
 -rw-r--r--  include/os/linux/dmabuf.c | 219
 1 file changed, 219 insertions(+), 0 deletions(-)
diff --git a/include/os/linux/dmabuf.c b/include/os/linux/dmabuf.c
new file mode 100644
index 0000000..e8e3313
--- /dev/null
+++ b/include/os/linux/dmabuf.c
@@ -0,0 +1,219 @@
/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>

#include <nvgpu/comptags.h>
#include <nvgpu/enabled.h>
#include <nvgpu/gk20a.h>

#include <nvgpu/linux/vm.h>

#include "gk20a/fence_gk20a.h"

#include "platform_gk20a.h"
#include "dmabuf.h"
#include "os_linux.h"
#include "dmabuf_vidmem.h"

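/*
 * Release callback for the per-dmabuf private data: frees any allocated
 * comptag lines and all cached buffer states before freeing the priv
 * structure itself.
 */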
static void gk20a_mm_delete_priv(void *_priv)
{
	struct gk20a_buffer_state *s, *s_tmp;
	struct gk20a_dmabuf_priv *priv = _priv;
	struct gk20a *g;

	if (!priv)
		return;

	g = priv->g;

	if (priv->comptags.allocated && priv->comptags.lines) {
		BUG_ON(!priv->comptag_allocator);
		gk20a_comptaglines_free(priv->comptag_allocator,
				priv->comptags.offset,
				priv->comptags.lines);
	}

	/* Free buffer states */
	nvgpu_list_for_each_entry_safe(s, s_tmp, &priv->states,
				gk20a_buffer_state, list) {
		gk20a_fence_put(s->fence);
		nvgpu_list_del(&s->list);
		nvgpu_kfree(g, s);
	}

	nvgpu_kfree(g, priv);
}

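/*
 * Classify a dma-buf: buffers not allocated by nvgpu are treated as
 * SYSMEM, vidmem owned by this GPU maps to VIDMEM, and vidmem owned by
 * another GPU (or claimed by this GPU when it only has unified memory)
 * is rejected as APERTURE_INVALID.
 */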
enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
					  struct dma_buf *dmabuf)
{
	struct gk20a *buf_owner = nvgpu_vidmem_buf_owner(dmabuf);
	bool unified_memory = nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY);

	if (buf_owner == NULL) {
		/* Not nvgpu-allocated, assume system memory */
		return APERTURE_SYSMEM;
	} else if (WARN_ON(buf_owner == g && unified_memory)) {
		/* Looks like our video memory, but this gpu doesn't support
		 * it. Warn about a bug and bail out */
		nvgpu_warn(g,
			"dmabuf is our vidmem but we don't have local vidmem");
		return APERTURE_INVALID;
	} else if (buf_owner != g) {
		/* Someone else's vidmem */
		return APERTURE_INVALID;
	} else {
		/* Yay, buf_owner == g */
		return APERTURE_VIDMEM;
	}
}

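/*
 * Pin a dma-buf for DMA: the first pin attaches and maps the buffer,
 * subsequent pins reuse the cached attachment and sg_table. The pin
 * count is protected by priv->lock.
 */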
struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf,
			      struct dma_buf_attachment **attachment)
{
	struct gk20a_dmabuf_priv *priv;

	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (WARN_ON(!priv))
		return ERR_PTR(-EINVAL);

	nvgpu_mutex_acquire(&priv->lock);

	if (priv->pin_count == 0) {
		priv->attach = dma_buf_attach(dmabuf, dev);
		if (IS_ERR(priv->attach)) {
			nvgpu_mutex_release(&priv->lock);
			return (struct sg_table *)priv->attach;
		}

		priv->sgt = dma_buf_map_attachment(priv->attach,
						   DMA_BIDIRECTIONAL);
		if (IS_ERR(priv->sgt)) {
			dma_buf_detach(dmabuf, priv->attach);
			nvgpu_mutex_release(&priv->lock);
			return priv->sgt;
		}
	}

	priv->pin_count++;
	nvgpu_mutex_release(&priv->lock);
	*attachment = priv->attach;
	return priv->sgt;
}

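/*
 * Drop one pin reference; when the count reaches zero the attachment is
 * unmapped and detached. Balances a successful gk20a_mm_pin() call.
 */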
void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
		    struct dma_buf_attachment *attachment,
		    struct sg_table *sgt)
{
	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
	dma_addr_t dma_addr;

	if (IS_ERR(priv) || !priv)
		return;

	nvgpu_mutex_acquire(&priv->lock);
	WARN_ON(priv->sgt != sgt);
	WARN_ON(priv->attach != attachment);
	priv->pin_count--;
	WARN_ON(priv->pin_count < 0);
	dma_addr = sg_dma_address(priv->sgt->sgl);
	if (priv->pin_count == 0) {
		dma_buf_unmap_attachment(priv->attach, priv->sgt,
					 DMA_BIDIRECTIONAL);
		dma_buf_detach(dmabuf, priv->attach);
	}
	nvgpu_mutex_release(&priv->lock);
}

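/*
 * Ensure the dma-buf carries nvgpu private data: allocate and attach a
 * gk20a_dmabuf_priv if none exists yet. Racing callers are serialized
 * with mm.priv_lock and an already-installed priv is reused.
 */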
int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev)
{
	struct gk20a *g = gk20a_get_platform(dev)->g;
	struct gk20a_dmabuf_priv *priv;

	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (likely(priv))
		return 0;

	nvgpu_mutex_acquire(&g->mm.priv_lock);
	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (priv)
		goto priv_exist_or_err;

	priv = nvgpu_kzalloc(g, sizeof(*priv));
	if (!priv) {
		priv = ERR_PTR(-ENOMEM);
		goto priv_exist_or_err;
	}

	nvgpu_mutex_init(&priv->lock);
	nvgpu_init_list_node(&priv->states);
	priv->g = g;
	dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv);

priv_exist_or_err:
	nvgpu_mutex_release(&g->mm.priv_lock);
	if (IS_ERR(priv))
		return -ENOMEM;

	return 0;
}

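/*
 * Look up the buffer state tracked for the given offset within the
 * dma-buf, creating and linking a new zero-initialized state if none
 * exists yet.
 */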
int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct gk20a *g,
			   u64 offset, struct gk20a_buffer_state **state)
{
	int err = 0;
	struct gk20a_dmabuf_priv *priv;
	struct gk20a_buffer_state *s;
	struct device *dev = dev_from_gk20a(g);

	if (WARN_ON(offset >= (u64)dmabuf->size))
		return -EINVAL;

	err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev);
	if (err)
		return err;

	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (WARN_ON(!priv))
		return -ENOSYS;

	nvgpu_mutex_acquire(&priv->lock);

	nvgpu_list_for_each_entry(s, &priv->states, gk20a_buffer_state, list)
		if (s->offset == offset)
			goto out;

	/* State not found, create state. */
	s = nvgpu_kzalloc(g, sizeof(*s));
	if (!s) {
		err = -ENOMEM;
		goto out;
	}

	s->offset = offset;
	nvgpu_init_list_node(&s->list);
	nvgpu_mutex_init(&s->lock);
	nvgpu_list_add_tail(&s->list, &priv->states);

out:
	nvgpu_mutex_release(&priv->lock);
	if (!err)
		*state = s;
	return err;
}