summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/linux/dmabuf.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/dmabuf.c')
-rw-r--r--drivers/gpu/nvgpu/common/linux/dmabuf.c218
1 files changed, 0 insertions, 218 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/dmabuf.c b/drivers/gpu/nvgpu/common/linux/dmabuf.c
deleted file mode 100644
index 129739f0..00000000
--- a/drivers/gpu/nvgpu/common/linux/dmabuf.c
+++ /dev/null
@@ -1,218 +0,0 @@
1/*
2* Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

#include <nvgpu/comptags.h>
#include <nvgpu/enabled.h>

#include <nvgpu/linux/vm.h>
#include <nvgpu/linux/vidmem.h>

#include "gk20a/gk20a.h"

#include "platform_gk20a.h"
#include "dmabuf.h"
#include "os_linux.h"
32
33static void gk20a_mm_delete_priv(void *_priv)
34{
35 struct gk20a_buffer_state *s, *s_tmp;
36 struct gk20a_dmabuf_priv *priv = _priv;
37 struct gk20a *g;
38
39 if (!priv)
40 return;
41
42 g = priv->g;
43
44 if (priv->comptags.allocated && priv->comptags.lines) {
45 BUG_ON(!priv->comptag_allocator);
46 gk20a_comptaglines_free(priv->comptag_allocator,
47 priv->comptags.offset,
48 priv->comptags.lines);
49 }
50
51 /* Free buffer states */
52 nvgpu_list_for_each_entry_safe(s, s_tmp, &priv->states,
53 gk20a_buffer_state, list) {
54 gk20a_fence_put(s->fence);
55 nvgpu_list_del(&s->list);
56 nvgpu_kfree(g, s);
57 }
58
59 nvgpu_kfree(g, priv);
60}
61
62enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
63 struct dma_buf *dmabuf)
64{
65 struct gk20a *buf_owner = nvgpu_vidmem_buf_owner(dmabuf);
66 bool unified_memory = nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY);
67
68 if (buf_owner == NULL) {
69 /* Not nvgpu-allocated, assume system memory */
70 return APERTURE_SYSMEM;
71 } else if (WARN_ON(buf_owner == g && unified_memory)) {
72 /* Looks like our video memory, but this gpu doesn't support
73 * it. Warn about a bug and bail out */
74 nvgpu_warn(g,
75 "dmabuf is our vidmem but we don't have local vidmem");
76 return APERTURE_INVALID;
77 } else if (buf_owner != g) {
78 /* Someone else's vidmem */
79 return APERTURE_INVALID;
80 } else {
81 /* Yay, buf_owner == g */
82 return APERTURE_VIDMEM;
83 }
84}
85
86struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf,
87 struct dma_buf_attachment **attachment)
88{
89 struct gk20a_dmabuf_priv *priv;
90
91 priv = dma_buf_get_drvdata(dmabuf, dev);
92 if (WARN_ON(!priv))
93 return ERR_PTR(-EINVAL);
94
95 nvgpu_mutex_acquire(&priv->lock);
96
97 if (priv->pin_count == 0) {
98 priv->attach = dma_buf_attach(dmabuf, dev);
99 if (IS_ERR(priv->attach)) {
100 nvgpu_mutex_release(&priv->lock);
101 return (struct sg_table *)priv->attach;
102 }
103
104 priv->sgt = dma_buf_map_attachment(priv->attach,
105 DMA_BIDIRECTIONAL);
106 if (IS_ERR(priv->sgt)) {
107 dma_buf_detach(dmabuf, priv->attach);
108 nvgpu_mutex_release(&priv->lock);
109 return priv->sgt;
110 }
111 }
112
113 priv->pin_count++;
114 nvgpu_mutex_release(&priv->lock);
115 *attachment = priv->attach;
116 return priv->sgt;
117}
118
119void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
120 struct dma_buf_attachment *attachment,
121 struct sg_table *sgt)
122{
123 struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
124 dma_addr_t dma_addr;
125
126 if (IS_ERR(priv) || !priv)
127 return;
128
129 nvgpu_mutex_acquire(&priv->lock);
130 WARN_ON(priv->sgt != sgt);
131 WARN_ON(priv->attach != attachment);
132 priv->pin_count--;
133 WARN_ON(priv->pin_count < 0);
134 dma_addr = sg_dma_address(priv->sgt->sgl);
135 if (priv->pin_count == 0) {
136 dma_buf_unmap_attachment(priv->attach, priv->sgt,
137 DMA_BIDIRECTIONAL);
138 dma_buf_detach(dmabuf, priv->attach);
139 }
140 nvgpu_mutex_release(&priv->lock);
141}
142
/*
 * Lazily attach nvgpu private data (struct gk20a_dmabuf_priv) to a
 * dma_buf for this device. Idempotent: returns 0 immediately when the
 * drvdata already exists.
 *
 * Uses double-checked locking under g->mm.priv_lock so two concurrent
 * callers cannot both install drvdata; the statement order here is
 * load-bearing. gk20a_mm_delete_priv() is registered as the destructor.
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev)
{
	struct gk20a *g = gk20a_get_platform(dev)->g;
	struct gk20a_dmabuf_priv *priv;

	/* Fast path: drvdata already installed, no lock needed. */
	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (likely(priv))
		return 0;

	nvgpu_mutex_acquire(&g->mm.priv_lock);
	/* Re-check under the lock: someone may have raced us here. */
	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (priv)
		goto priv_exist_or_err;

	priv = nvgpu_kzalloc(g, sizeof(*priv));
	if (!priv) {
		/* Encode the failure in priv; decoded after unlock. */
		priv = ERR_PTR(-ENOMEM);
		goto priv_exist_or_err;
	}

	nvgpu_mutex_init(&priv->lock);
	nvgpu_init_list_node(&priv->states);
	priv->g = g;
	dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv);

priv_exist_or_err:
	nvgpu_mutex_release(&g->mm.priv_lock);
	if (IS_ERR(priv))
		return -ENOMEM;

	return 0;
}
175
/*
 * Look up (or lazily create) the per-offset buffer state for a dma_buf.
 *
 * Ensures the buffer has nvgpu drvdata, then searches priv->states for
 * an entry matching 'offset' under priv->lock; if none exists a new
 * zeroed state is allocated and appended. On success *state points at
 * the (shared, long-lived) entry - ownership stays with priv and it is
 * freed by gk20a_mm_delete_priv().
 *
 * Returns 0 on success, -EINVAL for an out-of-range offset, -ENOSYS if
 * drvdata is unexpectedly missing, -ENOMEM on allocation failure.
 */
int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct gk20a *g,
			   u64 offset, struct gk20a_buffer_state **state)
{
	int err = 0;
	struct gk20a_dmabuf_priv *priv;
	struct gk20a_buffer_state *s;
	struct device *dev = dev_from_gk20a(g);

	if (WARN_ON(offset >= (u64)dmabuf->size))
		return -EINVAL;

	/* Make sure the drvdata exists before we try to use it. */
	err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev);
	if (err)
		return err;

	priv = dma_buf_get_drvdata(dmabuf, dev);
	if (WARN_ON(!priv))
		return -ENOSYS;

	nvgpu_mutex_acquire(&priv->lock);

	/* Existing state for this offset? Then we are done (err == 0). */
	nvgpu_list_for_each_entry(s, &priv->states, gk20a_buffer_state, list)
		if (s->offset == offset)
			goto out;

	/* State not found, create state. */
	s = nvgpu_kzalloc(g, sizeof(*s));
	if (!s) {
		err = -ENOMEM;
		goto out;
	}

	s->offset = offset;
	nvgpu_init_list_node(&s->list);
	nvgpu_mutex_init(&s->lock);
	nvgpu_list_add_tail(&s->list, &priv->states);

out:
	nvgpu_mutex_release(&priv->lock);
	if (!err)
		*state = s;
	return err;
}