Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c  298
1 file changed, 1 insertion(+), 297 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 8936cd03..69d9e983 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -22,7 +22,6 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/scatterlist.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-attrs.h>
@@ -70,6 +69,7 @@
  * all the common APIs no longers have Linux stuff in them.
  */
 #include "common/linux/vm_priv.h"
+#include "common/linux/dmabuf.h"
 
 /*
  * GPU mapping life cycle
@@ -108,190 +108,6 @@ static int __must_check gk20a_init_hwpm(struct mm_gk20a *mm);
 static int __must_check gk20a_init_cde_vm(struct mm_gk20a *mm);
 static int __must_check gk20a_init_ce_vm(struct mm_gk20a *mm);
 
-struct gk20a_dmabuf_priv {
-	struct nvgpu_mutex lock;
-
-	struct gk20a *g;
-
-	struct gk20a_comptag_allocator *comptag_allocator;
-	struct gk20a_comptags comptags;
-
-	struct dma_buf_attachment *attach;
-	struct sg_table *sgt;
-
-	int pin_count;
-
-	struct nvgpu_list_node states;
-
-	u64 buffer_id;
-};
-
-static int gk20a_comptaglines_alloc(struct gk20a_comptag_allocator *allocator,
-		u32 *offset, u32 len)
-{
-	unsigned long addr;
-	int err = 0;
-
-	nvgpu_mutex_acquire(&allocator->lock);
-	addr = bitmap_find_next_zero_area(allocator->bitmap, allocator->size,
-			0, len, 0);
-	if (addr < allocator->size) {
-		/* number zero is reserved; bitmap base is 1 */
-		*offset = 1 + addr;
-		bitmap_set(allocator->bitmap, addr, len);
-	} else {
-		err = -ENOMEM;
-	}
-	nvgpu_mutex_release(&allocator->lock);
-
-	return err;
-}
-
-static void gk20a_comptaglines_free(struct gk20a_comptag_allocator *allocator,
-		u32 offset, u32 len)
-{
-	/* number zero is reserved; bitmap base is 1 */
-	u32 addr = offset - 1;
-	WARN_ON(offset == 0);
-	WARN_ON(addr > allocator->size);
-	WARN_ON(addr + len > allocator->size);
-
-	nvgpu_mutex_acquire(&allocator->lock);
-	bitmap_clear(allocator->bitmap, addr, len);
-	nvgpu_mutex_release(&allocator->lock);
-}
-
-static void gk20a_mm_delete_priv(void *_priv)
-{
-	struct gk20a_buffer_state *s, *s_tmp;
-	struct gk20a_dmabuf_priv *priv = _priv;
-	struct gk20a *g;
-
-	if (!priv)
-		return;
-
-	g = priv->g;
-
-	if (priv->comptags.lines) {
-		BUG_ON(!priv->comptag_allocator);
-		gk20a_comptaglines_free(priv->comptag_allocator,
-				priv->comptags.offset,
-				priv->comptags.allocated_lines);
-	}
-
-	/* Free buffer states */
-	nvgpu_list_for_each_entry_safe(s, s_tmp, &priv->states,
-				gk20a_buffer_state, list) {
-		gk20a_fence_put(s->fence);
-		nvgpu_list_del(&s->list);
-		nvgpu_kfree(g, s);
-	}
-
-	nvgpu_kfree(g, priv);
-}
-
-struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf)
-{
-	struct gk20a_dmabuf_priv *priv;
-
-	priv = dma_buf_get_drvdata(dmabuf, dev);
-	if (WARN_ON(!priv))
-		return ERR_PTR(-EINVAL);
-
-	nvgpu_mutex_acquire(&priv->lock);
-
-	if (priv->pin_count == 0) {
-		priv->attach = dma_buf_attach(dmabuf, dev);
-		if (IS_ERR(priv->attach)) {
-			nvgpu_mutex_release(&priv->lock);
-			return (struct sg_table *)priv->attach;
-		}
-
-		priv->sgt = dma_buf_map_attachment(priv->attach,
-						   DMA_BIDIRECTIONAL);
-		if (IS_ERR(priv->sgt)) {
-			dma_buf_detach(dmabuf, priv->attach);
-			nvgpu_mutex_release(&priv->lock);
-			return priv->sgt;
-		}
-	}
-
-	priv->pin_count++;
-	nvgpu_mutex_release(&priv->lock);
-	return priv->sgt;
-}
-
-void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
-		    struct sg_table *sgt)
-{
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-	dma_addr_t dma_addr;
-
-	if (IS_ERR(priv) || !priv)
-		return;
-
-	nvgpu_mutex_acquire(&priv->lock);
-	WARN_ON(priv->sgt != sgt);
-	priv->pin_count--;
-	WARN_ON(priv->pin_count < 0);
-	dma_addr = sg_dma_address(priv->sgt->sgl);
-	if (priv->pin_count == 0) {
-		dma_buf_unmap_attachment(priv->attach, priv->sgt,
-					 DMA_BIDIRECTIONAL);
-		dma_buf_detach(dmabuf, priv->attach);
-	}
-	nvgpu_mutex_release(&priv->lock);
-}
-
-void gk20a_get_comptags(struct device *dev, struct dma_buf *dmabuf,
-			struct gk20a_comptags *comptags)
-{
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-
-	if (!comptags)
-		return;
-
-	if (!priv) {
-		memset(comptags, 0, sizeof(*comptags));
-		return;
-	}
-
-	*comptags = priv->comptags;
-}
-
-int gk20a_alloc_comptags(struct gk20a *g,
-			 struct device *dev,
-			 struct dma_buf *dmabuf,
-			 struct gk20a_comptag_allocator *allocator,
-			 u32 lines)
-{
-	struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
-	u32 ctaglines_allocsize;
-	u32 offset;
-	int err;
-
-	if (!priv)
-		return -ENOSYS;
-
-	if (!lines)
-		return -EINVAL;
-
-	ctaglines_allocsize = lines;
-
-	/* store the allocator so we can use it when we free the ctags */
-	priv->comptag_allocator = allocator;
-	err = gk20a_comptaglines_alloc(allocator, &offset,
-			       ctaglines_allocsize);
-	if (err)
-		return err;
-
-	priv->comptags.offset = offset;
-	priv->comptags.lines = lines;
-	priv->comptags.allocated_lines = ctaglines_allocsize;
-
-	return 0;
-}
-
 static int gk20a_init_mm_reset_enable_hw(struct gk20a *g)
 {
 	gk20a_dbg_fn("");
@@ -1037,87 +853,6 @@ int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
 	return __gk20a_vm_bind_channel(as_share->vm, ch);
 }
 
-int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev)
-{
-	struct gk20a *g = gk20a_get_platform(dev)->g;
-	struct gk20a_dmabuf_priv *priv;
-	static u64 priv_count = 0;
-
-	priv = dma_buf_get_drvdata(dmabuf, dev);
-	if (likely(priv))
-		return 0;
-
-	nvgpu_mutex_acquire(&g->mm.priv_lock);
-	priv = dma_buf_get_drvdata(dmabuf, dev);
-	if (priv)
-		goto priv_exist_or_err;
-
-	priv = nvgpu_kzalloc(g, sizeof(*priv));
-	if (!priv) {
-		priv = ERR_PTR(-ENOMEM);
-		goto priv_exist_or_err;
-	}
-
-	nvgpu_mutex_init(&priv->lock);
-	nvgpu_init_list_node(&priv->states);
-	priv->buffer_id = ++priv_count;
-	priv->g = g;
-	dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv);
-
-priv_exist_or_err:
-	nvgpu_mutex_release(&g->mm.priv_lock);
-	if (IS_ERR(priv))
-		return -ENOMEM;
-
-	return 0;
-}
-
-int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct gk20a *g,
-			   u64 offset, struct gk20a_buffer_state **state)
-{
-	int err = 0;
-	struct gk20a_dmabuf_priv *priv;
-	struct gk20a_buffer_state *s;
-	struct device *dev = dev_from_gk20a(g);
-
-	if (WARN_ON(offset >= (u64)dmabuf->size))
-		return -EINVAL;
-
-	err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev);
-	if (err)
-		return err;
-
-	priv = dma_buf_get_drvdata(dmabuf, dev);
-	if (WARN_ON(!priv))
-		return -ENOSYS;
-
-	nvgpu_mutex_acquire(&priv->lock);
-
-	nvgpu_list_for_each_entry(s, &priv->states, gk20a_buffer_state, list)
-		if (s->offset == offset)
-			goto out;
-
-	/* State not found, create state. */
-	s = nvgpu_kzalloc(g, sizeof(*s));
-	if (!s) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	s->offset = offset;
-	nvgpu_init_list_node(&s->list);
-	nvgpu_mutex_init(&s->lock);
-	nvgpu_list_add_tail(&s->list, &priv->states);
-
-out:
-	nvgpu_mutex_release(&priv->lock);
-	if (!err)
-		*state = s;
-	return err;
-
-
-}
-
 int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 			int dmabuf_fd,
 			u64 *offset_align,
@@ -1613,34 +1348,3 @@ const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
 	return (big_page_size == SZ_64K) ?
 		gk20a_mm_levels_64k : gk20a_mm_levels_128k;
 }
-
-int gk20a_mm_get_buffer_info(struct device *dev, int dmabuf_fd,
-			u64 *buffer_id, u64 *buffer_len)
-{
-	struct dma_buf *dmabuf;
-	struct gk20a_dmabuf_priv *priv;
-	int err = 0;
-
-	dmabuf = dma_buf_get(dmabuf_fd);
-	if (IS_ERR(dmabuf)) {
-		dev_warn(dev, "%s: fd %d is not a dmabuf", __func__, dmabuf_fd);
-		return PTR_ERR(dmabuf);
-	}
-
-	err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev);
-	if (err) {
-		dev_warn(dev, "Failed to allocate dmabuf drvdata (err = %d)",
-			 err);
-		goto clean_up;
-	}
-
-	priv = dma_buf_get_drvdata(dmabuf, dev);
-	if (likely(priv)) {
-		*buffer_id = priv->buffer_id;
-		*buffer_len = dmabuf->size;
-	}
-
-clean_up:
-	dma_buf_put(dmabuf);
-	return err;
-}