path: root/drivers/gpu/nvgpu/os/linux/vm.c
/*
 * Copyright (c) 2017-2022, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-buf.h>
#include <linux/nvmap.h>
#include <linux/scatterlist.h>
#include <uapi/linux/nvgpu.h>

#include <nvgpu/log.h>
#include <nvgpu/lock.h>
#include <nvgpu/rbtree.h>
#include <nvgpu/vm_area.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/page_allocator.h>
#include <nvgpu/vidmem.h>
#include <nvgpu/utils.h>
#include <nvgpu/gk20a.h>

#include <nvgpu/linux/vm.h>
#include <nvgpu/linux/nvgpu_mem.h>

#include "gk20a/mm_gk20a.h"

#include "platform_gk20a.h"
#include "os_linux.h"
#include "dmabuf.h"
#include "dmabuf_vidmem.h"

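/*
 * Translate NVGPU_AS_MAP_BUFFER_FLAGS_* flags from the Linux UAPI into the
 * core NVGPU_VM_MAP_* flags consumed by the common VM code. The deprecated
 * MAPPABLE_COMPBITS flag is warned about and otherwise ignored.
 */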
static u32 nvgpu_vm_translate_linux_flags(struct gk20a *g, u32 flags)
{
	u32 core_flags = 0;

	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)
		core_flags |= NVGPU_VM_MAP_FIXED_OFFSET;
	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE)
		core_flags |= NVGPU_VM_MAP_CACHEABLE;
	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_IO_COHERENT)
		core_flags |= NVGPU_VM_MAP_IO_COHERENT;
	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_UNMAPPED_PTE)
		core_flags |= NVGPU_VM_MAP_UNMAPPED_PTE;
	if (!nvgpu_is_enabled(g, NVGPU_DISABLE_L3_SUPPORT)) {
		if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC)
			core_flags |= NVGPU_VM_MAP_L3_ALLOC;
	}
	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL)
		core_flags |= NVGPU_VM_MAP_DIRECT_KIND_CTRL;
	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_PLATFORM_ATOMIC)
		core_flags |= NVGPU_VM_MAP_PLATFORM_ATOMIC;

	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS)
		nvgpu_warn(g, "Ignoring deprecated flag: "
			   "NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS");

	return core_flags;
}

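/*
 * Find an existing mapping of @dmabuf with matching @kind in this VM. When
 * @dmabuf is an nvmap buffer, its priv list lets us walk only the mappings
 * of that buffer; otherwise fall back to enumerating every mapped buffer in
 * the VM's rbtree. Returns NULL if no matching mapping exists.
 */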
static struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_reverse(
	struct vm_gk20a *vm, struct dma_buf *dmabuf, u32 kind)
{
	struct nvgpu_rbtree_node *node = NULL;
	struct nvgpu_rbtree_node *root = vm->mapped_buffers;
	struct list_head *nvmap_priv;

	/* Try the fast lookup via the nvmap priv list first. */
	nvmap_priv = nvmap_get_priv_list(dmabuf);
	if (!IS_ERR(nvmap_priv)) {
		struct nvgpu_mapped_buf *mapped_buffer;
		struct nvgpu_mapped_buf_priv *priv;

		list_for_each_entry(priv, nvmap_priv, nvmap_priv_entry) {
			mapped_buffer = container_of(priv, struct nvgpu_mapped_buf, os_priv);
			if (mapped_buffer->os_priv.dmabuf == dmabuf &&
			    mapped_buffer->kind == kind)
				return mapped_buffer;
		}
	}

	/* Full traversal (not an nvmap buffer?) */
	printk(KERN_INFO "nvgpu: fast reverse lookup failed, falling back to full rbtree traversal\n");
	nvgpu_rbtree_enum_start(0, &node, root);

	while (node) {
		struct nvgpu_mapped_buf *mapped_buffer =
				mapped_buffer_from_rbtree_node(node);

		if (mapped_buffer->os_priv.dmabuf == dmabuf &&
		    mapped_buffer->kind == kind)
			return mapped_buffer;

		nvgpu_rbtree_enum_next(&node, node);
	}

	return NULL;
}

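/*
 * Look up which dma_buf backs @gpu_va and the offset of @gpu_va within that
 * mapping. Returns -EINVAL if @gpu_va is not covered by any mapped buffer.
 */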
int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
		      struct dma_buf **dmabuf,
		      u64 *offset)
{
	struct nvgpu_mapped_buf *mapped_buffer;
	struct gk20a *g = gk20a_from_vm(vm);

	nvgpu_log_fn(g, "gpu_va=0x%llx", gpu_va);

	nvgpu_mutex_acquire(&vm->update_gmmu_lock);

	mapped_buffer = __nvgpu_vm_find_mapped_buf_range(vm, gpu_va);
	if (!mapped_buffer) {
		nvgpu_mutex_release(&vm->update_gmmu_lock);
		return -EINVAL;
	}

	*dmabuf = mapped_buffer->os_priv.dmabuf;
	*offset = gpu_va - mapped_buffer->addr;

	nvgpu_mutex_release(&vm->update_gmmu_lock);

	return 0;
}

u64 nvgpu_os_buf_get_size(struct nvgpu_os_buffer *os_buf)
{
	return os_buf->dmabuf->size;
}

/*
 * vm->update_gmmu_lock must be held. This checks whether we have already
 * mapped the passed buffer into this VM. If so, the existing mapping is
 * returned for reuse.
 */
struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
					       struct nvgpu_os_buffer *os_buf,
					       u64 map_addr,
					       u32 flags,
					       int kind)
{
	struct gk20a *g = gk20a_from_vm(vm);
	struct nvgpu_mapped_buf *mapped_buffer = NULL;

	if (flags & NVGPU_VM_MAP_FIXED_OFFSET) {
		mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, map_addr);
		if (!mapped_buffer)
			return NULL;

		if (mapped_buffer->os_priv.dmabuf != os_buf->dmabuf ||
		    mapped_buffer->kind != (u32)kind)
			return NULL;
	} else {
		mapped_buffer =
			__nvgpu_vm_find_mapped_buf_reverse(vm,
							   os_buf->dmabuf,
							   kind);
		if (!mapped_buffer)
			return NULL;
	}

	if (mapped_buffer->flags != flags)
		return NULL;

	/*
	 * If we find the mapping here, then that means we have already mapped
	 * it and the prior pin and get must be undone.
	 */
	gk20a_mm_unpin(os_buf->dev, os_buf->dmabuf, os_buf->attachment,
		       mapped_buffer->os_priv.sgt);
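	/*
	 * Also drop the nvmap priv list entry here; nvgpu_vm_map_linux()
	 * re-adds the reused mapped_buffer to the list when it refreshes
	 * os_priv for this map call.
	 */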
	list_del(&mapped_buffer->os_priv.nvmap_priv_entry);
	dma_buf_put(os_buf->dmabuf);

	nvgpu_log(g, gpu_dbg_map,
		  "gv: 0x%04x_%08x + 0x%-7zu "
		  "[dma: 0x%010llx, pa: 0x%010llx] "
		  "pgsz=%-3dKb as=%-2d "
		  "flags=0x%x apt=%s (reused)",
		  u64_hi32(mapped_buffer->addr), u64_lo32(mapped_buffer->addr),
		  os_buf->dmabuf->size,
		  (u64)sg_dma_address(mapped_buffer->os_priv.sgt->sgl),
		  (u64)sg_phys(mapped_buffer->os_priv.sgt->sgl),
		  vm->gmmu_page_sizes[mapped_buffer->pgsz_idx] >> 10,
		  vm_aspace_id(vm),
		  mapped_buffer->flags,
		  nvgpu_aperture_str(g,
				     gk20a_dmabuf_aperture(g, os_buf->dmabuf)));

	return mapped_buffer;
}

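/*
 * Pin @dmabuf, wrap its sg_table in an nvgpu_sgt, and hand it to the common
 * nvgpu_vm_map() path. On success the GPU virtual address of the mapping is
 * returned through @gpu_va; on failure the buffer is unpinned again before
 * returning.
 */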
int nvgpu_vm_map_linux(struct vm_gk20a *vm,
		       struct dma_buf *dmabuf,
		       u64 map_addr,
		       u32 flags,
		       u32 page_size,
		       s16 compr_kind,
		       s16 incompr_kind,
		       int rw_flag,
		       u64 buffer_offset,
		       u64 mapping_size,
		       struct vm_gk20a_mapping_batch *batch,
		       u64 *gpu_va)
{
	struct gk20a *g = gk20a_from_vm(vm);
	struct device *dev = dev_from_gk20a(g);
	struct nvgpu_os_buffer os_buf;
	struct sg_table *sgt;
	struct nvgpu_sgt *nvgpu_sgt = NULL;
	struct nvgpu_mapped_buf *mapped_buffer = NULL;
	struct dma_buf_attachment *attachment;
	struct list_head *nvmap_priv;
	int err = 0;

	sgt = gk20a_mm_pin(dev, dmabuf, &attachment);
	if (IS_ERR(sgt)) {
		nvgpu_warn(g, "Failed to pin dma_buf!");
		return PTR_ERR(sgt);
	}
	os_buf.dmabuf = dmabuf;
	os_buf.attachment = attachment;
	os_buf.dev = dev;

	if (gk20a_dmabuf_aperture(g, dmabuf) == APERTURE_INVALID) {
		err = -EINVAL;
		goto clean_up;
	}

	nvgpu_sgt = nvgpu_linux_sgt_create(g, sgt);
	if (!nvgpu_sgt) {
		err = -ENOMEM;
		goto clean_up;
	}

	mapped_buffer = nvgpu_vm_map(vm,
				     &os_buf,
				     nvgpu_sgt,
				     map_addr,
				     mapping_size,
				     buffer_offset,
				     rw_flag,
				     flags,
				     compr_kind,
				     incompr_kind,
				     batch,
				     gk20a_dmabuf_aperture(g, dmabuf));

	nvgpu_sgt_free(g, nvgpu_sgt);

	if (IS_ERR(mapped_buffer)) {
		err = PTR_ERR(mapped_buffer);
		goto clean_up;
	}

	mapped_buffer->os_priv.dmabuf = dmabuf;
	mapped_buffer->os_priv.attachment = attachment;
	mapped_buffer->os_priv.sgt = sgt;
	init_completion(&mapped_buffer->os_priv.swap_io_done);
	nvmap_priv = nvmap_get_priv_list(dmabuf);
	if (!IS_ERR(nvmap_priv))
		list_add(&mapped_buffer->os_priv.nvmap_priv_entry, nvmap_priv);
	else
		/* So we can always safely call list_del(). */
		INIT_LIST_HEAD(&mapped_buffer->os_priv.nvmap_priv_entry);

	*gpu_va = mapped_buffer->addr;
	return 0;

clean_up:
	gk20a_mm_unpin(dev, dmabuf, attachment, sgt);

	return err;
}

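/*
 * IOCTL-facing map entry point: take a reference on @dmabuf_fd, validate the
 * fixed/non-fixed argument combinations and the buffer bounds, then delegate
 * to nvgpu_vm_map_linux(). The dma_buf reference taken here is dropped on
 * error, or else at unmap time.
 */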
int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
			int dmabuf_fd,
			u64 *map_addr,
			u32 flags, /*NVGPU_AS_MAP_BUFFER_FLAGS_*/
			u32 page_size,
			s16 compr_kind,
			s16 incompr_kind,
			u64 buffer_offset,
			u64 mapping_size,
			struct vm_gk20a_mapping_batch *batch)
{
	struct gk20a *g = gk20a_from_vm(vm);
	struct dma_buf *dmabuf;
	u64 ret_va;
	int err = 0;

	/* get ref to the mem handle (released on unmap_locked) */
	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf)) {
		nvgpu_warn(g, "%s: fd %d is not a dmabuf",
			   __func__, dmabuf_fd);
		return PTR_ERR(dmabuf);
	}

	/*
	 * For regular maps we do not accept either an input address or a
	 * buffer_offset.
	 */
	if (!(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) &&
	    (buffer_offset || *map_addr)) {
		nvgpu_err(g,
			  "Regular map with addr/buf offset is not supported!");
		dma_buf_put(dmabuf);
		return -EINVAL;
	}

	/*
	 * Map size is always buffer size for non fixed mappings. As such map
	 * size should be left as zero by userspace for non-fixed maps.
	 */
	if (mapping_size && !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) {
		nvgpu_err(g, "map_size && non-fixed-mapping!");
		dma_buf_put(dmabuf);
		return -EINVAL;
	}

	/* verify that we're not overflowing the buffer, i.e.
	 * (buffer_offset + mapping_size) > dmabuf->size.
	 *
	 * Since buffer_offset + mapping_size could overflow, first check
	 * that mapping_size <= dmabuf->size, at which point we can subtract
	 * mapping_size from both sides for the final comparison.
	 */
	if ((mapping_size > dmabuf->size) ||
			(buffer_offset > (dmabuf->size - mapping_size))) {
		nvgpu_err(g,
			  "buf size %llx < (offset(%llx) + map_size(%llx))",
			  (u64)dmabuf->size, buffer_offset, mapping_size);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}

	err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev_from_vm(vm));
	if (err) {
		dma_buf_put(dmabuf);
		return err;
	}

	err = nvgpu_vm_map_linux(vm, dmabuf, *map_addr,
				 nvgpu_vm_translate_linux_flags(g, flags),
				 page_size,
				 compr_kind, incompr_kind,
				 gk20a_mem_flag_none,
				 buffer_offset,
				 mapping_size,
				 batch,
				 &ret_va);

	if (!err)
		*map_addr = ret_va;
	else
		dma_buf_put(dmabuf);

	return err;
}

/*
 * This is the function callback for freeing OS specific components of an
 * nvgpu_mapped_buf. This should most likely never be called outside of the
 * core MM framework!
 *
 * Note: the VM lock will be held.
 */
void nvgpu_vm_unmap_system(struct nvgpu_mapped_buf *mapped_buffer)
{
	struct vm_gk20a *vm = mapped_buffer->vm;

	gk20a_mm_unpin(dev_from_vm(vm), mapped_buffer->os_priv.dmabuf,
		       mapped_buffer->os_priv.attachment,
		       mapped_buffer->os_priv.sgt);
	list_del(&mapped_buffer->os_priv.nvmap_priv_entry);
	dma_buf_put(mapped_buffer->os_priv.dmabuf);
}

/**
 * Given an nvgpu_mapped_buf m, map m->os_priv.sgt into m->addr.
 * Very similar to nvgpu_vm_map_buffer, except that this assumes all necessary
 * PTEs and PDEs have been created. This merely updates the physical
 * address(es) in the associated PTEs, leaving all other attributes unchanged.
 *
 * NOP if sgt is already mapped for addr.
 *
 * vm->update_gmmu_lock must be held.
 *
 * Caller is responsible for flushing the TLB and L2 caches.
 */
void nvgpu_vm_remap(struct nvgpu_mapped_buf *m)
{
	/* TODO: Input validation */
	struct scatterlist *sg;
	unsigned int i = 0;
	u64 curr_vaddr = m->addr;

	/*
	 * For each element of the scatterlist (based off the
	 * for_each_sgtable_dma_sg() macro in newer kernels).
	 */
	for_each_sg(m->os_priv.sgt->sgl, sg, m->os_priv.sgt->nents, i) {
		unsigned int sg_off = 0;

		/*
		 * Keep mapping data at the next unmapped virtual address
		 * until each scatterlist element is entirely mapped.
		 */
		while (sg_off < sg_dma_len(sg)) {
			int amt_mapped = __nvgpu_update_paddr(gk20a_from_vm(m->vm),
							      m->vm,
							      curr_vaddr,
							      sg_dma_address(sg) + sg_off);
			if (amt_mapped < 0) {
				printk(KERN_ERR "nvgpu: Error %d from __nvgpu_update_paddr() in nvgpu_vm_remap()! Had mapped %llu of %llu bytes.\n",
				       amt_mapped, curr_vaddr - m->addr,
				       m->size);
				return;
			}
			curr_vaddr += amt_mapped;
			sg_off += amt_mapped;
		}
	}

	if (curr_vaddr != m->addr + m->size) {
		printk(KERN_ERR "nvgpu: Mapped %llu bytes when %llu bytes expected! Expect page table corruption!\n",
		       curr_vaddr - m->addr, m->size);
	}
}
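
/*
 * A minimal sketch (illustration only, not part of this driver) of how a
 * swap-in path might use nvgpu_vm_remap() after replacing a buffer's backing
 * pages. The new_sgt variable and the exact TLB-flush hook are assumptions:
 *
 *	struct gk20a *g = gk20a_from_vm(m->vm);
 *
 *	nvgpu_mutex_acquire(&m->vm->update_gmmu_lock);
 *	m->os_priv.sgt = new_sgt;   // new DMA-mapped backing storage
 *	nvgpu_vm_remap(m);          // rewrite PTE physical addresses in place
 *	nvgpu_mutex_release(&m->vm->update_gmmu_lock);
 *
 *	g->ops.fb.tlb_invalidate(g, m->vm->pdb.mem);  // caller flushes TLB/L2
 */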