drivers/gpu/nvgpu/include/nvgpu/linux/vm.h

/*
 * Copyright (c) 2017, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __COMMON_LINUX_VM_PRIV_H__
#define __COMMON_LINUX_VM_PRIV_H__

#include <nvgpu/types.h>

#include <asm/cacheflush.h>

/*
 * A couple of places still flush caches explicitly. Any DMA buffer we
 * allocate from within the GPU driver is write-combined and as a result
 * does not need this, but there seem to be exceptions.
 */
#ifdef CONFIG_ARM64
#define outer_flush_range(a, b)
#define __cpuc_flush_dcache_area __flush_dcache_area
#endif
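
/*
 * Illustrative usage (a sketch, not taken from driver code; vaddr, paddr
 * and size are hypothetical). On 32-bit ARM a CPU-cached buffer would be
 * flushed before GPU access roughly like this; with the CONFIG_ARM64
 * definitions above, outer_flush_range() becomes a no-op and the d-cache
 * flush is routed to __flush_dcache_area():
 *
 *	__cpuc_flush_dcache_area(vaddr, size);
 *	outer_flush_range(paddr, paddr + size);
 */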

struct sg_table;
struct dma_buf;

struct gk20a;
struct vm_gk20a;
struct vm_gk20a_mapping_batch;
struct nvgpu_vm_area;

struct buffer_attrs {
	struct sg_table *sgt;		/* Scatter-gather table backing the buffer. */
	u64 size;			/* Buffer size in bytes. */
	u64 align;			/* Alignment required for the GPU mapping. */
	u32 ctag_offset;		/* Compression tag (comptag) offset. */
	u32 ctag_lines;			/* Comptag lines needed for this buffer. */
	u32 ctag_allocated_lines;	/* Comptag lines actually allocated. */
	int pgsz_idx;			/* Page size index used for the mapping. */
	u8 kind_v;			/* Kind value for the mapping. */
	bool use_kind_v;		/* True if kind_v is valid. */
	u8 uc_kind_v;			/* Incompressible (uncompressed) kind value. */
	bool use_uc_kind_v;		/* True if uc_kind_v is valid. */
	bool ctag_user_mappable;	/* True if comptags are user-mappable. */
};

u64 nvgpu_vm_map_linux(struct vm_gk20a *vm,
		       struct dma_buf *dmabuf,
		       u64 offset_align,
		       u32 flags,

		       /*
			* compressible kind if
			* NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is
			* specified, otherwise just the kind
			*/
		       s16 compr_kind,

		       /*
			* incompressible kind if
			* NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is
			* specified, otherwise ignored
			*/
		       s16 incompr_kind,

		       bool user_mapped,
		       int rw_flag,
		       u64 buffer_offset,
		       u64 mapping_size,
		       struct vm_gk20a_mapping_batch *mapping_batch);

/*
 * Notes:
 * - batch may be NULL if the map op is not part of a batch.
 * - If NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is set,
 *   compr_kind and incompr_kind work as explained in nvgpu.h.
 * - If NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is NOT set,
 *   compr_kind holds the kind, and the kernel will determine whether it is
 *   a compressible or an incompressible kind. If it is compressible, the
 *   kernel will also determine the incompressible counterpart or return an
 *   error.
 */
int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
			int dmabuf_fd,
			u64 *offset_align,
			u32 flags, /* NVGPU_AS_MAP_BUFFER_FLAGS_ */
			s16 compr_kind,
			s16 incompr_kind,
			u64 buffer_offset,
			u64 mapping_size,
			struct vm_gk20a_mapping_batch *batch);
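
/*
 * Illustrative calls (a sketch, not taken from driver code; vm, fd and the
 * kind values are hypothetical, and buffer_offset/mapping_size of 0 are
 * assumed to mean "map the whole buffer"):
 *
 *	u64 gpu_va = 0;
 *	int err;
 *
 *	// Direct kind control: pass the compressible and incompressible
 *	// kinds explicitly.
 *	err = nvgpu_vm_map_buffer(vm, fd, &gpu_va,
 *			NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
 *			compr_kind, incompr_kind, 0, 0, NULL);
 *
 *	// Legacy path: a single kind in compr_kind; the kernel resolves
 *	// the incompressible counterpart itself, and incompr_kind is
 *	// ignored.
 *	err = nvgpu_vm_map_buffer(vm, fd, &gpu_va, 0, kind, 0, 0, 0, NULL);
 */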

/* Note: batch may be NULL if the unmap op is not part of a batch. */
int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
			  struct vm_gk20a_mapping_batch *batch);
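
/*
 * Illustrative batched unmap (a sketch; assumes the batch helpers
 * nvgpu_vm_mapping_batch_start()/nvgpu_vm_mapping_batch_finish() from
 * nvgpu's common VM code, and hypothetical GPU VAs va_a/va_b). Batching
 * lets per-unmap overhead such as TLB invalidates be coalesced into the
 * finish step:
 *
 *	struct vm_gk20a_mapping_batch batch;
 *
 *	nvgpu_vm_mapping_batch_start(&batch);
 *	nvgpu_vm_unmap_buffer(vm, va_a, &batch);
 *	nvgpu_vm_unmap_buffer(vm, va_b, &batch);
 *	nvgpu_vm_mapping_batch_finish(vm, &batch);
 */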

/* Find the dma_buf and offset corresponding to a GPU virtual address. */
int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
		      struct dma_buf **dmabuf,
		      u64 *offset);
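
/*
 * Illustrative lookup (a sketch; gpu_va is hypothetical): resolve a GPU
 * virtual address back to its backing dma_buf and the byte offset within
 * that buffer:
 *
 *	struct dma_buf *dmabuf;
 *	u64 offset;
 *
 *	if (!nvgpu_vm_find_buf(vm, gpu_va, &dmabuf, &offset))
 *		// gpu_va lands at byte 'offset' inside 'dmabuf'
 */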

enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
					  struct dma_buf *dmabuf);
int validate_fixed_buffer(struct vm_gk20a *vm,
			  struct buffer_attrs *bfr,
			  u64 map_offset, u64 map_size,
			  struct nvgpu_vm_area **pva_node);

#endif /* __COMMON_LINUX_VM_PRIV_H__ */