/*
 * Copyright (c) 2017-2018, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __COMMON_LINUX_VM_PRIV_H__
#define __COMMON_LINUX_VM_PRIV_H__

#include <nvgpu/types.h>

#include <linux/types.h>	/* for struct list_head and sector_t */
#include <linux/completion.h>	/* for struct completion */

#include <asm/cacheflush.h>

/*
 * A couple of places still flush caches explicitly. Any DMA buffer we
 * allocate from within the GPU driver is write-combined and therefore does
 * not need this, but there appear to be exceptions.
 */
#ifdef CONFIG_ARM64
#define outer_flush_range(a, b)
#define __cpuc_flush_dcache_area __flush_dcache_area
#endif
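
/*
 * Example (a sketch, not part of the original header): how a caller might
 * flush a CPU-cached buffer before the GPU reads it. The helper name is
 * hypothetical, and virt_to_phys() is assumed available (e.g. via
 * <linux/mm.h>). On ARM64 the outer_flush_range() call compiles away
 * because of the macro above.
 */
static inline void nvgpu_example_flush_cpu_cache(void *cpu_va, size_t size)
{
	/* Flush the inner data cache for the range. */
	__cpuc_flush_dcache_area(cpu_va, size);

	/* Flush the outer cache as well; a no-op on ARM64 (see above). */
	outer_flush_range(virt_to_phys(cpu_va),
			  virt_to_phys(cpu_va) + size);
}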

struct sg_table;
struct dma_buf;
struct device;

struct gk20a;
struct vm_gk20a;
struct vm_gk20a_mapping_batch;
struct nvgpu_vm_area;

struct nvgpu_os_buffer {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct device *dev;
};
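
/*
 * Sketch (assumption, not part of the original header): how an
 * nvgpu_os_buffer is typically populated from a user-supplied dmabuf FD.
 * The helper name and error handling are illustrative; a real build would
 * also need <linux/dma-buf.h> and <linux/err.h>.
 */
static inline int nvgpu_example_os_buffer_init(struct nvgpu_os_buffer *os_buf,
					       struct device *dev,
					       int dmabuf_fd)
{
	/* Take a reference on the dmabuf behind the FD. */
	os_buf->dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(os_buf->dmabuf))
		return PTR_ERR(os_buf->dmabuf);

	/* Attach the GPU device so the buffer can later be mapped for DMA. */
	os_buf->attachment = dma_buf_attach(os_buf->dmabuf, dev);
	if (IS_ERR(os_buf->attachment)) {
		dma_buf_put(os_buf->dmabuf);
		return PTR_ERR(os_buf->attachment);
	}

	os_buf->dev = dev;
	return 0;
}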

struct nvgpu_mapped_buf_priv {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	/* For fast reverse lookup (FD -> mapped_buf). */
	struct list_head nvmap_priv_entry;
	/* To allow waiting on swap I/O completion. */
	struct completion swap_io_done;
	/* Sector assignment for swapped-out data. */
	sector_t swap_sector;
};
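
/*
 * Sketch (assumption, not part of the original header): initializing the
 * list and completion members above before first use. The helper name and
 * the "no sector yet" sentinel are illustrative; INIT_LIST_HEAD() requires
 * <linux/list.h>.
 */
static inline void
nvgpu_example_mapped_buf_priv_init(struct nvgpu_mapped_buf_priv *priv)
{
	INIT_LIST_HEAD(&priv->nvmap_priv_entry);
	init_completion(&priv->swap_io_done);
	priv->swap_sector = (sector_t)-1;	/* no swap sector assigned */
}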

/* NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL must be set */
int nvgpu_vm_map_linux(struct vm_gk20a *vm,
		       struct dma_buf *dmabuf,
		       u64 map_addr,
		       u32 flags,
		       u32 page_size,
		       s16 compr_kind,
		       s16 incompr_kind,
		       int rw_flag,
		       u64 buffer_offset,
		       u64 mapping_size,
		       struct vm_gk20a_mapping_batch *mapping_batch,
		       u64 *gpu_va);

/*
 * Notes:
 * - batch may be NULL if the map operation is not part of a batch.
 * - NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL must be set.
 */
int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
			int dmabuf_fd,
			u64 *map_addr,
			u32 flags, /* NVGPU_AS_MAP_BUFFER_FLAGS_ */
			u32 page_size,
			s16 compr_kind,
			s16 incompr_kind,
			u64 buffer_offset,
			u64 mapping_size,
			struct vm_gk20a_mapping_batch *batch);
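
/*
 * Sketch (assumption, not part of the original header): mapping a whole
 * buffer while letting the allocator choose the GPU VA. The kind and
 * page-size values are illustrative; real callers take them from the
 * NVGPU_AS_IOCTL_MAP_BUFFER_EX ioctl arguments. SZ_64K comes from
 * <linux/sizes.h> and the flag from the nvgpu uapi header.
 */
static inline int nvgpu_example_map_whole_buffer(struct vm_gk20a *vm,
						 int dmabuf_fd, u64 *gpu_va)
{
	*gpu_va = 0;	/* 0: let the allocator pick the GPU VA */

	return nvgpu_vm_map_buffer(vm, dmabuf_fd, gpu_va,
				   NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
				   SZ_64K,	/* page size, illustrative */
				   -1,		/* compr_kind: none requested */
				   0,		/* incompr_kind, illustrative */
				   0,		/* buffer_offset: map from start */
				   0,		/* mapping_size: whole buffer */
				   NULL);	/* not part of a batch */
}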

/* find buffer corresponding to va */
int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
		      struct dma_buf **dmabuf,
		      u64 *offset);
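
/*
 * Sketch (assumption, not part of the original header): resolving a GPU VA,
 * e.g. from a fault report, back to the dmabuf that backs it. Assumes a 0
 * return from nvgpu_vm_find_buf() means success; pr_info() requires
 * <linux/printk.h>.
 */
static inline void nvgpu_example_print_backing_buf(struct vm_gk20a *vm,
						   u64 fault_va)
{
	struct dma_buf *dmabuf;
	u64 offset;

	if (nvgpu_vm_find_buf(vm, fault_va, &dmabuf, &offset) == 0)
		pr_info("GPU VA 0x%llx -> dmabuf %p + 0x%llx\n",
			fault_va, dmabuf, offset);
}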

enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
					  struct dma_buf *dmabuf);

#endif /* __COMMON_LINUX_VM_PRIV_H__ */