Diffstat (limited to 'include/gk20a/mm_gk20a.h')
-rw-r--r--   include/gk20a/mm_gk20a.h   155
1 file changed, 155 insertions(+), 0 deletions(-)
diff --git a/include/gk20a/mm_gk20a.h b/include/gk20a/mm_gk20a.h
new file mode 100644
index 0000000..76a1621
--- /dev/null
+++ b/include/gk20a/mm_gk20a.h
@@ -0,0 +1,155 @@
/*
 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef MM_GK20A_H
#define MM_GK20A_H

#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/allocator.h>
#include <nvgpu/vm.h>
#include <nvgpu/list.h>
#include <nvgpu/rbtree.h>
#include <nvgpu/kref.h>

enum gk20a_mem_rw_flag;

struct patch_desc {
        struct nvgpu_mem mem;
        u32 data_count;
};

struct zcull_ctx_desc {
        u64 gpu_va;
        u32 ctx_attr;
        u32 ctx_sw_mode;
};

struct pm_ctx_desc {
        struct nvgpu_mem mem;
        u32 pm_mode;
};

struct compbit_store_desc {
        struct nvgpu_mem mem;

        /* The value that is written to the hardware. This depends on
         * the number of LTCs and is not an address. */
        u64 base_hw;
};

struct gk20a_buffer_state {
        struct nvgpu_list_node list;

        /* The valid compbits and the fence must be changed atomically. */
        struct nvgpu_mutex lock;

        /* Offset of the surface within the dma-buf whose state is
         * described by this struct (one dma-buf can contain multiple
         * surfaces with different states). */
        size_t offset;

        /* A bitmask of valid sets of compbits (0 = uncompressed). */
        u32 valid_compbits;

        /* The ZBC color used on this buffer. */
        u32 zbc_color;

        /* This struct reflects the state of the buffer when this
         * fence signals. */
        struct gk20a_fence *fence;
};

static inline struct gk20a_buffer_state *
gk20a_buffer_state_from_list(struct nvgpu_list_node *node)
{
        return (struct gk20a_buffer_state *)
                ((uintptr_t)node - offsetof(struct gk20a_buffer_state, list));
}
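
/*
 * Usage sketch (illustrative, not taken from the rest of this header):
 * gk20a_buffer_state_from_list() is an open-coded container_of(); given a
 * pointer to the embedded list node it recovers the enclosing
 * gk20a_buffer_state. A caller that has pulled such a node off a list of
 * buffer states might do roughly:
 *
 *        struct gk20a_buffer_state *s = gk20a_buffer_state_from_list(node);
 *
 *        nvgpu_mutex_acquire(&s->lock);
 *        (inspect or update s->valid_compbits, s->zbc_color, s->fence)
 *        nvgpu_mutex_release(&s->lock);
 *
 * The mutex calls assume nvgpu's nvgpu_mutex_acquire()/nvgpu_mutex_release()
 * helpers; the comment on the lock field above is what motivates holding the
 * lock around compbits/fence updates.
 */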

struct gk20a;
struct channel_gk20a;

int gk20a_mm_fb_flush(struct gk20a *g);
void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate);
void gk20a_mm_cbc_clean(struct gk20a *g);
void gk20a_mm_l2_invalidate(struct gk20a *g);

#define dev_from_vm(vm) dev_from_gk20a(vm->mm->g)

void gk20a_mm_ltc_isr(struct gk20a *g);

bool gk20a_mm_mmu_debug_mode_enabled(struct gk20a *g);

int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block);
void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
                u32 big_page_size);
int gk20a_init_mm_setup_hw(struct gk20a *g);
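
/*
 * Illustrative sketch (an assumption about typical use, not a contract
 * stated by this header): an instance block is allocated first and then
 * initialized against a VM, e.g.
 *
 *        struct nvgpu_mem inst_block;
 *        int err = gk20a_alloc_inst_block(g, &inst_block);
 *
 *        if (err == 0)
 *                gk20a_init_inst_block(&inst_block, vm, big_page_size);
 *
 * where g, vm and big_page_size come from the caller's context; error
 * handling and the eventual freeing of inst_block are the caller's
 * responsibility.
 */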

u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
                        u64 map_offset,
                        struct nvgpu_sgt *sgt,
                        u64 buffer_offset,
                        u64 size,
                        u32 pgsz_idx,
                        u8 kind_v,
                        u32 ctag_offset,
                        u32 flags,
                        enum gk20a_mem_rw_flag rw_flag,
                        bool clear_ctags,
                        bool sparse,
                        bool priv,
                        struct vm_gk20a_mapping_batch *batch,
                        enum nvgpu_aperture aperture);

void gk20a_locked_gmmu_unmap(struct vm_gk20a *vm,
                        u64 vaddr,
                        u64 size,
                        u32 pgsz_idx,
                        bool va_allocated,
                        enum gk20a_mem_rw_flag rw_flag,
                        bool sparse,
                        struct vm_gk20a_mapping_batch *batch);
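
/*
 * Pairing sketch (illustrative; the details below are assumptions inferred
 * from the parameter names and the "locked_" prefix, not guarantees made by
 * this header): both calls are expected to run with the VM's GMMU update
 * lock held, and a mapping created by gk20a_locked_gmmu_map() is torn down
 * by gk20a_locked_gmmu_unmap() with the same vm, size and pgsz_idx:
 *
 *        u64 va = gk20a_locked_gmmu_map(vm, 0, sgt, 0, size, pgsz_idx,
 *                                       kind_v, ctag_offset, flags, rw_flag,
 *                                       false, false, false, batch,
 *                                       aperture);
 *
 *        gk20a_locked_gmmu_unmap(vm, va, size, pgsz_idx, true, rw_flag,
 *                                false, batch);
 *
 * Here sgt, kind_v, ctag_offset, flags, rw_flag, batch and aperture stand in
 * for values the caller already owns; passing 0 as map_offset is assumed to
 * let the map call pick the virtual address itself, which is why the unmap
 * passes va_allocated = true.
 */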

/* vm-as interface */
struct nvgpu_as_alloc_space_args;
struct nvgpu_as_free_space_args;
int gk20a_vm_release_share(struct gk20a_as_share *as_share);
int gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch);

void pde_range_from_vaddr_range(struct vm_gk20a *vm,
                                u64 addr_lo, u64 addr_hi,
                                u32 *pde_lo, u32 *pde_hi);
u32 gk20a_mm_get_iommu_bit(struct gk20a *g);

const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g,
                                                      u32 big_page_size);
void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *mem,
                struct vm_gk20a *vm);

extern const struct gk20a_mmu_level gk20a_mm_levels_64k[];
extern const struct gk20a_mmu_level gk20a_mm_levels_128k[];

u32 gk20a_get_pde_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
                       struct nvgpu_gmmu_pd *pd, u32 pd_idx);
u32 gk20a_get_pte_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
                       struct nvgpu_gmmu_pd *pd, u32 pd_idx);
#endif /* MM_GK20A_H */