diff options
Diffstat (limited to 'drivers/gpu/nvgpu/include')
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/mm.h | 220 |
1 file changed, 220 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/include/nvgpu/mm.h b/drivers/gpu/nvgpu/include/nvgpu/mm.h new file mode 100644 index 00000000..13b33d9f --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/mm.h | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
3 | * copy of this software and associated documentation files (the "Software"), | ||
4 | * to deal in the Software without restriction, including without limitation | ||
5 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
6 | * and/or sell copies of the Software, and to permit persons to whom the | ||
7 | * Software is furnished to do so, subject to the following conditions: | ||
8 | * | ||
9 | * The above copyright notice and this permission notice shall be included in | ||
10 | * all copies or substantial portions of the Software. | ||
11 | * | ||
12 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
13 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
15 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
16 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
17 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
18 | * DEALINGS IN THE SOFTWARE. | ||
19 | */ | ||
20 | |||
21 | #ifndef __NVGPU_MM_H__ | ||
22 | #define __NVGPU_MM_H__ | ||
23 | |||
24 | #include <nvgpu/types.h> | ||
25 | #include <nvgpu/cond.h> | ||
26 | #include <nvgpu/thread.h> | ||
27 | #include <nvgpu/lock.h> | ||
28 | #include <nvgpu/atomic.h> | ||
29 | #include <nvgpu/nvgpu_mem.h> | ||
30 | #include <nvgpu/allocator.h> | ||
31 | #include <nvgpu/list.h> | ||
32 | |||
33 | struct gk20a; | ||
34 | struct vm_gk20a; | ||
35 | struct nvgpu_mem; | ||
36 | struct nvgpu_pd_cache; | ||
37 | |||
/* Number of MMU fault buffer types tracked. */
#define FAULT_TYPE_NUM 2 /* replay and nonreplay faults */

/*
 * Decoded snapshot of a single GPU MMU fault.
 *
 * Raw register/buffer values (ids, types, apertures) are kept alongside
 * the *_desc strings, which hold human-readable decodes for logging.
 */
struct mmu_fault_info {
	u64 inst_ptr;		/* instance block address of faulting context */
	u32 inst_aperture;	/* aperture the instance block lives in */
	u64 fault_addr;		/* faulting GPU virtual address */
	u32 fault_addr_aperture;
	u32 timestamp_lo;	/* fault timestamp, low/high 32 bits */
	u32 timestamp_hi;
	u32 mmu_engine_id;
	u32 gpc_id;
	u32 client_type;
	u32 client_id;
	u32 fault_type;
	u32 access_type;
	u32 protected_mode;
	u32 replayable_fault;	/* nonzero if this fault can be replayed */
	u32 replay_fault_en;
	u32 valid;		/* nonzero once this record holds a real fault */
	u32 faulted_pbdma;
	u32 faulted_engine;
	u32 faulted_subid;
	u32 chid;		/* channel id of the faulting channel */
	struct channel_gk20a *refch;	/* faulting channel, if resolved */
	const char *client_type_desc;	/* decoded strings for logging */
	const char *fault_type_desc;
	const char *client_id_desc;
};
66 | |||
/*
 * Cache/memory maintenance operations that can be requested on the GPU
 * memory hierarchy: framebuffer flush, L2 invalidate, L2 flush, and
 * compression backing cache (CBC) clean.
 */
enum nvgpu_flush_op {
	NVGPU_FLUSH_DEFAULT,
	NVGPU_FLUSH_FB,
	NVGPU_FLUSH_L2_INV,
	NVGPU_FLUSH_L2_FLUSH,
	NVGPU_FLUSH_CBC_CLEAN,
};
74 | |||
/*
 * Top-level memory management state for one GPU instance.  Owns the
 * per-aperture address spaces (BAR1/BAR2/PMU/...), their instance
 * blocks, the page-directory cache, and vidmem allocator bookkeeping.
 */
struct mm_gk20a {
	struct gk20a *g;	/* backpointer to the owning GPU device */

	/* GPU VA default sizes address spaces for channels */
	struct {
		u64 user_size;   /* userspace-visible GPU VA region */
		u64 kernel_size; /* kernel-only GPU VA region */
	} channel;

	/* Per-aperture VM + instance block state. */
	struct {
		u32 aperture_size;
		struct vm_gk20a *vm;
		struct nvgpu_mem inst_block;
	} bar1;

	struct {
		u32 aperture_size;
		struct vm_gk20a *vm;
		struct nvgpu_mem inst_block;
	} bar2;

	struct {
		u32 aperture_size;
		struct vm_gk20a *vm;
		struct nvgpu_mem inst_block;
	} pmu;

	struct {
		/* using pmu vm currently */
		struct nvgpu_mem inst_block;
	} hwpm;

	struct {
		struct vm_gk20a *vm;
		struct nvgpu_mem inst_block;
	} perfbuf;

	struct {
		struct vm_gk20a *vm;
	} cde;

	struct {
		struct vm_gk20a *vm;
	} ce;

	/* Shared cache of page-directory backing allocations. */
	struct nvgpu_pd_cache *pd_cache;

	struct nvgpu_mutex l2_op_lock;	/* serializes L2 maintenance ops */
	struct nvgpu_mutex tlb_lock;
	struct nvgpu_mutex priv_lock;

	struct nvgpu_mem bar2_desc;

#ifdef CONFIG_TEGRA_19x_GPU
	/* HW MMU fault buffers, one per fault type (replay/nonreplay). */
	struct nvgpu_mem hw_fault_buf[FAULT_TYPE_NUM];
	unsigned int hw_fault_buf_status[FAULT_TYPE_NUM];
	struct mmu_fault_info *fault_info[FAULT_TYPE_NUM];
	struct nvgpu_mutex hub_isr_mutex;	/* serializes HUB interrupt handling */
	u32 hub_intr_types;
#endif
	/*
	 * Separate function to cleanup the CE since it requires a channel to
	 * be closed which must happen before fifo cleanup.
	 */
	void (*remove_ce_support)(struct mm_gk20a *mm);
	void (*remove_support)(struct mm_gk20a *mm);
	bool sw_ready;		/* set once SW-side MM init has completed */
	int physical_bits;	/* presumably physical address width — confirm vs HAL */
	bool use_full_comp_tag_line;
	bool ltc_enabled_current;
	bool ltc_enabled_target;
	bool bypass_smmu;
	bool disable_bigpage;	/* force small pages only */
	bool has_physical_mode;

	struct nvgpu_mem sysmem_flush;	/* sysmem buffer used by flush ops — confirm usage */

	u32 pramin_window;	/* currently programmed PRAMIN window */
	struct nvgpu_spinlock pramin_window_lock;
	bool force_pramin; /* via debugfs */

	/* Video memory (on-board FB) allocator and clearing state. */
	struct {
		size_t size;
		u64 base;
		size_t bootstrap_size;
		u64 bootstrap_base;

		struct nvgpu_allocator allocator;
		struct nvgpu_allocator bootstrap_allocator;

		u32 ce_ctx_id;		/* CE context used for vidmem clears */
		volatile bool cleared;
		struct nvgpu_mutex first_clear_mutex;

		/* Buffers queued for clearing by the clearing thread. */
		struct nvgpu_list_node clear_list_head;
		struct nvgpu_mutex clear_list_mutex;

		struct nvgpu_cond clearing_thread_cond;
		struct nvgpu_thread clearing_thread;
		struct nvgpu_mutex clearing_thread_lock;
		nvgpu_atomic_t pause_count;	/* clearing-thread pause nesting */

		nvgpu_atomic64_t bytes_pending;	/* bytes still queued for clearing */
	} vidmem;
};
180 | |||
/* Navigate from an mm_gk20a or vm_gk20a back to the owning gk20a device. */
#define gk20a_from_mm(mm) ((mm)->g)
#define gk20a_from_vm(vm) ((vm)->mm->g)
183 | |||
/*
 * Size of the BAR1 aperture in megabytes.
 *
 * 16MB is more than enough for current use cases.
 */
static inline int bar1_aperture_size_mb_gk20a(void)
{
	const int aperture_mb = 16;

	return aperture_mb;
}
188 | |||
/* The maximum GPU VA range supported (2^38 = 256GB of address space) */
#define NV_GMMU_VA_RANGE 38

/* The default userspace-visible GPU VA size (1 << 37 = 128GB) */
#define NV_MM_DEFAULT_USER_SIZE (1ULL << 37)

/* The default kernel-reserved GPU VA size (1 << 32 = 4GB) */
#define NV_MM_DEFAULT_KERNEL_SIZE (1ULL << 32)
197 | |||
198 | /* | ||
199 | * When not using unified address spaces, the bottom 56GB of the space are used | ||
200 | * for small pages, and the remaining high memory is used for large pages. | ||
201 | */ | ||
202 | static inline u64 __nv_gmmu_va_small_page_limit(void) | ||
203 | { | ||
204 | return ((u64)SZ_1G * 56); | ||
205 | } | ||
206 | |||
/*
 * Pick the GMMU page size for a mapping of [base, base + size).  The
 * _fixed_map variant is for fixed-address mappings — confirm exact
 * semantics against the definitions in the mm implementation.
 */
enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
					      u64 base, u64 size);
enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size);

/* MM bring-up: CE context init, SW init, and HW programming. */
void nvgpu_init_mm_ce_context(struct gk20a *g);
int nvgpu_init_mm_support(struct gk20a *g);
int nvgpu_init_mm_setup_hw(struct gk20a *g);

/* Instance block helpers: address lookup and teardown. */
u64 nvgpu_inst_block_addr(struct gk20a *g, struct nvgpu_mem *mem);
void nvgpu_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block);

int nvgpu_mm_suspend(struct gk20a *g);

#endif /* __NVGPU_MM_H__ */