Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/mm_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/gp10b/mm_gp10b.c  417
1 file changed, 417 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
new file mode 100644
index 00000000..1b6b6641
--- /dev/null
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -0,0 +1,417 @@
/*
 * GP10B MMU
 *
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include "gk20a/gk20a.h"
#include "mm_gp10b.h"
#include "rpfb_gp10b.h"
#include "hw_fb_gp10b.h"
#include "hw_ram_gp10b.h"
#include "hw_bus_gp10b.h"
#include "hw_gmmu_gp10b.h"
#include "gk20a/semaphore_gk20a.h"

/* GP10B supports a 36-bit physical address space */
static u32 gp10b_mm_get_physical_addr_bits(struct gk20a *g)
{
	return 36;
}

static int gp10b_init_mm_setup_hw(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	struct mem_desc *inst_block = &mm->bar1.inst_block;
	int err = 0;

	gk20a_dbg_fn("");

	g->ops.fb.set_mmu_page_size(g);

	/* point the FB flush logic at the sysmem flush page (256-byte units) */
	gk20a_writel(g, fb_niso_flush_sysmem_addr_r(),
		(g->ops.mm.get_iova_addr(g, g->mm.sysmem_flush.sgt->sgl, 0)
		 >> 8ULL));

	g->ops.mm.bar1_bind(g, inst_block);

	if (g->ops.mm.init_bar2_mm_hw_setup) {
		err = g->ops.mm.init_bar2_mm_hw_setup(g);
		if (err)
			return err;
	}

	/* issue the FB flush twice; give up if either attempt reports busy */
	if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g))
		return -EBUSY;

	err = gp10b_replayable_pagefault_buffer_init(g);

	gk20a_dbg_fn("done");
	return err;
}

static int gp10b_init_bar2_vm(struct gk20a *g)
{
	int err;
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = &mm->bar2.vm;
	struct mem_desc *inst_block = &mm->bar2.inst_block;
	u32 big_page_size = gk20a_get_platform(g->dev)->default_big_page_size;

	/* BAR2 aperture size is 32MB, with a 4K low hole */
	mm->bar2.aperture_size = 32 << 20;
	gk20a_dbg_info("bar2 vm size = 0x%x", mm->bar2.aperture_size);
	gk20a_init_vm(mm, vm, big_page_size, SZ_4K,
		mm->bar2.aperture_size - SZ_4K,
		mm->bar2.aperture_size, false, false, "bar2");

	/* allocate instance mem for bar2 */
	err = gk20a_alloc_inst_block(g, inst_block);
	if (err)
		goto clean_up_va;

	g->ops.mm.init_inst_block(inst_block, vm, big_page_size);

	return 0;

clean_up_va:
	gk20a_deinit_vm(vm);
	return err;
}

static int gp10b_init_bar2_mm_hw_setup(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;
	struct mem_desc *inst_block = &mm->bar2.inst_block;
	u64 inst_pa = gk20a_mm_inst_block_addr(g, inst_block);

	gk20a_dbg_fn("");

	g->ops.fb.set_mmu_page_size(g);

	/* the block pointer register takes the shifted instance block address */
	inst_pa = (u32)(inst_pa >> bus_bar2_block_ptr_shift_v());
	gk20a_dbg_info("bar2 inst block ptr: 0x%08x", (u32)inst_pa);

	gk20a_writel(g, bus_bar2_block_r(),
		     gk20a_aperture_mask(g, inst_block,
			bus_bar2_block_target_sys_mem_ncoh_f(),
			bus_bar2_block_target_vid_mem_f()) |
		     bus_bar2_block_mode_virtual_f() |
		     bus_bar2_block_ptr_f(inst_pa));

	gk20a_dbg_fn("done");
	return 0;
}

static u64 gp10b_mm_phys_addr_translate(struct gk20a *g, u64 phys_addr,
				u32 flags)
{
	/* without an IOMMU, IO-coherent mappings are tagged in the address itself */
	if (!device_is_iommuable(dev_from_gk20a(g)))
		if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_IO_COHERENT)
			return phys_addr |
				1ULL << NVGPU_MM_GET_IO_COHERENCE_BIT;

	return phys_addr;
}

static u64 gp10b_mm_iova_addr(struct gk20a *g, struct scatterlist *sgl,
				u32 flags)
{
	/* no IOMMU, or no SMMU mapping yet: fall back to the physical address */
	if (!device_is_iommuable(dev_from_gk20a(g)))
		return gp10b_mm_phys_addr_translate(g, sg_phys(sgl), flags);

	if (sg_dma_address(sgl) == 0)
		return gp10b_mm_phys_addr_translate(g, sg_phys(sgl), flags);

	if (sg_dma_address(sgl) == DMA_ERROR_CODE)
		return 0;

	return gk20a_mm_smmu_vaddr_translate(g, sg_dma_address(sgl));
}

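/*
 * gk20a_pde_wr32() addresses 32-bit words, so these helpers convert an
 * entry index into a word offset: the new-format PDE and PTE are two
 * words each (written as pde/pte + 0 and + 1 below), while the dual PDE
 * further down is four words.
 */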
static u32 pde3_from_index(u32 i)
{
	return i * gmmu_new_pde__size_v() / sizeof(u32);
}

static u32 pte3_from_index(u32 i)
{
	return i * gmmu_new_pte__size_v() / sizeof(u32);
}

static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
		struct gk20a_mm_entry *parent,
		u32 i, u32 gmmu_pgsz_idx,
		struct scatterlist **sgl,
		u64 *offset,
		u64 *iova,
		u32 kind_v, u64 *ctag,
		bool cacheable, bool unmapped_pte,
		int rw_flag, bool sparse, bool priv,
		enum gk20a_aperture aperture)
{
	struct gk20a *g = gk20a_from_vm(vm);
	u64 pte_addr = 0;
	/* the next-level directory/table backing this slot */
	struct gk20a_mm_entry *pte = parent->entries + i;
	u32 pde_v[2] = {0, 0};
	u32 pde;

	gk20a_dbg_fn("");

	pte_addr = gk20a_pde_addr(g, pte) >> gmmu_new_pde_address_shift_v();

	pde_v[0] |= gk20a_aperture_mask(g, &pte->mem,
			gmmu_new_pde_aperture_sys_mem_ncoh_f(),
			gmmu_new_pde_aperture_video_memory_f());
	pde_v[0] |= gmmu_new_pde_address_sys_f(u64_lo32(pte_addr));
	pde_v[0] |= gmmu_new_pde_vol_true_f();
	/* the upper address bits land in the second word */
	pde_v[1] |= pte_addr >> 24;
	pde = pde3_from_index(i);

	gk20a_pde_wr32(g, parent, pde + 0, pde_v[0]);
	gk20a_pde_wr32(g, parent, pde + 1, pde_v[1]);

	gk20a_dbg(gpu_dbg_pte, "pde:%d,sz=%d = 0x%x,0x%08x",
		  i, gmmu_pgsz_idx, pde_v[1], pde_v[0]);
	gk20a_dbg_fn("done");
	return 0;
}

static u32 pde0_from_index(u32 i)
{
	return i * gmmu_new_dual_pde__size_v() / sizeof(u32);
}

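/*
 * The 16-byte "dual" PDE at the lowest directory level carries two page
 * table pointers: words 0-1 describe the big-page table and words 2-3
 * the small-page table, so both page sizes can coexist under one PDE.
 */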
static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
		struct gk20a_mm_entry *pte,
		u32 i, u32 gmmu_pgsz_idx,
		struct scatterlist **sgl,
		u64 *offset,
		u64 *iova,
		u32 kind_v, u64 *ctag,
		bool cacheable, bool unmapped_pte,
		int rw_flag, bool sparse, bool priv,
		enum gk20a_aperture aperture)
{
	struct gk20a *g = gk20a_from_vm(vm);
	bool small_valid, big_valid;
	u32 pte_addr_small = 0, pte_addr_big = 0;
	struct gk20a_mm_entry *entry = pte->entries + i;
	u32 pde_v[4] = {0, 0, 0, 0};
	u32 pde;

	gk20a_dbg_fn("");

	small_valid = entry->mem.size && entry->pgsz == gmmu_page_size_small;
	big_valid = entry->mem.size && entry->pgsz == gmmu_page_size_big;

	if (small_valid) {
		pte_addr_small = gk20a_pde_addr(g, entry)
				 >> gmmu_new_dual_pde_address_shift_v();
	}

	if (big_valid)
		pte_addr_big = gk20a_pde_addr(g, entry)
			       >> gmmu_new_dual_pde_address_big_shift_v();

	/* small-page half: words 2-3 */
	if (small_valid) {
		pde_v[2] |= gmmu_new_dual_pde_address_small_sys_f(pte_addr_small);
		pde_v[2] |= gk20a_aperture_mask(g, &entry->mem,
			gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(),
			gmmu_new_dual_pde_aperture_small_video_memory_f());
		pde_v[2] |= gmmu_new_dual_pde_vol_small_true_f();
		pde_v[3] |= pte_addr_small >> 24;
	}

	/* big-page half: words 0-1 */
	if (big_valid) {
		pde_v[0] |= gmmu_new_dual_pde_address_big_sys_f(pte_addr_big);
		pde_v[0] |= gmmu_new_dual_pde_vol_big_true_f();
		pde_v[0] |= gk20a_aperture_mask(g, &entry->mem,
			gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(),
			gmmu_new_dual_pde_aperture_big_video_memory_f());
		pde_v[1] |= pte_addr_big >> 28;
	}

	pde = pde0_from_index(i);

	gk20a_pde_wr32(g, pte, pde + 0, pde_v[0]);
	gk20a_pde_wr32(g, pte, pde + 1, pde_v[1]);
	gk20a_pde_wr32(g, pte, pde + 2, pde_v[2]);
	gk20a_pde_wr32(g, pte, pde + 3, pde_v[3]);

	gk20a_dbg(gpu_dbg_pte, "pde:%d,sz=%d [0x%08x, 0x%08x, 0x%x, 0x%08x]",
		  i, gmmu_pgsz_idx, pde_v[3], pde_v[2], pde_v[1], pde_v[0]);
	gk20a_dbg_fn("done");
	return 0;
}

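/*
 * New-format PTE layout as written below: word 0 packs valid, aperture,
 * the low address bits, privilege, read-only and volatile flags; word 1
 * packs the high address bits, the kind and the comptag line.
 */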
static int update_gmmu_pte_locked(struct vm_gk20a *vm,
		struct gk20a_mm_entry *pte,
		u32 i, u32 gmmu_pgsz_idx,
		struct scatterlist **sgl,
		u64 *offset,
		u64 *iova,
		u32 kind_v, u64 *ctag,
		bool cacheable, bool unmapped_pte,
		int rw_flag, bool sparse, bool priv,
		enum gk20a_aperture aperture)
{
	struct gk20a *g = vm->mm->g;
	u32 page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx];
	u64 ctag_granularity = g->ops.fb.compression_page_size(g);
	u32 pte_w[2] = {0, 0}; /* invalid pte */
	u32 pte_i;

	if (*iova) {
		/* a backing address is present: build a real PTE */
		u32 pte_valid = unmapped_pte ?
			gmmu_new_pte_valid_false_f() :
			gmmu_new_pte_valid_true_f();
		u32 iova_v = *iova >> gmmu_new_pte_address_shift_v();
		u32 pte_addr = aperture == APERTURE_SYSMEM ?
				gmmu_new_pte_address_sys_f(iova_v) :
				gmmu_new_pte_address_vid_f(iova_v);
		u32 pte_tgt = __gk20a_aperture_mask(g, aperture,
				gmmu_new_pte_aperture_sys_mem_ncoh_f(),
				gmmu_new_pte_aperture_video_memory_f());

		pte_w[0] = pte_valid | pte_addr | pte_tgt;

		if (priv)
			pte_w[0] |= gmmu_new_pte_privilege_true_f();

		pte_w[1] = *iova >> (24 + gmmu_new_pte_address_shift_v()) |
			gmmu_new_pte_kind_f(kind_v) |
			gmmu_new_pte_comptagline_f((u32)(*ctag / ctag_granularity));

		if (rw_flag == gk20a_mem_flag_read_only)
			pte_w[0] |= gmmu_new_pte_read_only_true_f();
		if (unmapped_pte && !cacheable)
			pte_w[0] |= gmmu_new_pte_read_only_true_f();
		else if (!cacheable)
			pte_w[0] |= gmmu_new_pte_vol_true_f();

		gk20a_dbg(gpu_dbg_pte, "pte=%d iova=0x%llx kind=%d"
			  " ctag=%d vol=%d"
			  " [0x%08x, 0x%08x]",
			  i, *iova,
			  kind_v, (u32)(*ctag / ctag_granularity), !cacheable,
			  pte_w[1], pte_w[0]);

		if (*ctag)
			*ctag += page_size;
	} else if (sparse) {
		/* sparse mapping: not valid, but marked volatile */
		pte_w[0] = gmmu_new_pte_valid_false_f();
		pte_w[0] |= gmmu_new_pte_vol_true_f();
	} else {
		gk20a_dbg(gpu_dbg_pte, "pte_cur=%d [0x0,0x0]", i);
	}

	pte_i = pte3_from_index(i);

	gk20a_pde_wr32(g, pte, pte_i + 0, pte_w[0]);
	gk20a_pde_wr32(g, pte, pte_i + 1, pte_w[1]);

	if (*iova) {
		*iova += page_size;
		*offset += page_size;
		/* advance to the next scatterlist chunk once this one is used up */
		if (*sgl && *offset + page_size > (*sgl)->length) {
			u64 new_iova;

			*sgl = sg_next(*sgl);
			if (*sgl) {
				new_iova = sg_phys(*sgl);
				gk20a_dbg(gpu_dbg_pte, "chunk address %llx, size %d",
					  new_iova, (*sgl)->length);
				if (new_iova) {
					*offset = 0;
					*iova = new_iova;
				}
			}
		}
	}
	return 0;
}

static const struct gk20a_mmu_level gp10b_mm_levels[] = {
	{.hi_bit = {48, 48},
	 .lo_bit = {47, 47},
	 .update_entry = update_gmmu_pde3_locked,
	 .entry_size = 8},
	{.hi_bit = {46, 46},
	 .lo_bit = {38, 38},
	 .update_entry = update_gmmu_pde3_locked,
	 .entry_size = 8},
	{.hi_bit = {37, 37},
	 .lo_bit = {29, 29},
	 .update_entry = update_gmmu_pde3_locked,
	 .entry_size = 8},
	{.hi_bit = {28, 28},
	 .lo_bit = {21, 21},
	 .update_entry = update_gmmu_pde0_locked,
	 .entry_size = 16},
	{.hi_bit = {20, 20},
	 .lo_bit = {12, 16},
	 .update_entry = update_gmmu_pte_locked,
	 .entry_size = 8},
	{.update_entry = NULL}
};

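/*
 * Worked example: how a 49-bit GPU VA decodes against the table above
 * (the two hi_bit/lo_bit slots are per page-size index, small then big):
 *
 *   PD3 index = VA[48:47]  -> update_gmmu_pde3_locked, 8-byte entries
 *   PD2 index = VA[46:38]  -> update_gmmu_pde3_locked, 8-byte entries
 *   PD1 index = VA[37:29]  -> update_gmmu_pde3_locked, 8-byte entries
 *   PD0 index = VA[28:21]  -> update_gmmu_pde0_locked, 16-byte dual entries
 *   PTE index = VA[20:12] for small (4K) pages, VA[20:16] for big pages
 */
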
static const struct gk20a_mmu_level *gp10b_mm_get_mmu_levels(struct gk20a *g,
		u32 big_page_size)
{
	return gp10b_mm_levels;
}

static void gp10b_mm_init_pdb(struct gk20a *g, struct mem_desc *inst_block,
		struct vm_gk20a *vm)
{
	u64 pdb_addr = gk20a_mem_get_base_addr(g, &vm->pdb.mem, 0);
	u32 pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());
	u32 pdb_addr_hi = u64_hi32(pdb_addr);

	gk20a_dbg_info("pde pa=0x%llx", pdb_addr);

	gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
		gk20a_aperture_mask(g, &vm->pdb.mem,
			ram_in_page_dir_base_target_sys_mem_ncoh_f(),
			ram_in_page_dir_base_target_vid_mem_f()) |
		ram_in_page_dir_base_vol_true_f() |
		ram_in_page_dir_base_lo_f(pdb_addr_lo) |
		1 << 10); /* bit 10: select the new (ver2) page table format */

	gk20a_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
		ram_in_page_dir_base_hi_f(pdb_addr_hi));
}

static void gp10b_remove_bar2_vm(struct gk20a *g)
{
	struct mm_gk20a *mm = &g->mm;

	gp10b_replayable_pagefault_buffer_deinit(g);
	gk20a_remove_vm(&mm->bar2.vm, &mm->bar2.inst_block);
}

void gp10b_init_mm(struct gpu_ops *gops)
{
	gm20b_init_mm(gops);
	gops->mm.get_physical_addr_bits = gp10b_mm_get_physical_addr_bits;
	gops->mm.init_mm_setup_hw = gp10b_init_mm_setup_hw;
	gops->mm.init_bar2_vm = gp10b_init_bar2_vm;
	gops->mm.init_bar2_mm_hw_setup = gp10b_init_bar2_mm_hw_setup;
	gops->mm.get_iova_addr = gp10b_mm_iova_addr;
	gops->mm.get_mmu_levels = gp10b_mm_get_mmu_levels;
	gops->mm.init_pdb = gp10b_mm_init_pdb;
	gops->mm.remove_bar2_vm = gp10b_remove_bar2_vm;
}