path: root/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
author		Terje Bergstrom <tbergstrom@nvidia.com>	2015-12-17 13:12:21 -0500
committer	Terje Bergstrom <tbergstrom@nvidia.com>	2016-01-05 10:50:02 -0500
commit		9812bd5eea1c5d2c97149d64c5ccf81eae75fda1 (patch)
tree		5b9c006b9c94d8c6920fc779f71a9bf36db43766 /drivers/gpu/nvgpu/gk20a/mm_gk20a.c
parent		c9d6a79a4ce28744bc1c68f8c16c28d453b91a51 (diff)
gpu: nvgpu: Control comptagline assignment from kernel
On Maxwell, comptaglines are assigned per 128k, but the preferred big page
size for graphics is 64k. Bit 16 of the GPU VA determines which half of the
comptagline is used. This creates problems if user space wants to map a page
multiple times at arbitrary GPU VAs: in one mapping the page might land in
the lower half of a 128k comptagline, and in another mapping in the upper
half.

Turn on the mode where the MSB of the comptagline in the PTE, instead of
bit 16 of the GPU VA, selects the lower/upper half of the comptagline.

Bug 1704834

Change-Id: If87e8f6ac0fc9c5624e80fa1ba2ceeb02781355b
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/924322
Reviewed-by: Alex Waterman <alexw@nvidia.com>
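To make the half selection concrete, here is a small standalone sketch, not
driver code: the helper name, the width parameter and COMPTAG_HALF_BIT are
assumptions made for this example. It contrasts how the lower/upper 64k half
of a 128k comptagline is picked in the old (GPU VA bit 16) and new (PTE
comptagline MSB) modes.

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative sketch only, not nvgpu code. COMPTAG_HALF_BIT, the helper
 * name and the comptagline_width parameter are assumptions for the example.
 */
#define COMPTAG_HALF_BIT 16	/* selects the 64k half of a 128k comptagline */

static bool comptag_upper_half(uint64_t gpu_va, uint32_t pte_comptagline,
			       unsigned int comptagline_width,
			       bool use_full_comp_tag_line)
{
	if (use_full_comp_tag_line)
		/* New mode: the MSB of the PTE comptagline field decides, so
		 * the choice is encoded per mapping of the physical page and
		 * every mapping of that page can be made to agree. */
		return (pte_comptagline >> (comptagline_width - 1)) & 1;

	/* Old mode: bit 16 of the GPU VA decides, so two mappings of the same
	 * page at VAs that differ in bit 16 use different halves of the same
	 * comptagline. */
	return (gpu_va >> COMPTAG_HALF_BIT) & 1;
}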
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	9
1 file changed, 9 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 6c4637e8..76c33512 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -414,6 +414,10 @@ int gk20a_init_mm_setup_hw(struct gk20a *g)
 	gk20a_dbg_fn("");
 
 	g->ops.fb.set_mmu_page_size(g);
+	if (g->ops.fb.set_use_full_comp_tag_line)
+		mm->use_full_comp_tag_line =
+			g->ops.fb.set_use_full_comp_tag_line(g);
+
 
 	inst_pa = (u32)(inst_pa >> bar1_instance_block_shift_gk20a());
 	gk20a_dbg_info("bar1 inst block ptr: 0x%08x", (u32)inst_pa);
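The init-path hunk above only queries an optional per-chip fb op and caches
its result in mm->use_full_comp_tag_line. As a rough idea of what such an op
might do on a chip that supports the mode, a hypothetical implementation
could look like the sketch below; the register and field accessor names are
assumptions, not verified nvgpu API.

/*
 * Hypothetical per-chip hook, shown for orientation only; the register and
 * field accessor names are assumptions. The op flips the MMU into
 * "use full comp tag line" mode and returns true so the result is cached
 * in mm->use_full_comp_tag_line for the PTE path in the next hunk.
 */
static bool gb_fb_set_use_full_comp_tag_line(struct gk20a *g)
{
	u32 ctrl = gk20a_readl(g, fb_mmu_ctrl_r());

	ctrl |= fb_mmu_ctrl_use_full_comp_tag_line_true_f();
	gk20a_writel(g, fb_mmu_ctrl_r(), ctrl);

	return true;
}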
@@ -2327,6 +2331,11 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
 			gmmu_pte_kind_f(kind_v) |
 			gmmu_pte_comptagline_f((u32)(*ctag / ctag_granularity));
 
+		if (vm->mm->use_full_comp_tag_line && *iova & 0x10000) {
+			pte_w[1] |= gmmu_pte_comptagline_f(
+					1 << (gmmu_pte_comptagline_s() - 1));
+		}
+
 		if (rw_flag == gk20a_mem_flag_read_only) {
 			pte_w[0] |= gmmu_pte_read_only_true_f();
 			pte_w[1] |=
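The PTE-path hunk ORs the MSB of the comptagline field into pte_w[1] whenever
bit 16 of the physical address (*iova) is set. The standalone illustration
below walks through that arithmetic; EXAMPLE_COMPTAGLINE_BITS is a made-up
width, whereas in the driver the width comes from gmmu_pte_comptagline_s()
and the value is packed with gmmu_pte_comptagline_f().

#include <stdint.h>
#include <stdio.h>

/*
 * Standalone illustration of the PTE update above, not driver code.
 * EXAMPLE_COMPTAGLINE_BITS is a made-up field width used only for this
 * example.
 */
#define EXAMPLE_COMPTAGLINE_BITS 17

int main(void)
{
	uint64_t iova = 0x12350000ULL;	/* bit 16 set: upper 64k half */
	uint32_t comptagline = 42;	/* comptagline assigned to the buffer */

	if (iova & 0x10000)
		/* Mirror bit 16 of the physical address into the MSB of the
		 * comptagline field, so the hardware in full-comp-tag-line
		 * mode picks the upper half regardless of the GPU VA. */
		comptagline |= 1u << (EXAMPLE_COMPTAGLINE_BITS - 1);

	printf("comptagline field value: 0x%x\n", comptagline);
	return 0;
}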