path: root/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
author    Terje Bergstrom <tbergstrom@nvidia.com>    2016-04-08 15:02:37 -0400
committer Deepak Nibade <dnibade@nvidia.com>    2016-12-27 04:52:11 -0500
commit    03614bff771ee7ca93382d4464b1df373b6fe501 (patch)
tree      44d0862d4530ce94b6d31b015b14652c67a7c038 /drivers/gpu/nvgpu/gp10b/mm_gp10b.c
parent    fce01666d57c9c76c21d4ac31adc225a2a6c2e42 (diff)
gpu: nvgpu: gp10b: Support GPUs with no physical mode
Support GPUs which cannot choose between SMMU and physical addressing.

Change-Id: Ic097fccb313d98fcea918a705eefb5cd619138f1
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/1122590
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
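The mechanism at the heart of the patch is the new entry_addr() helper visible in the diff below: when the GPU can use physical addressing, page tables are programmed with the physical address from the scatterlist; on SMMU-only GPUs the SMMU IOVA is used instead. The following is a minimal standalone sketch of that selection pattern, not the driver's actual code: demo_gpu and demo_entry_addr are simplified stand-ins for struct gk20a and the helper, while has_physical_mode, sg_phys() and the get_iova_addr() HAL op are taken from the patch itself.

/*
 * Sketch of the address selection the patch centralizes in entry_addr().
 * demo_gpu is a hypothetical, simplified stand-in for struct gk20a.
 */
#include <linux/scatterlist.h>
#include <linux/types.h>

struct demo_gpu {
	bool has_physical_mode;	/* false on SMMU-only GPUs */
	u64 (*get_iova_addr)(struct scatterlist *sgl, u32 flags);
};

/* Address to program into a PDE/PTE for the table backed by sgl. */
static u64 demo_entry_addr(struct demo_gpu *g, struct scatterlist *sgl)
{
	if (g->has_physical_mode)
		return sg_phys(sgl);		/* raw physical address */

	return g->get_iova_addr(sgl, 0);	/* SMMU IOVA */
}

Centralizing the choice in one helper keeps the PDE3/PDE0 update paths from hard-coding sg_phys(), so GPUs without a physical mode get the IOVA everywhere page-table addresses are written.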
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/mm_gp10b.c')
-rw-r--r--    drivers/gpu/nvgpu/gp10b/mm_gp10b.c    30
1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index d4a4e7f3..deb8c138 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -1,7 +1,7 @@
 /*
  * GP10B MMU
  *
- * Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -32,7 +32,7 @@ static int gp10b_init_mm_setup_hw(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
 	struct mem_desc *inst_block = &mm->bar1.inst_block;
-	phys_addr_t inst_pa = gk20a_mem_phys(inst_block);
+	u64 inst_pa = gk20a_mm_inst_block_addr(g, inst_block);
 	int err = 0;
 
 	gk20a_dbg_fn("");
@@ -97,7 +97,7 @@ static int gb10b_init_bar2_mm_hw_setup(struct gk20a *g)
 {
 	struct mm_gk20a *mm = &g->mm;
 	struct mem_desc *inst_block = &mm->bar2.inst_block;
-	phys_addr_t inst_pa = gk20a_mem_phys(inst_block);
+	u64 inst_pa = gk20a_mm_inst_block_addr(g, inst_block);
 
 	gk20a_dbg_fn("");
 
@@ -146,6 +146,17 @@ static u32 *pde3_from_index(struct gk20a_mm_entry *entry, u32 i)
 	return (u32 *) (((u8 *)entry->cpu_va) + i*gmmu_new_pde__size_v());
 }
 
+static u64 entry_addr(struct gk20a *g, struct gk20a_mm_entry *entry)
+{
+	u64 addr;
+	if (g->mm.has_physical_mode)
+		addr = sg_phys(entry->sgt->sgl);
+	else
+		addr = g->ops.mm.get_iova_addr(g, entry->sgt->sgl, 0);
+
+	return addr;
+}
+
 static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
 				   struct gk20a_mm_entry *parent,
 				   u32 i, u32 gmmu_pgsz_idx,
@@ -156,6 +167,7 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
 				   bool cacheable, bool unmapped_pte,
 				   int rw_flag, bool sparse, bool priv)
 {
+	struct gk20a *g = gk20a_from_vm(vm);
 	u64 pte_addr = 0;
 	u64 pde_addr = 0;
 	struct gk20a_mm_entry *pte = parent->entries + i;
@@ -164,8 +176,8 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
 
 	gk20a_dbg_fn("");
 
-	pte_addr = sg_phys(pte->sgt->sgl) >> gmmu_new_pde_address_shift_v();
-	pde_addr = sg_phys(parent->sgt->sgl);
+	pte_addr = entry_addr(g, pte) >> gmmu_new_pde_address_shift_v();
+	pde_addr = entry_addr(g, parent);
 
 	pde_v[0] |= gmmu_new_pde_aperture_video_memory_f();
 	pde_v[0] |= gmmu_new_pde_address_sys_f(u64_lo32(pte_addr));
@@ -197,6 +209,7 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
 				   bool cacheable, bool unmapped_pte,
 				   int rw_flag, bool sparse, bool priv)
 {
+	struct gk20a *g = gk20a_from_vm(vm);
 	bool small_valid, big_valid;
 	u32 pte_addr_small = 0, pte_addr_big = 0;
 	struct gk20a_mm_entry *entry = pte->entries + i;
@@ -208,12 +221,13 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
 	small_valid = entry->size && entry->pgsz == gmmu_page_size_small;
 	big_valid = entry->size && entry->pgsz == gmmu_page_size_big;
 
-	if (small_valid)
-		pte_addr_small = sg_phys(entry->sgt->sgl)
+	if (small_valid) {
+		pte_addr_small = entry_addr(g, entry)
 			>> gmmu_new_dual_pde_address_shift_v();
+	}
 
 	if (big_valid)
-		pte_addr_big = sg_phys(entry->sgt->sgl)
+		pte_addr_big = entry_addr(g, entry)
 			>> gmmu_new_dual_pde_address_big_shift_v();
 
 	if (small_valid) {