summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
diff options
context:
space:
mode:
authorTerje Bergstrom <tbergstrom@nvidia.com>2015-11-09 12:06:37 -0500
committerDeepak Nibade <dnibade@nvidia.com>2016-12-27 04:52:09 -0500
commitde2656300ae74df5075a3a7e38a701c8048af3b2 (patch)
tree54960a4af85e7fd9fd661ed9a23935ed031fe30a /drivers/gpu/nvgpu/gp10b/mm_gp10b.c
parent4ff59992afa50fb946b57e5556513b106cd17e8c (diff)
Revert "gpu: nvgpu: gp10b: Implement sparse PDEs"
This reverts commit c2707054192b058eec24a52c7f586b030f9ff007. It introduces regression in T124. Bug 1702063 Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com> Change-Id: I8516c0bfe129bb1ac3d7a1983846061df8ae967b Reviewed-on: http://git-master/r/830787 Reviewed-by: Automatic_Commit_Validation_User GVS: Gerrit_Virtual_Submit
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/mm_gp10b.c')
-rw-r--r--drivers/gpu/nvgpu/gp10b/mm_gp10b.c21
1 file changed, 6 insertions, 15 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index b5ea5d68..d3297e31 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -157,18 +157,17 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
157 int rw_flag, bool sparse, bool priv) 157 int rw_flag, bool sparse, bool priv)
158{ 158{
159 u64 pte_addr = 0; 159 u64 pte_addr = 0;
160 u64 pde_addr = 0;
160 struct gk20a_mm_entry *pte = parent->entries + i; 161 struct gk20a_mm_entry *pte = parent->entries + i;
161 u32 pde_v[2] = {0, 0}; 162 u32 pde_v[2] = {0, 0};
162 u32 *pde; 163 u32 *pde;
163 164
164 gk20a_dbg_fn(""); 165 gk20a_dbg_fn("");
165 166
166 if (!sparse) 167 pte_addr = sg_phys(pte->sgt->sgl) >> gmmu_new_pde_address_shift_v();
167 pte_addr = sg_phys(pte->sgt->sgl) 168 pde_addr = sg_phys(parent->sgt->sgl);
168 >> gmmu_new_pde_address_shift_v();
169 169
170 pde_v[0] |= sparse ? gmmu_new_pde_aperture_invalid_f() 170 pde_v[0] |= gmmu_new_pde_aperture_video_memory_f();
171 : gmmu_new_pde_aperture_video_memory_f();
172 pde_v[0] |= gmmu_new_pde_address_sys_f(u64_lo32(pte_addr)); 171 pde_v[0] |= gmmu_new_pde_address_sys_f(u64_lo32(pte_addr));
173 pde_v[0] |= gmmu_new_pde_vol_true_f(); 172 pde_v[0] |= gmmu_new_pde_vol_true_f();
174 pde_v[1] |= pte_addr >> 24; 173 pde_v[1] |= pte_addr >> 24;
@@ -205,12 +204,9 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
205 u32 *pde; 204 u32 *pde;
206 205
207 gk20a_dbg_fn(""); 206 gk20a_dbg_fn("");
208 gk20a_dbg(gpu_dbg_pte, "entry %p\n", entry);
209 207
210 small_valid = !sparse && entry->size 208 small_valid = entry->size && entry->pgsz == gmmu_page_size_small;
211 && entry->pgsz == gmmu_page_size_small; 209 big_valid = entry->size && entry->pgsz == gmmu_page_size_big;
212 big_valid = !sparse && entry->size
213 && entry->pgsz == gmmu_page_size_big;
214 210
215 if (small_valid) 211 if (small_valid)
216 pte_addr_small = sg_phys(entry->sgt->sgl) 212 pte_addr_small = sg_phys(entry->sgt->sgl)
@@ -234,11 +230,6 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
234 pde_v[1] |= pte_addr_big >> 28; 230 pde_v[1] |= pte_addr_big >> 28;
235 } 231 }
236 232
237 if (sparse) {
238 pde_v[0] |= gmmu_new_dual_pde_aperture_big_invalid_f();
239 pde_v[0] |= gmmu_new_dual_pde_vol_big_true_f();
240 }
241
242 pde = pde0_from_index(pte, i); 233 pde = pde0_from_index(pte, i);
243 234
244 gk20a_mem_wr32(pde, 0, pde_v[0]); 235 gk20a_mem_wr32(pde, 0, pde_v[0]);