diff options
-rw-r--r-- | drivers/gpu/nvgpu/gp10b/mm_gp10b.c | 6 |
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index 9b347f00..c651eeb9 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -253,7 +253,9 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
253 | bool cacheable, bool unmapped_pte, | 253 | bool cacheable, bool unmapped_pte, |
254 | int rw_flag, bool sparse, u32 flags) | 254 | int rw_flag, bool sparse, u32 flags) |
255 | { | 255 | { |
256 | struct gk20a *g = vm->mm->g; | ||
256 | u32 page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx]; | 257 | u32 page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx]; |
258 | u32 ctag_granularity = g->ops.fb.compression_page_size(g); | ||
257 | u32 pte_w[2] = {0, 0}; /* invalid pte */ | 259 | u32 pte_w[2] = {0, 0}; /* invalid pte */ |
258 | 260 | ||
259 | gk20a_dbg_fn(""); | 261 | gk20a_dbg_fn(""); |
@@ -268,7 +270,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
268 | >> gmmu_new_pte_address_shift_v()); | 270 | >> gmmu_new_pte_address_shift_v()); |
269 | 271 | ||
270 | pte_w[1] = gmmu_new_pte_kind_f(kind_v) | | 272 | pte_w[1] = gmmu_new_pte_kind_f(kind_v) | |
271 | gmmu_new_pte_comptagline_f(*ctag / SZ_128K); | 273 | gmmu_new_pte_comptagline_f(*ctag / ctag_granularity); |
272 | 274 | ||
273 | if (rw_flag == gk20a_mem_flag_read_only) | 275 | if (rw_flag == gk20a_mem_flag_read_only) |
274 | pte_w[0] |= gmmu_new_pte_read_only_true_f(); | 276 | pte_w[0] |= gmmu_new_pte_read_only_true_f(); |
@@ -281,7 +283,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
281 | " ctag=%d vol=%d" | 283 | " ctag=%d vol=%d" |
282 | " [0x%08x, 0x%08x]", | 284 | " [0x%08x, 0x%08x]", |
283 | i, *iova, | 285 | i, *iova, |
284 | kind_v, *ctag, !cacheable, | 286 | kind_v, *ctag / ctag_granularity, !cacheable, |
285 | pte_w[1], pte_w[0]); | 287 | pte_w[1], pte_w[0]); |
286 | 288 | ||
287 | if (*ctag) | 289 | if (*ctag) |