summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
diff options
context:
space:
mode:
authorJussi Rasanen <jrasanen@nvidia.com>2015-09-29 06:21:05 -0400
committerDeepak Nibade <dnibade@nvidia.com>2016-12-27 04:52:08 -0500
commit040d71d07bdad49bbd2aac4564f0ea33800fa595 (patch)
tree4ecceffb1fb08cf89cc236ae7e74b55aaf40ac25 /drivers/gpu/nvgpu/gp10b/mm_gp10b.c
parentf2b4fcdce747015dddddc35dc96eb5d3a4a2bc5d (diff)
gpu: nvgpu: fix ctag computation overflow with 8GB
Bug 1689976 Change-Id: Ibf1c296fac4f2a2c6fcf062cbd80b3526a4fd4ed Signed-off-by: Jussi Rasanen <jrasanen@nvidia.com> Reviewed-on: http://git-master/r/806588 (cherry picked from commit 24b57989dc9636b41004bac32ee56dce90318350) Reviewed-on: http://git-master/r/808242 GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/mm_gp10b.c')
-rw-r--r--drivers/gpu/nvgpu/gp10b/mm_gp10b.c12
1 file changed, 6 insertions, 6 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index 0c76abb5..ae9c5c7e 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -152,7 +152,7 @@ static int update_gmmu_pde3_locked(struct vm_gk20a *vm,
152 struct scatterlist **sgl, 152 struct scatterlist **sgl,
153 u64 *offset, 153 u64 *offset,
154 u64 *iova, 154 u64 *iova,
155 u32 kind_v, u32 *ctag, 155 u32 kind_v, u64 *ctag,
156 bool cacheable, bool unmapped_pte, 156 bool cacheable, bool unmapped_pte,
157 int rw_flag, bool sparse, bool priv) 157 int rw_flag, bool sparse, bool priv)
158{ 158{
@@ -193,7 +193,7 @@ static int update_gmmu_pde0_locked(struct vm_gk20a *vm,
193 struct scatterlist **sgl, 193 struct scatterlist **sgl,
194 u64 *offset, 194 u64 *offset,
195 u64 *iova, 195 u64 *iova,
196 u32 kind_v, u32 *ctag, 196 u32 kind_v, u64 *ctag,
197 bool cacheable, bool unmapped_pte, 197 bool cacheable, bool unmapped_pte,
198 int rw_flag, bool sparse, bool priv) 198 int rw_flag, bool sparse, bool priv)
199{ 199{
@@ -249,13 +249,13 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
249 struct scatterlist **sgl, 249 struct scatterlist **sgl,
250 u64 *offset, 250 u64 *offset,
251 u64 *iova, 251 u64 *iova,
252 u32 kind_v, u32 *ctag, 252 u32 kind_v, u64 *ctag,
253 bool cacheable, bool unmapped_pte, 253 bool cacheable, bool unmapped_pte,
254 int rw_flag, bool sparse, bool priv) 254 int rw_flag, bool sparse, bool priv)
255{ 255{
256 struct gk20a *g = vm->mm->g; 256 struct gk20a *g = vm->mm->g;
257 u32 page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx]; 257 u32 page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx];
258 u32 ctag_granularity = g->ops.fb.compression_page_size(g); 258 u64 ctag_granularity = g->ops.fb.compression_page_size(g);
259 u32 pte_w[2] = {0, 0}; /* invalid pte */ 259 u32 pte_w[2] = {0, 0}; /* invalid pte */
260 260
261 gk20a_dbg_fn(""); 261 gk20a_dbg_fn("");
@@ -274,7 +274,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
274 274
275 pte_w[1] = *iova >> (24 + gmmu_new_pte_address_shift_v()) | 275 pte_w[1] = *iova >> (24 + gmmu_new_pte_address_shift_v()) |
276 gmmu_new_pte_kind_f(kind_v) | 276 gmmu_new_pte_kind_f(kind_v) |
277 gmmu_new_pte_comptagline_f(*ctag / ctag_granularity); 277 gmmu_new_pte_comptagline_f((u32)(*ctag / ctag_granularity));
278 278
279 if (rw_flag == gk20a_mem_flag_read_only) 279 if (rw_flag == gk20a_mem_flag_read_only)
280 pte_w[0] |= gmmu_new_pte_read_only_true_f(); 280 pte_w[0] |= gmmu_new_pte_read_only_true_f();
@@ -287,7 +287,7 @@ static int update_gmmu_pte_locked(struct vm_gk20a *vm,
287 " ctag=%d vol=%d" 287 " ctag=%d vol=%d"
288 " [0x%08x, 0x%08x]", 288 " [0x%08x, 0x%08x]",
289 i, *iova, 289 i, *iova,
290 kind_v, *ctag / ctag_granularity, !cacheable, 290 kind_v, (u32)(*ctag / ctag_granularity), !cacheable,
291 pte_w[1], pte_w[0]); 291 pte_w[1], pte_w[0]);
292 292
293 if (*ctag) 293 if (*ctag)