path: root/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
author     Amulya <Amurthyreddy@nvidia.com>                      2018-08-09 01:10:08 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>   2018-08-22 20:31:33 -0400
commit     1c13da1d29c344cb60953eabeca56b601446c64a (patch)
tree       145a1a133b2d85592e0ddd1a25b12fc48e879829 /drivers/gpu/nvgpu/gk20a/mm_gk20a.c
parent     f3c3e4dece89c5e2f77fbfaf3cacd877ba62406c (diff)
gpu: nvgpu: Changed enum gmmu_pgsz_gk20a into macros
Changed the enum gmmu_pgsz_gk20a into macros and updated all of its
instances. The enum gmmu_pgsz_gk20a was being used in for loops, where
it was compared with an integer. This violates MISRA rule 10.4, which
only allows arithmetic operations on operands of the same essential
type category. Changing this enum into macros fixes the violation.

JIRA NVGPU-993

Change-Id: I6f18b08bc7548093d99e8229378415bcdec749e3
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795593
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
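For context, the pattern MISRA C:2012 rule 10.4 flags here is a loop whose counter has enum type but is compared with (and incremented by) integer operands. The sketch below is illustrative only; the enum/macro names and values are placeholders, not the real nvgpu definitions.

/*
 * Minimal, self-contained sketch of the rule 10.4 issue described in
 * the commit message and of the macro-based fix. All identifiers and
 * values here are assumed for illustration, not taken from nvgpu.
 */
#include <stdio.h>

/* Before: page sizes modelled as an enum. */
enum example_pgsz {
	example_pgsz_small = 0,
	example_pgsz_big = 1,
	example_pgsz_kernel = 2,
	example_nr_pgsz = 3,
};

/* After: plain unsigned macros keep all loop arithmetic in the
 * "unsigned" essential type category, which satisfies rule 10.4. */
#define EXAMPLE_PGSZ_SMALL  0U
#define EXAMPLE_PGSZ_BIG    1U
#define EXAMPLE_PGSZ_KERNEL 2U
#define EXAMPLE_NR_PGSZ     3U

int main(void)
{
	enum example_pgsz e;
	unsigned int pgsz;

	/*
	 * Non-compliant shape: 'e' has essentially enum type, but the
	 * comparison 'e < 3U' mixes it with an essentially unsigned
	 * operand, which rule 10.4 flags.
	 */
	for (e = example_pgsz_small; e < 3U; e++) {
		printf("enum-based loop: %u\n", (unsigned int)e);
	}

	/*
	 * Compliant shape: counter and bound are both unsigned, so each
	 * operator sees operands of the same essential type category.
	 */
	for (pgsz = EXAMPLE_PGSZ_SMALL; pgsz < EXAMPLE_NR_PGSZ; pgsz++) {
		printf("macro-based loop: %u\n", pgsz);
	}

	return 0;
}

With the enum replaced by macros, helpers that used to return the enum type can return a plain u32 instead, which is why the gk20a_get_pde_pgsz and gk20a_get_pte_pgsz signatures also change in the diff below.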
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--   drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 18
1 file changed, 8 insertions, 10 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index ee63489e..b5626035 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -158,8 +158,8 @@ static void update_gmmu_pde_locked(struct vm_gk20a *vm,
 	u32 pd_offset = pd_offset_from_index(l, pd_idx);
 	u32 pde_v[2] = {0, 0};
 
-	small_valid = attrs->pgsz == gmmu_page_size_small;
-	big_valid = attrs->pgsz == gmmu_page_size_big;
+	small_valid = attrs->pgsz == GMMU_PAGE_SIZE_SMALL;
+	big_valid = attrs->pgsz == GMMU_PAGE_SIZE_BIG;
 
 	pde_v[0] = gmmu_pde_size_full_f();
 	pde_v[0] |= big_valid ?
@@ -283,24 +283,22 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
 	pd_write(g, pd, pd_offset + 1, pte_w[1]);
 }
 
-enum gmmu_pgsz_gk20a gk20a_get_pde_pgsz(struct gk20a *g,
-			const struct gk20a_mmu_level *l,
-			struct nvgpu_gmmu_pd *pd, u32 pd_idx)
+u32 gk20a_get_pde_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
+		       struct nvgpu_gmmu_pd *pd, u32 pd_idx)
 {
 	/*
 	 * big and small page sizes are the same
 	 */
-	return gmmu_page_size_small;
+	return GMMU_PAGE_SIZE_SMALL;
 }
 
-enum gmmu_pgsz_gk20a gk20a_get_pte_pgsz(struct gk20a *g,
-			const struct gk20a_mmu_level *l,
-			struct nvgpu_gmmu_pd *pd, u32 pd_idx)
+u32 gk20a_get_pte_pgsz(struct gk20a *g, const struct gk20a_mmu_level *l,
+		       struct nvgpu_gmmu_pd *pd, u32 pd_idx)
 {
 	/*
 	 * return invalid
 	 */
-	return gmmu_nr_page_sizes;
+	return GMMU_NR_PAGE_SIZES;
 }
 
 const struct gk20a_mmu_level gk20a_mm_levels_64k[] = {