diff options
author | Amulya <Amurthyreddy@nvidia.com> | 2018-08-09 01:10:08 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-08-22 20:31:33 -0400 |
commit | 1c13da1d29c344cb60953eabeca56b601446c64a (patch) | |
tree | 145a1a133b2d85592e0ddd1a25b12fc48e879829 /drivers/gpu/nvgpu/common/mm/vm_area.c | |
parent | f3c3e4dece89c5e2f77fbfaf3cacd877ba62406c (diff) |
gpu: nvgpu: Changed enum gmmu_pgsz_gk20a into macros
Changed the enum gmmu_pgsz_gk20a into macros and updated all of its
uses accordingly.
The enum gmmu_pgsz_gk20a was being used in for loops, where it was
compared with an integer. This violates MISRA rule 10.4, which only
allows arithmetic operations on operands of the same essential type
category. Replacing this enum with macros fixes the violation.
JIRA NVGPU-993
Change-Id: I6f18b08bc7548093d99e8229378415bcdec749e3
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795593
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm_area.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/vm_area.c | 10 |
1 files changed, 5 insertions, 5 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c index b8fecbfc..7e2b5c34 100644 --- a/drivers/gpu/nvgpu/common/mm/vm_area.c +++ b/drivers/gpu/nvgpu/common/mm/vm_area.c | |||
@@ -43,7 +43,7 @@ struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr) | |||
43 | } | 43 | } |
44 | 44 | ||
45 | int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm, | 45 | int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm, |
46 | u64 map_addr, u64 map_size, int pgsz_idx, | 46 | u64 map_addr, u64 map_size, u32 pgsz_idx, |
47 | struct nvgpu_vm_area **pvm_area) | 47 | struct nvgpu_vm_area **pvm_area) |
48 | { | 48 | { |
49 | struct gk20a *g = vm->mm->g; | 49 | struct gk20a *g = vm->mm->g; |
@@ -99,19 +99,19 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, | |||
99 | struct nvgpu_allocator *vma; | 99 | struct nvgpu_allocator *vma; |
100 | struct nvgpu_vm_area *vm_area; | 100 | struct nvgpu_vm_area *vm_area; |
101 | u64 vaddr_start = 0; | 101 | u64 vaddr_start = 0; |
102 | int pgsz_idx = gmmu_page_size_small; | 102 | u32 pgsz_idx = GMMU_PAGE_SIZE_SMALL; |
103 | 103 | ||
104 | nvgpu_log(g, gpu_dbg_map, | 104 | nvgpu_log(g, gpu_dbg_map, |
105 | "ADD vm_area: pgsz=%#-8x pages=%-9u addr=%#-14llx flags=0x%x", | 105 | "ADD vm_area: pgsz=%#-8x pages=%-9u addr=%#-14llx flags=0x%x", |
106 | page_size, pages, *addr, flags); | 106 | page_size, pages, *addr, flags); |
107 | 107 | ||
108 | for (; pgsz_idx < gmmu_nr_page_sizes; pgsz_idx++) { | 108 | for (; pgsz_idx < GMMU_NR_PAGE_SIZES; pgsz_idx++) { |
109 | if (vm->gmmu_page_sizes[pgsz_idx] == page_size) { | 109 | if (vm->gmmu_page_sizes[pgsz_idx] == page_size) { |
110 | break; | 110 | break; |
111 | } | 111 | } |
112 | } | 112 | } |
113 | 113 | ||
114 | if (pgsz_idx > gmmu_page_size_big) { | 114 | if (pgsz_idx > GMMU_PAGE_SIZE_BIG) { |
115 | return -EINVAL; | 115 | return -EINVAL; |
116 | } | 116 | } |
117 | 117 | ||
@@ -122,7 +122,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, | |||
122 | */ | 122 | */ |
123 | nvgpu_speculation_barrier(); | 123 | nvgpu_speculation_barrier(); |
124 | 124 | ||
125 | if (!vm->big_pages && pgsz_idx == gmmu_page_size_big) { | 125 | if (!vm->big_pages && pgsz_idx == GMMU_PAGE_SIZE_BIG) { |
126 | return -EINVAL; | 126 | return -EINVAL; |
127 | } | 127 | } |
128 | 128 | ||