path: root/drivers/gpu/nvgpu/common/mm/mm.c
author	Amulya <Amurthyreddy@nvidia.com>	2018-08-09 01:10:08 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-22 20:31:33 -0400
commit	1c13da1d29c344cb60953eabeca56b601446c64a (patch)
tree	145a1a133b2d85592e0ddd1a25b12fc48e879829 /drivers/gpu/nvgpu/common/mm/mm.c
parent	f3c3e4dece89c5e2f77fbfaf3cacd877ba62406c (diff)
gpu: nvgpu: Changed enum gmmu_pgsz_gk20a into macros
Changed the enum gmmu_pgsz_gk20a into macros and changed all the
instances of it. The enum gmmu_pgsz_gk20a was being used in for loops,
where it was compared with an integer. This violates MISRA rule 10.4,
which only allows arithmetic operations on operands of the same
essential type category. Changing this enum into macros fixes this
violation.

JIRA NVGPU-993

Change-Id: I6f18b08bc7548093d99e8229378415bcdec749e3
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795593
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
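For context, a minimal sketch of the pattern the commit message describes and how the macro form avoids it. The identifiers enum gmmu_pgsz_gk20a, gmmu_page_size_small/big, GMMU_PAGE_SIZE_SMALL/BIG and u32 come from the diff below; the 0U/1U values, the full enum member list, and the loop body are illustrative assumptions, not taken from this change.

/* Before: the loop index has essentially-enum type while the bound and
 * the increment are essentially-unsigned, mixing essential type
 * categories (MISRA C:2012 rule 10.4). Enum members and bound are
 * illustrative. */
enum gmmu_pgsz_gk20a { gmmu_page_size_small, gmmu_page_size_big /* , ... */ };

enum gmmu_pgsz_gk20a pgsz;
for (pgsz = gmmu_page_size_small; pgsz < 2; pgsz++) {
	/* ... per-page-size work ... */
}

/* After: plain unsigned macros keep every operand in the unsigned
 * category, so the comparison and increment no longer mix types.
 * The 0U/1U values are assumed for illustration. */
#define GMMU_PAGE_SIZE_SMALL	0U
#define GMMU_PAGE_SIZE_BIG	1U

u32 pgsz;
for (pgsz = GMMU_PAGE_SIZE_SMALL; pgsz <= GMMU_PAGE_SIZE_BIG; pgsz++) {
	/* ... per-page-size work ... */
}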
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/mm.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/mm.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 2e46e211..6be619ed 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -34,14 +34,14 @@
  * Attempt to find a reserved memory area to determine PTE size for the passed
  * mapping. If no reserved area can be found use small pages.
  */
-enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
+u32 __get_pte_size_fixed_map(struct vm_gk20a *vm,
 			     u64 base, u64 size)
 {
 	struct nvgpu_vm_area *vm_area;
 
 	vm_area = nvgpu_vm_area_find(vm, base);
 	if (!vm_area) {
-		return gmmu_page_size_small;
+		return GMMU_PAGE_SIZE_SMALL;
 	}
 
 	return vm_area->pgsz_idx;
@@ -50,19 +50,19 @@ enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
 /*
  * This is for when the address space does not support unified address spaces.
  */
-static enum gmmu_pgsz_gk20a __get_pte_size_split_addr(struct vm_gk20a *vm,
+static u32 __get_pte_size_split_addr(struct vm_gk20a *vm,
 					u64 base, u64 size)
 {
 	if (!base) {
-		if (size >= vm->gmmu_page_sizes[gmmu_page_size_big]) {
-			return gmmu_page_size_big;
+		if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG]) {
+			return GMMU_PAGE_SIZE_BIG;
 		}
-		return gmmu_page_size_small;
+		return GMMU_PAGE_SIZE_SMALL;
 	} else {
 		if (base < __nv_gmmu_va_small_page_limit()) {
-			return gmmu_page_size_small;
+			return GMMU_PAGE_SIZE_SMALL;
 		} else {
-			return gmmu_page_size_big;
+			return GMMU_PAGE_SIZE_BIG;
 		}
 	}
 }
@@ -88,12 +88,12 @@ static enum gmmu_pgsz_gk20a __get_pte_size_split_addr(struct vm_gk20a *vm,
  * - Regardless of buffer size use small pages since we have no
  * - guarantee of contiguity.
  */
-enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
+u32 __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
 	if (!vm->big_pages) {
-		return gmmu_page_size_small;
+		return GMMU_PAGE_SIZE_SMALL;
 	}
 
 	if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFY_ADDRESS_SPACES)) {
@@ -104,11 +104,11 @@ enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
 		return __get_pte_size_fixed_map(vm, base, size);
 	}
 
-	if (size >= vm->gmmu_page_sizes[gmmu_page_size_big] &&
+	if (size >= vm->gmmu_page_sizes[GMMU_PAGE_SIZE_BIG] &&
 	    nvgpu_iommuable(g)) {
-		return gmmu_page_size_big;
+		return GMMU_PAGE_SIZE_BIG;
 	}
-	return gmmu_page_size_small;
+	return GMMU_PAGE_SIZE_SMALL;
 }
 
 int nvgpu_mm_suspend(struct gk20a *g)
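A hypothetical caller sketch, assuming the surrounding mapping code: the u32 index that __get_pte_size() now returns is used directly as a subscript into vm->gmmu_page_sizes, just as the hunks above index it. The names map_addr and map_size are made up for illustration.

	/* Pick a PTE size index for a mapping and translate it to bytes
	 * through the VM's page-size table (map_addr/map_size are
	 * illustrative only). */
	u32 pgsz_idx = __get_pte_size(vm, map_addr, map_size);
	u64 page_size = vm->gmmu_page_sizes[pgsz_idx];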