author		Srirangan <smadhavan@nvidia.com>	2018-08-14 01:27:15 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-17 16:54:08 -0400
commit		70c20bb75be7815ebc67ac82d6999f46bc25ed6d (patch)
tree		19d6b5299e09b71e9afe2967a758f036bb9b79bc /drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
parent		553fdf3534f856edce73744fd54914b9b7a829cc (diff)
gpu: nvgpu: common: mm: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces,
including single-statement blocks. Fix errors due to single-statement
if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: Ieeecf719dca9acc1a116d2893637bf770caf4f5b
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1794241
GVS: Gerrit_Virtual_Submit
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
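For reference, a minimal standalone sketch of what Rule 15.6 mandates
(illustrative code only, not taken from this driver; cond and do_thing()
are hypothetical names):

	#include <stdio.h>

	/* Hypothetical helper, used only for this illustration. */
	static void do_thing(int v)
	{
		printf("%d\n", v);
	}

	int main(void)
	{
		int cond = 1;

		/*
		 * Non-compliant with MISRA C:2012 Rule 15.6: the body of
		 * the selection statement is not a compound statement:
		 *
		 *	if (cond)
		 *		do_thing(1);
		 */

		/* Compliant: every branch body is enclosed in braces. */
		if (cond) {
			do_thing(1);
		} else {
			do_thing(0);
		}

		return 0;
	}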
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/nvgpu_mem.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/nvgpu_mem.c	23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 8ba119a6..345b947d 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -40,8 +40,9 @@ u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
 	 * Some iGPUs treat sysmem (i.e SoC DRAM) as vidmem. In these cases the
 	 * "sysmem" aperture should really be translated to VIDMEM.
 	 */
-	if (!nvgpu_is_enabled(g, NVGPU_MM_HONORS_APERTURE))
+	if (!nvgpu_is_enabled(g, NVGPU_MM_HONORS_APERTURE)) {
 		aperture = APERTURE_VIDMEM;
+	}
 
 	switch (aperture) {
 	case __APERTURE_SYSMEM_COH:
@@ -67,8 +68,9 @@ u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
 	 * we add this translation step here.
 	 */
 	if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) &&
-	    ap == APERTURE_SYSMEM)
+	    ap == APERTURE_SYSMEM) {
 		ap = __APERTURE_SYSMEM_COH;
+	}
 
 	return __nvgpu_aperture_mask(g, ap,
 				     sysmem_mask, sysmem_coh_mask, vidmem_mask);
@@ -115,15 +117,17 @@ u64 nvgpu_sgt_get_gpu_addr(struct gk20a *g, struct nvgpu_sgt *sgt,
 
 bool nvgpu_sgt_iommuable(struct gk20a *g, struct nvgpu_sgt *sgt)
 {
-	if (sgt->ops->sgt_iommuable)
+	if (sgt->ops->sgt_iommuable) {
 		return sgt->ops->sgt_iommuable(g, sgt);
+	}
 	return false;
 }
 
 void nvgpu_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt)
 {
-	if (sgt && sgt->ops->sgt_free)
+	if (sgt && sgt->ops->sgt_free) {
 		sgt->ops->sgt_free(g, sgt);
+	}
 }
 
 u64 nvgpu_mem_iommu_translate(struct gk20a *g, u64 phys)
@@ -131,8 +135,9 @@ u64 nvgpu_mem_iommu_translate(struct gk20a *g, u64 phys)
 	/* ensure it is not vidmem allocation */
 	WARN_ON(nvgpu_addr_is_vidmem_page_alloc(phys));
 
-	if (nvgpu_iommuable(g) && g->ops.mm.get_iommu_bit)
+	if (nvgpu_iommuable(g) && g->ops.mm.get_iommu_bit) {
 		return phys | 1ULL << g->ops.mm.get_iommu_bit(g);
+	}
 
 	return phys;
 }
@@ -157,8 +162,9 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
 	 */
 	if (nvgpu_iommuable(g) &&
 	    nvgpu_sgt_iommuable(g, sgt) &&
-	    nvgpu_sgt_get_dma(sgt, sgt->sgl))
+	    nvgpu_sgt_get_dma(sgt, sgt->sgl)) {
 		return 1ULL << __ffs(nvgpu_sgt_get_dma(sgt, sgt->sgl));
+	}
 
 	/*
 	 * Otherwise the buffer is not iommuable (VIDMEM, for example) or we are
@@ -169,10 +175,11 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
 		chunk_align = 1ULL << __ffs(nvgpu_sgt_get_phys(g, sgt, sgl) |
 					    nvgpu_sgt_get_length(sgt, sgl));
 
-		if (align)
+		if (align) {
 			align = min(align, chunk_align);
-		else
+		} else {
 			align = chunk_align;
+		}
 	}
 
 	return align;
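A note on the final hunk: nvgpu_sgt_alignment() computes each chunk's
alignment as 1ULL << __ffs(phys | length), i.e. the largest power of two
dividing both the chunk's address and its length, and keeps the minimum
across chunks. A standalone sketch of that idiom (hypothetical chunk
values; my_ffs() stands in for the kernel's __ffs(), which is undefined
for a zero argument):

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace stand-in for __ffs(): index of the lowest set bit. */
	static unsigned long my_ffs(uint64_t v)
	{
		unsigned long bit = 0;

		while ((v & 1ULL) == 0ULL) {
			v >>= 1;
			bit++;
		}
		return bit;
	}

	int main(void)
	{
		/* Hypothetical scatter-gather chunks: {address, length}. */
		const uint64_t chunks[][2] = {
			{ 0x10000000ULL, 0x4000ULL },	/* 16 KiB-aligned */
			{ 0x20001000ULL, 0x1000ULL },	/* 4 KiB-aligned  */
		};
		uint64_t align = 0;
		unsigned int i;

		for (i = 0; i < 2; i++) {
			/*
			 * ORing address and length means the chunk's
			 * alignment is limited by whichever has the lower
			 * set bit.
			 */
			uint64_t chunk_align =
				1ULL << my_ffs(chunks[i][0] | chunks[i][1]);

			/* Whole-buffer alignment: minimum over all chunks. */
			if (align) {
				align = (chunk_align < align) ?
					chunk_align : align;
			} else {
				align = chunk_align;
			}
		}

		/* Prints "alignment: 0x1000". */
		printf("alignment: 0x%llx\n", (unsigned long long)align);
		return 0;
	}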