summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
diff options
context:
space:
mode:
authorAmulya <Amurthyreddy@nvidia.com>2018-08-28 03:04:55 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-09-19 06:24:12 -0400
commit941ac9a9d07bedb4062fd0c4d32eb2ef80a42359 (patch)
treec53622d96a4c2e7c18693ecf4059d7e403cd7808 /drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
parent2805f03aa0496502b64ff760f667bfe9d8a27928 (diff)
nvgpu: common: MISRA 10.1 boolean fixes
Fix violations where a variable of type non-boolean is used as a boolean in gpu/nvgpu/common.

JIRA NVGPU-646

Change-Id: I9773d863b715f83ae1772b75d5373f77244bc8ca
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1807132
GVS: Gerrit_Virtual_Submit
Tested-by: Amulya Murthyreddy <amurthyreddy@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/nvgpu_mem.c')
-rw-r--r--drivers/gpu/nvgpu/common/mm/nvgpu_mem.c34
1 file changed, 17 insertions, 17 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index e251f3c4..5cfaded0 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -128,7 +128,7 @@ bool nvgpu_sgt_iommuable(struct gk20a *g, struct nvgpu_sgt *sgt)
128 128
129void nvgpu_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt) 129void nvgpu_sgt_free(struct gk20a *g, struct nvgpu_sgt *sgt)
130{ 130{
131 if (sgt && sgt->ops->sgt_free) { 131 if (sgt != NULL && sgt->ops->sgt_free != NULL) {
132 sgt->ops->sgt_free(g, sgt); 132 sgt->ops->sgt_free(g, sgt);
133 } 133 }
134} 134}
@@ -138,7 +138,7 @@ u64 nvgpu_mem_iommu_translate(struct gk20a *g, u64 phys)
138 /* ensure it is not vidmem allocation */ 138 /* ensure it is not vidmem allocation */
139 WARN_ON(nvgpu_addr_is_vidmem_page_alloc(phys)); 139 WARN_ON(nvgpu_addr_is_vidmem_page_alloc(phys));
140 140
141 if (nvgpu_iommuable(g) && g->ops.mm.get_iommu_bit) { 141 if (nvgpu_iommuable(g) && g->ops.mm.get_iommu_bit != NULL) {
142 return phys | 1ULL << g->ops.mm.get_iommu_bit(g); 142 return phys | 1ULL << g->ops.mm.get_iommu_bit(g);
143 } 143 }
144 144
@@ -165,7 +165,7 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
165 */ 165 */
166 if (nvgpu_iommuable(g) && 166 if (nvgpu_iommuable(g) &&
167 nvgpu_sgt_iommuable(g, sgt) && 167 nvgpu_sgt_iommuable(g, sgt) &&
168 nvgpu_sgt_get_dma(sgt, sgt->sgl)) { 168 nvgpu_sgt_get_dma(sgt, sgt->sgl) != 0ULL) {
169 return 1ULL << __ffs(nvgpu_sgt_get_dma(sgt, sgt->sgl)); 169 return 1ULL << __ffs(nvgpu_sgt_get_dma(sgt, sgt->sgl));
170 } 170 }
171 171
@@ -195,7 +195,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
195 if (mem->aperture == APERTURE_SYSMEM) { 195 if (mem->aperture == APERTURE_SYSMEM) {
196 u32 *ptr = mem->cpu_va; 196 u32 *ptr = mem->cpu_va;
197 197
198 WARN_ON(!ptr); 198 WARN_ON(ptr == NULL);
199 data = ptr[w]; 199 data = ptr[w];
200 } else if (mem->aperture == APERTURE_VIDMEM) { 200 } else if (mem->aperture == APERTURE_VIDMEM) {
201 nvgpu_pramin_rd_n(g, mem, w * sizeof(u32), sizeof(u32), &data); 201 nvgpu_pramin_rd_n(g, mem, w * sizeof(u32), sizeof(u32), &data);
@@ -208,20 +208,20 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
208 208
209u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset) 209u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
210{ 210{
211 WARN_ON(offset & 3U); 211 WARN_ON((offset & 3U) != 0U);
212 return nvgpu_mem_rd32(g, mem, offset / sizeof(u32)); 212 return nvgpu_mem_rd32(g, mem, offset / sizeof(u32));
213} 213}
214 214
215void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem, 215void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
216 u32 offset, void *dest, u32 size) 216 u32 offset, void *dest, u32 size)
217{ 217{
218 WARN_ON(offset & 3U); 218 WARN_ON((offset & 3U) != 0U);
219 WARN_ON(size & 3U); 219 WARN_ON((size & 3U) != 0U);
220 220
221 if (mem->aperture == APERTURE_SYSMEM) { 221 if (mem->aperture == APERTURE_SYSMEM) {
222 u8 *src = (u8 *)mem->cpu_va + offset; 222 u8 *src = (u8 *)mem->cpu_va + offset;
223 223
224 WARN_ON(!mem->cpu_va); 224 WARN_ON(mem->cpu_va == NULL);
225 memcpy(dest, src, size); 225 memcpy(dest, src, size);
226 } else if (mem->aperture == APERTURE_VIDMEM) { 226 } else if (mem->aperture == APERTURE_VIDMEM) {
227 nvgpu_pramin_rd_n(g, mem, offset, size, dest); 227 nvgpu_pramin_rd_n(g, mem, offset, size, dest);
@@ -235,7 +235,7 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
235 if (mem->aperture == APERTURE_SYSMEM) { 235 if (mem->aperture == APERTURE_SYSMEM) {
236 u32 *ptr = mem->cpu_va; 236 u32 *ptr = mem->cpu_va;
237 237
238 WARN_ON(!ptr); 238 WARN_ON(ptr == NULL);
239 ptr[w] = data; 239 ptr[w] = data;
240 } else if (mem->aperture == APERTURE_VIDMEM) { 240 } else if (mem->aperture == APERTURE_VIDMEM) {
241 nvgpu_pramin_wr_n(g, mem, w * sizeof(u32), sizeof(u32), &data); 241 nvgpu_pramin_wr_n(g, mem, w * sizeof(u32), sizeof(u32), &data);
@@ -249,20 +249,20 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
249 249
250void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data) 250void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data)
251{ 251{
252 WARN_ON(offset & 3U); 252 WARN_ON((offset & 3U) != 0U);
253 nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data); 253 nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data);
254} 254}
255 255
256void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, 256void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
257 void *src, u32 size) 257 void *src, u32 size)
258{ 258{
259 WARN_ON(offset & 3U); 259 WARN_ON((offset & 3U) != 0U);
260 WARN_ON(size & 3U); 260 WARN_ON((size & 3U) != 0U);
261 261
262 if (mem->aperture == APERTURE_SYSMEM) { 262 if (mem->aperture == APERTURE_SYSMEM) {
263 u8 *dest = (u8 *)mem->cpu_va + offset; 263 u8 *dest = (u8 *)mem->cpu_va + offset;
264 264
265 WARN_ON(!mem->cpu_va); 265 WARN_ON(mem->cpu_va == NULL);
266 memcpy(dest, src, size); 266 memcpy(dest, src, size);
267 } else if (mem->aperture == APERTURE_VIDMEM) { 267 } else if (mem->aperture == APERTURE_VIDMEM) {
268 nvgpu_pramin_wr_n(g, mem, offset, size, src); 268 nvgpu_pramin_wr_n(g, mem, offset, size, src);
@@ -277,16 +277,16 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
277void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, 277void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
278 u32 c, u32 size) 278 u32 c, u32 size)
279{ 279{
280 WARN_ON(offset & 3U); 280 WARN_ON((offset & 3U) != 0U);
281 WARN_ON(size & 3U); 281 WARN_ON((size & 3U) != 0U);
282 WARN_ON(c & ~0xffU); 282 WARN_ON((c & ~0xffU) != 0U);
283 283
284 c &= 0xffU; 284 c &= 0xffU;
285 285
286 if (mem->aperture == APERTURE_SYSMEM) { 286 if (mem->aperture == APERTURE_SYSMEM) {
287 u8 *dest = (u8 *)mem->cpu_va + offset; 287 u8 *dest = (u8 *)mem->cpu_va + offset;
288 288
289 WARN_ON(!mem->cpu_va); 289 WARN_ON(mem->cpu_va == NULL);
290 memset(dest, c, size); 290 memset(dest, c, size);
291 } else if (mem->aperture == APERTURE_VIDMEM) { 291 } else if (mem->aperture == APERTURE_VIDMEM) {
292 u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24); 292 u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);