path: root/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
author	Amulya <Amurthyreddy@nvidia.com>	2018-08-09 01:10:08 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-22 20:31:33 -0400
commit	1c13da1d29c344cb60953eabeca56b601446c64a (patch)
tree	145a1a133b2d85592e0ddd1a25b12fc48e879829 /drivers/gpu/nvgpu/vgpu/gr_vgpu.c
parent	f3c3e4dece89c5e2f77fbfaf3cacd877ba62406c (diff)
gpu: nvgpu: Changed enum gmmu_pgsz_gk20a into macros
Changed the enum gmmu_pgsz_gk20a into macros and changed all the
instances of it.

The enum gmmu_pgsz_gk20a was being used in for loops, where it was
compared with an integer. This violates MISRA rule 10.4, which only
allows arithmetic operations on operands of the same essential type
category. Changing this enum into macros fixes the violation.

JIRA NVGPU-993

Change-Id: I6f18b08bc7548093d99e8229378415bcdec749e3
Signed-off-by: Amulya <Amurthyreddy@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795593
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
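For context, the pattern the message describes looks roughly like the sketch below: an enum of page-size indices is replaced by unsigned macros so that loop counters and page-size indices stay in the same essential type category. The identifiers match those used in the diff, but the exact values, the header they live in, and the loop shown here are assumptions for illustration, not a verbatim copy of the nvgpu tree.

/* Before: page sizes as an enum. Comparing these constants against a
 * plain integer loop counter mixes essential type categories, which
 * violates MISRA rule 10.4. */
enum gmmu_pgsz_gk20a {
	gmmu_page_size_small  = 0,
	gmmu_page_size_big    = 1,
	gmmu_page_size_kernel = 2,
	gmmu_nr_page_sizes    = 3,
};

/* After: plain unsigned macros (values assumed here), so both operands
 * of the comparison below are essentially unsigned. */
#define GMMU_PAGE_SIZE_SMALL	0U
#define GMMU_PAGE_SIZE_BIG	1U
#define GMMU_PAGE_SIZE_KERNEL	2U
#define GMMU_NR_PAGE_SIZES	3U

/* Sketch of the kind of loop that motivated the change (hypothetical
 * caller, not taken from this file). u32 is the driver's 32-bit
 * unsigned typedef. */
u32 pgsz;
for (pgsz = GMMU_PAGE_SIZE_SMALL; pgsz < GMMU_NR_PAGE_SIZES; pgsz++) {
	/* per-page-size setup */
}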
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gr_vgpu.c	30
1 file changed, 15 insertions, 15 deletions
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index 0077c537..fa64cb82 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -192,7 +192,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* Circular Buffer */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[CIRCULAR].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 
 	if (!gpu_va)
 		goto clean_up;
@@ -202,7 +202,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* Attribute Buffer */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[ATTRIBUTE].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 
 	if (!gpu_va)
 		goto clean_up;
@@ -212,7 +212,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* Page Pool */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PAGEPOOL].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[PAGEPOOL_VA] = gpu_va;
@@ -221,7 +221,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	/* Priv register Access Map */
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[PRIV_ACCESS_MAP].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	if (!gpu_va)
 		goto clean_up;
 	g_bfr_va[PRIV_ACCESS_MAP_VA] = gpu_va;
@@ -232,7 +232,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 #ifdef CONFIG_GK20A_CTXSW_TRACE
 	gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 			gr->global_ctx_buffer[FECS_TRACE_BUFFER].mem.size,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 
 	if (!gpu_va)
 		goto clean_up;
@@ -262,7 +262,7 @@ static int vgpu_gr_map_global_ctx_buffers(struct gk20a *g,
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
 			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
-					gmmu_page_size_kernel);
+					GMMU_PAGE_SIZE_KERNEL);
 			g_bfr_va[i] = 0;
 		}
 	}
@@ -285,7 +285,7 @@ static void vgpu_gr_unmap_global_ctx_buffers(struct tsg_gk20a *tsg)
 	for (i = 0; i < NR_GLOBAL_CTX_BUF_VA; i++) {
 		if (g_bfr_va[i]) {
 			__nvgpu_vm_free_va(ch_vm, g_bfr_va[i],
-					gmmu_page_size_kernel);
+					GMMU_PAGE_SIZE_KERNEL);
 			g_bfr_va[i] = 0;
 			g_bfr_size[i] = 0;
 		}
@@ -317,7 +317,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 
 	gr_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(vm,
 				gr->ctx_vars.buffer_total_size,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 
 	if (!gr_ctx->mem.gpu_va)
 		return -ENOMEM;
@@ -336,7 +336,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 	if (unlikely(err)) {
 		nvgpu_err(g, "fail to alloc gr_ctx");
 		__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		gr_ctx->mem.aperture = APERTURE_INVALID;
 	} else {
 		gr_ctx->virt_ctx = p->gr_ctx_handle;
@@ -365,7 +365,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	patch_ctx->mem.size = 128 * sizeof(u32);
 	patch_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch_vm,
 				patch_ctx->mem.size,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 	if (!patch_ctx->mem.gpu_va)
 		return -ENOMEM;
 
@@ -376,7 +376,7 @@ static int vgpu_gr_alloc_channel_patch_ctx(struct gk20a *g,
 	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
 	if (err || msg.ret) {
 		__nvgpu_vm_free_va(ch_vm, patch_ctx->mem.gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		err = -ENOMEM;
 	}
 
@@ -394,7 +394,7 @@ static void vgpu_gr_free_channel_patch_ctx(struct tsg_gk20a *tsg)
 		/* server will free on channel close */
 
 		__nvgpu_vm_free_va(tsg->vm, patch_ctx->mem.gpu_va,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 		patch_ctx->mem.gpu_va = 0;
 	}
 }
@@ -414,7 +414,7 @@ static void vgpu_gr_free_channel_pm_ctx(struct tsg_gk20a *tsg)
 	/* server will free on channel close */
 
 	__nvgpu_vm_free_va(tsg->vm, pm_ctx->mem.gpu_va,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 	pm_ctx->mem.gpu_va = 0;
 }
 
@@ -437,7 +437,7 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g,
 	WARN_ON(err || msg.ret);
 
 	__nvgpu_vm_free_va(vm, gr_ctx->mem.gpu_va,
-			gmmu_page_size_kernel);
+			GMMU_PAGE_SIZE_KERNEL);
 
 	tsg = &g->fifo.tsg[gr_ctx->tsgid];
 	vgpu_gr_unmap_global_ctx_buffers(tsg);
@@ -1120,7 +1120,7 @@ int vgpu_gr_update_hwpm_ctxsw_mode(struct gk20a *g,
 	if (pm_ctx->mem.gpu_va == 0) {
 		pm_ctx->mem.gpu_va = __nvgpu_vm_alloc_va(ch->vm,
 				g->gr.ctx_vars.pm_ctxsw_image_size,
-				gmmu_page_size_kernel);
+				GMMU_PAGE_SIZE_KERNEL);
 
 		if (!pm_ctx->mem.gpu_va)
 			return -ENOMEM;