author     Srirangan <smadhavan@nvidia.com>                     2018-08-02 05:47:55 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-08-10 01:28:15 -0400
commit     6b26d233499f9d447f06e8e72c72ed6728762e37 (patch)
tree       d983b078e372165b44e51d119e9b4b61ac9bbc1c
parent     9c13b30a465ed94f1e3547dc439462c3ea496eb8 (diff)
gpu: nvgpu: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all loop bodies be enclosed in braces,
including single-statement loop bodies. This patch fixes the MISRA
violations caused by single-statement loop bodies without braces by
adding the braces.

JIRA NVGPU-989

Change-Id: If79f56f92b94d0114477b66a6f654ac16ee8ea27
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1791194
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
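For illustration, every hunk in this change follows the same pattern; a
generic before/after sketch (mirroring the first hunk below, not
additional driver code):

    /* Non-compliant with MISRA C:2012 Rule 15.6: the loop body is a
     * single statement without braces. */
    while (n32 >>= 1)
        count++;

    /* Compliant: the loop body is enclosed in braces, even though it
     * is still a single statement. */
    while (n32 >>= 1) {
        count++;
    }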
-rw-r--r--  drivers/gpu/nvgpu/boardobj/boardobjgrp.h      |  5
-rw-r--r--  drivers/gpu/nvgpu/boardobj/boardobjgrpmask.c  | 29
-rw-r--r--  drivers/gpu/nvgpu/gp106/bios_gp106.c          |  3
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c            |  3
-rw-r--r--  drivers/gpu/nvgpu/gp10b/mm_gp10b.c            |  3
-rw-r--r--  drivers/gpu/nvgpu/gv100/gr_gv100.c            |  6
-rw-r--r--  drivers/gpu/nvgpu/gv11b/acr_gv11b.c           |  3
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fifo_gv11b.c          | 18
-rw-r--r--  drivers/gpu/nvgpu/gv11b/gr_gv11b.c            |  9
-rw-r--r--  drivers/gpu/nvgpu/perf/vfe_equ.c              |  6
10 files changed, 56 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrp.h b/drivers/gpu/nvgpu/boardobj/boardobjgrp.h
index 912c2c98..3c28963c 100644
--- a/drivers/gpu/nvgpu/boardobj/boardobjgrp.h
+++ b/drivers/gpu/nvgpu/boardobj/boardobjgrp.h
@@ -376,8 +376,9 @@ void boardobjgrpe32hdrset(struct nv_pmu_boardobjgrp *hdr, u32 objmask);
 #define HIGHESTBITIDX_32(n32) \
 { \
         u32 count = 0; \
-        while (n32 >>= 1) \
+        while (n32 >>= 1) { \
                 count++; \
+        } \
         n32 = count; \
 }
 
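For reference, HIGHESTBITIDX_32 computes the zero-based index of the
highest set bit by shifting its operand right until it reaches zero,
then overwrites the operand with the result. A hypothetical usage
sketch (the variable name is illustrative):

    u32 v = 0x80;          /* bit 7 is the highest set bit */
    HIGHESTBITIDX_32(v);   /* v now holds 7; the input value is consumed */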
diff --git a/drivers/gpu/nvgpu/boardobj/boardobjgrpmask.c b/drivers/gpu/nvgpu/boardobj/boardobjgrpmask.c
index 93befc99..849abe16 100644
--- a/drivers/gpu/nvgpu/boardobj/boardobjgrpmask.c
+++ b/drivers/gpu/nvgpu/boardobj/boardobjgrpmask.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -63,8 +63,9 @@ u32 boardobjgrpmask_import(struct boardobjgrpmask *mask, u8 bitsize,
         if (mask->bitcount != bitsize)
                 return -EINVAL;
 
-        for (index = 0; index < mask->maskdatacount; index++)
+        for (index = 0; index < mask->maskdatacount; index++) {
                 mask->data[index] = extmask->data[index];
+        }
 
         BOARDOBJGRPMASK_NORMALIZE(mask);
 
@@ -83,8 +84,9 @@ u32 boardobjgrpmask_export(struct boardobjgrpmask *mask, u8 bitsize,
         if (mask->bitcount != bitsize)
                 return -EINVAL;
 
-        for (index = 0; index < mask->maskdatacount; index++)
+        for (index = 0; index < mask->maskdatacount; index++) {
                 extmask->data[index] = mask->data[index];
+        }
 
         return 0;
 }
@@ -95,8 +97,9 @@ u32 boardobjgrpmask_clr(struct boardobjgrpmask *mask)
 
         if (mask == NULL)
                 return -EINVAL;
-        for (index = 0; index < mask->maskdatacount; index++)
+        for (index = 0; index < mask->maskdatacount; index++) {
                 mask->data[index] = 0;
+        }
 
         return 0;
 }
@@ -107,8 +110,9 @@ u32 boardobjgrpmask_set(struct boardobjgrpmask *mask)
 
         if (mask == NULL)
                 return -EINVAL;
-        for (index = 0; index < mask->maskdatacount; index++)
+        for (index = 0; index < mask->maskdatacount; index++) {
                 mask->data[index] = 0xFFFFFFFF;
+        }
         BOARDOBJGRPMASK_NORMALIZE(mask);
         return 0;
 }
@@ -119,8 +123,9 @@ u32 boardobjgrpmask_inv(struct boardobjgrpmask *mask)
 
         if (mask == NULL)
                 return -EINVAL;
-        for (index = 0; index < mask->maskdatacount; index++)
+        for (index = 0; index < mask->maskdatacount; index++) {
                 mask->data[index] = ~mask->data[index];
+        }
         BOARDOBJGRPMASK_NORMALIZE(mask);
         return 0;
 }
@@ -281,8 +286,9 @@ u32 boardobjgrpmask_and(struct boardobjgrpmask *dst,
         if (!boardobjgrpmask_sizeeq(dst, op2))
                 return -EINVAL;
 
-        for (index = 0; index < dst->maskdatacount; index++)
+        for (index = 0; index < dst->maskdatacount; index++) {
                 dst->data[index] = op1->data[index] & op2->data[index];
+        }
 
         return 0;
 }
@@ -298,8 +304,9 @@ u32 boardobjgrpmask_or(struct boardobjgrpmask *dst,
         if (!boardobjgrpmask_sizeeq(dst, op2))
                 return -EINVAL;
 
-        for (index = 0; index < dst->maskdatacount; index++)
+        for (index = 0; index < dst->maskdatacount; index++) {
                 dst->data[index] = op1->data[index] | op2->data[index];
+        }
 
         return 0;
 }
@@ -315,8 +322,9 @@ u32 boardobjgrpmask_xor(struct boardobjgrpmask *dst,
         if (!boardobjgrpmask_sizeeq(dst, op2))
                 return -EINVAL;
 
-        for (index = 0; index < dst->maskdatacount; index++)
+        for (index = 0; index < dst->maskdatacount; index++) {
                 dst->data[index] = op1->data[index] ^ op2->data[index];
+        }
 
         return 0;
 }
@@ -329,8 +337,9 @@ u32 boardobjgrpmask_copy(struct boardobjgrpmask *dst,
         if (!boardobjgrpmask_sizeeq(dst, src))
                 return -EINVAL;
 
-        for (index = 0; index < dst->maskdatacount; index++)
+        for (index = 0; index < dst->maskdatacount; index++) {
                 dst->data[index] = src->data[index];
+        }
 
         return 0;
 }
diff --git a/drivers/gpu/nvgpu/gp106/bios_gp106.c b/drivers/gpu/nvgpu/gp106/bios_gp106.c
index deeae2fe..e470fffc 100644
--- a/drivers/gpu/nvgpu/gp106/bios_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/bios_gp106.c
@@ -70,8 +70,9 @@ static void upload_data(struct gk20a *g, u32 dst, u8 *src, u32 size, u8 port)
                         pwr_falcon_dmemc_blk_f(blk) |
                         pwr_falcon_dmemc_aincw_f(1));
 
-        for (i = 0; i < words; i++)
+        for (i = 0; i < words; i++) {
                 gk20a_writel(g, pwr_falcon_dmemd_r(port), src_u32[i]);
+        }
 }
 
 int gp106_bios_devinit(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index 7792728c..5a22af80 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -1572,8 +1572,9 @@ int gr_gp10b_load_smid_config(struct gk20a *g)
                 gk20a_writel(g, gr_cwd_gpc_tpc_id_r(i), reg);
         }
 
-        for (i = 0; i < gr_cwd_sm_id__size_1_v(); i++)
+        for (i = 0; i < gr_cwd_sm_id__size_1_v(); i++) {
                 gk20a_writel(g, gr_cwd_sm_id_r(i), tpc_sm_id[i]);
+        }
 
         nvgpu_kfree(g, tpc_sm_id);
 
diff --git a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
index a0e08437..7036ca15 100644
--- a/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/mm_gp10b.c
@@ -286,8 +286,9 @@ static enum gmmu_pgsz_gk20a gp10b_get_pde0_pgsz(struct gk20a *g,
         if (!pd->mem)
                 return pgsz;
 
-        for (i = 0; i < GP10B_PDE0_ENTRY_SIZE >> 2; i++)
+        for (i = 0; i < GP10B_PDE0_ENTRY_SIZE >> 2; i++) {
                 pde_v[i] = nvgpu_mem_rd32(g, pd->mem, pde_offset + i);
+        }
 
         /*
          * Check if the aperture AND address are set
diff --git a/drivers/gpu/nvgpu/gv100/gr_gv100.c b/drivers/gpu/nvgpu/gv100/gr_gv100.c
index 97affdd9..13092f2a 100644
--- a/drivers/gpu/nvgpu/gv100/gr_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/gr_gv100.c
@@ -229,9 +229,11 @@ int gr_gv100_init_sm_id_table(struct gk20a *g)
                 goto exit_build_table;
         }
 
-        for (gpc = 0; gpc < g->gr.gpc_count; gpc++)
-                for (pes = 0; pes < g->gr.gpc_ppc_count[gpc]; pes++)
+        for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
+                for (pes = 0; pes < g->gr.gpc_ppc_count[gpc]; pes++) {
                         gpc_tpc_mask[gpc] |= g->gr.pes_tpc_mask[pes][gpc];
+                }
+        }
 
         for (gtpc = 0; gtpc < g->gr.tpc_count; gtpc++) {
                 maxperf = -1;
diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
index 696eb015..d0928335 100644
--- a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
@@ -144,9 +144,10 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g)
                 goto err_release_acr_fw;
         }
 
-        for (index = 0; index < 9; index++)
+        for (index = 0; index < 9; index++) {
                 gv11b_dbg_pmu(g, "acr_ucode_header_t210_load %u\n",
                         acr_ucode_header_t210_load[index]);
+        }
 
         acr_dmem = (u64 *)
                 &(((u8 *)acr_ucode_data_t210_load)[
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index d1bd7111..56012dd7 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -808,14 +808,16 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
         runlist_served_pbdmas = f->runlist_info[runlist_id].pbdma_bitmask;
         runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask;
 
-        for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma)
+        for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma) {
                 ret |= gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id);
+        }
 
         f->runlist_info[runlist_id].reset_eng_bitmask = 0;
 
-        for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines)
+        for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines) {
                 ret |= gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id,
                         &f->runlist_info[runlist_id].reset_eng_bitmask);
+        }
         return ret;
 }
 
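Note that for_each_set_bit is an iterator macro that expands to a plain
for loop (it is built on find_first_bit/find_next_bit from the kernel
bitops headers), so Rule 15.6 applies to its body exactly as it does to
a for statement. A rough open-coded equivalent of the first loop above
(a sketch, not the macro's exact expansion):

    unsigned long bit;
    for (bit = find_first_bit(&runlist_served_pbdmas, f->num_pbdma);
         bit < f->num_pbdma;
         bit = find_next_bit(&runlist_served_pbdmas, f->num_pbdma, bit + 1)) {
            ret |= gv11b_fifo_poll_pbdma_chan_status(g, tsgid, bit);
    }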
@@ -1028,9 +1030,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
         u32 num_runlists = 0;
 
         nvgpu_log_fn(g, "acquire runlist_lock for all runlists");
-        for (rlid = 0; rlid < g->fifo.max_runlists; rlid++)
+        for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
                 nvgpu_mutex_acquire(&f->runlist_info[rlid].
                         runlist_lock);
+        }
 
         /* get runlist id and tsg */
         if (id_type == ID_TYPE_TSG) {
@@ -1206,9 +1209,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
                 nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
         } else {
                 nvgpu_log_fn(g, "release runlist_lock for all runlists");
-                for (rlid = 0; rlid < g->fifo.max_runlists; rlid++)
+                for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
                         nvgpu_mutex_release(&f->runlist_info[rlid].
                                 runlist_lock);
+                }
         }
 }
 
@@ -1756,9 +1760,10 @@ void gv11b_fifo_init_eng_method_buffers(struct gk20a *g,
                 break;
         }
         if (err) {
-                for (i = (runque - 1); i >= 0; i--)
+                for (i = (runque - 1); i >= 0; i--) {
                         nvgpu_dma_unmap_free(vm,
                                 &tsg->eng_method_buffers[i]);
+                }
 
                 nvgpu_kfree(g, tsg->eng_method_buffers);
                 tsg->eng_method_buffers = NULL;
@@ -1778,8 +1783,9 @@ void gv11b_fifo_deinit_eng_method_buffers(struct gk20a *g,
         if (tsg->eng_method_buffers == NULL)
                 return;
 
-        for (runque = 0; runque < g->fifo.num_pbdma; runque++)
+        for (runque = 0; runque < g->fifo.num_pbdma; runque++) {
                 nvgpu_dma_unmap_free(vm, &tsg->eng_method_buffers[runque]);
+        }
 
         nvgpu_kfree(g, tsg->eng_method_buffers);
         tsg->eng_method_buffers = NULL;
diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
index 9e36071f..791c0d6f 100644
--- a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c
@@ -2801,8 +2801,9 @@ int gr_gv11b_load_smid_config(struct gk20a *g)
                 gk20a_writel(g, gr_cwd_gpc_tpc_id_r(i), reg);
         }
 
-        for (i = 0; i < gr_cwd_sm_id__size_1_v(); i++)
+        for (i = 0; i < gr_cwd_sm_id__size_1_v(); i++) {
                 gk20a_writel(g, gr_cwd_sm_id_r(i), tpc_sm_id[i]);
+        }
         nvgpu_kfree(g, tpc_sm_id);
 
         return 0;
@@ -4894,11 +4895,12 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
                 if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC)
                         for (tpc_num = 0;
                              tpc_num < g->gr.gpc_tpc_count[gpc_num];
-                             tpc_num++)
+                             tpc_num++) {
                                 priv_addr_table[t++] =
                                         pri_tpc_addr(g,
                                                 pri_tpccs_addr_mask(addr),
                                                 gpc_num, tpc_num);
+                        }
 
                 else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC) {
                         err = gr_gk20a_split_ppc_broadcast_addr(g,
@@ -4998,11 +5000,12 @@ int gr_gv11b_create_priv_addr_table(struct gk20a *g,
                 if (broadcast_flags & PRI_BROADCAST_FLAGS_TPC)
                         for (tpc_num = 0;
                              tpc_num < g->gr.gpc_tpc_count[gpc_num];
-                             tpc_num++)
+                             tpc_num++) {
                                 priv_addr_table[t++] =
                                         pri_tpc_addr(g,
                                                 pri_tpccs_addr_mask(addr),
                                                 gpc_num, tpc_num);
+                        }
                 else if (broadcast_flags & PRI_BROADCAST_FLAGS_PPC)
                         err = gr_gk20a_split_ppc_broadcast_addr(g,
                                 addr, gpc_num, priv_addr_table, &t);
diff --git a/drivers/gpu/nvgpu/perf/vfe_equ.c b/drivers/gpu/nvgpu/perf/vfe_equ.c
index 8321d98d..fa742c35 100644
--- a/drivers/gpu/nvgpu/perf/vfe_equ.c
+++ b/drivers/gpu/nvgpu/perf/vfe_equ.c
@@ -517,8 +517,9 @@ static u32 _vfe_equ_pmudatainit_quadratic(struct gk20a *g,
 
         pset = (struct nv_pmu_vfe_equ_quadratic *) ppmudata;
 
-        for (i = 0; i < CTRL_PERF_VFE_EQU_QUADRATIC_COEFF_COUNT; i++)
+        for (i = 0; i < CTRL_PERF_VFE_EQU_QUADRATIC_COEFF_COUNT; i++) {
                 pset->coeffs[i] = pvfe_equ_quadratic->coeffs[i];
+        }
 
         return status;
 }
@@ -547,8 +548,9 @@ static u32 vfe_equ_construct_quadratic(struct gk20a *g,
         pvfeequ->super.super.pmudatainit =
                 _vfe_equ_pmudatainit_quadratic;
 
-        for (i = 0; i < CTRL_PERF_VFE_EQU_QUADRATIC_COEFF_COUNT; i++)
+        for (i = 0; i < CTRL_PERF_VFE_EQU_QUADRATIC_COEFF_COUNT; i++) {
                 pvfeequ->coeffs[i] = ptmpequ->coeffs[i];
+        }
 
         return status;
 }