author		Srirangan <smadhavan@nvidia.com>	2018-08-02 05:47:55 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-10 01:28:15 -0400
commit		6b26d233499f9d447f06e8e72c72ed6728762e37 (patch)
tree		d983b078e372165b44e51d119e9b4b61ac9bbc1c /drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
parent		9c13b30a465ed94f1e3547dc439462c3ea496eb8 (diff)
gpu: nvgpu: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all loop bodies be enclosed in braces, including single-statement loop bodies. This patch fixes the MISRA violations caused by single-statement loop bodies without braces by adding them.

JIRA NVGPU-989

Change-Id: If79f56f92b94d0114477b66a6f654ac16ee8ea27
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1791194
Reviewed-by: Adeel Raza <araza@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
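For readers unfamiliar with the rule, the following minimal, self-contained sketch (illustrative only, a hypothetical standalone file, not taken from this patch or from nvgpu) shows the non-compliant and compliant loop forms that MISRA C Rule 15.6 distinguishes:

/* misra_15_6_example.c -- hypothetical illustration of MISRA Rule 15.6 */
#include <stdio.h>

int main(void)
{
	int sum = 0;
	int i;

	/* Non-compliant with MISRA Rule 15.6: the single-statement loop
	 * body is not enclosed in braces. */
	for (i = 0; i < 4; i++)
		sum += i;

	/* Compliant: the same loop with its body wrapped in braces,
	 * mirroring the change made throughout this patch. */
	for (i = 0; i < 4; i++) {
		sum += i;
	}

	printf("sum = %d\n", sum);	/* prints "sum = 12": both loops add 0+1+2+3 */
	return 0;
}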
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/fifo_gv11b.c')
-rw-r--r--	drivers/gpu/nvgpu/gv11b/fifo_gv11b.c	18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index d1bd7111..56012dd7 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -808,14 +808,16 @@ int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id,
 	runlist_served_pbdmas = f->runlist_info[runlist_id].pbdma_bitmask;
 	runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask;
 
-	for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma)
+	for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma) {
 		ret |= gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id);
+	}
 
 	f->runlist_info[runlist_id].reset_eng_bitmask = 0;
 
-	for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines)
+	for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines) {
 		ret |= gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id,
 			&f->runlist_info[runlist_id].reset_eng_bitmask);
+	}
 	return ret;
 }
 
@@ -1028,9 +1030,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 	u32 num_runlists = 0;
 
 	nvgpu_log_fn(g, "acquire runlist_lock for all runlists");
-	for (rlid = 0; rlid < g->fifo.max_runlists; rlid++)
+	for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
 		nvgpu_mutex_acquire(&f->runlist_info[rlid].
 			runlist_lock);
+	}
 
 	/* get runlist id and tsg */
 	if (id_type == ID_TYPE_TSG) {
@@ -1206,9 +1209,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 		nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
 	} else {
 		nvgpu_log_fn(g, "release runlist_lock for all runlists");
-		for (rlid = 0; rlid < g->fifo.max_runlists; rlid++)
+		for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
 			nvgpu_mutex_release(&f->runlist_info[rlid].
 				runlist_lock);
+		}
 	}
 }
 
@@ -1756,9 +1760,10 @@ void gv11b_fifo_init_eng_method_buffers(struct gk20a *g,
 			break;
 	}
 	if (err) {
-		for (i = (runque - 1); i >= 0; i--)
+		for (i = (runque - 1); i >= 0; i--) {
 			nvgpu_dma_unmap_free(vm,
 				&tsg->eng_method_buffers[i]);
+		}
 
 		nvgpu_kfree(g, tsg->eng_method_buffers);
 		tsg->eng_method_buffers = NULL;
@@ -1778,8 +1783,9 @@ void gv11b_fifo_deinit_eng_method_buffers(struct gk20a *g,
 	if (tsg->eng_method_buffers == NULL)
 		return;
 
-	for (runque = 0; runque < g->fifo.num_pbdma; runque++)
+	for (runque = 0; runque < g->fifo.num_pbdma; runque++) {
 		nvgpu_dma_unmap_free(vm, &tsg->eng_method_buffers[runque]);
+	}
 
 	nvgpu_kfree(g, tsg->eng_method_buffers);
 	tsg->eng_method_buffers = NULL;