author	Srirangan <smadhavan@nvidia.com>	2018-08-02 04:45:54 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-06 20:36:39 -0400
commit	17aeea4a2ffa23fc9dbcdc84cda747fe5a025131 (patch)
tree	d4be52f246724fb9cb99047059073b93aeb089ce /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parent	6c9daf7626567fffc9d1ccd475865e81ae90a973 (diff)
gpu: nvgpu: gk20a: Fix MISRA 15.6 violations
This fixes errors due to single statement loop bodies without braces,
which is part of Rule 15.6 of MISRA. This patch covers files in
gpu/nvgpu/gk20a/.

JIRA NVGPU-989

Change-Id: I2f422e9bc2b03229f4d2c3198613169ce5e7f3ee
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1791019
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
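As background, MISRA C:2012 Rule 15.6 requires the body of every iteration and selection statement to be a compound statement (enclosed in braces), even when it contains only one statement. The following is a minimal sketch of the before/after shape of this patch; the identifiers (NUM_UNITS, clear_map_*) are hypothetical and not taken from the driver.

#include <stdint.h>

#define NUM_UNITS 4u

/* Non-compliant with MISRA C:2012 Rule 15.6:
 * the loop has a single-statement body without braces. */
static void clear_map_noncompliant(uint32_t map[NUM_UNITS])
{
	uint32_t i;

	for (i = 0u; i < NUM_UNITS; i++)
		map[i] = 0u;
}

/* Compliant: the loop body is a compound statement,
 * mirroring the braces added throughout this change. */
static void clear_map_compliant(uint32_t map[NUM_UNITS])
{
	uint32_t i;

	for (i = 0u; i < NUM_UNITS; i++) {
		map[i] = 0u;
	}
}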
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	18
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 93ef211e..23e22c21 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -907,8 +907,9 @@ int gk20a_init_fifo_setup_sw_common(struct gk20a *g)
 	memset(f->active_engines_list, 0xff, (f->max_engines * sizeof(u32)));
 
 	/* pbdma map needs to be in place before calling engine info init */
-	for (i = 0; i < f->num_pbdma; ++i)
+	for (i = 0; i < f->num_pbdma; ++i) {
 		f->pbdma_map[i] = gk20a_readl(g, fifo_pbdma_map_r(i));
+	}
 
 	g->ops.fifo.init_engine_info(f);
 
@@ -2496,9 +2497,10 @@ unsigned int gk20a_fifo_handle_pbdma_intr_0(struct gk20a *g, u32 pbdma_id,
 	     f->intr.pbdma.restartable_0) & pbdma_intr_0) {
 
 		pbdma_intr_err = (unsigned long)pbdma_intr_0;
-		for_each_set_bit(bit, &pbdma_intr_err, 32)
+		for_each_set_bit(bit, &pbdma_intr_err, 32) {
 			nvgpu_err(g, "PBDMA intr %s Error",
 				pbdma_intr_fault_type_desc[bit]);
+		}
 
 		nvgpu_err(g,
 			"pbdma_intr_0(%d):0x%08x PBH: %08x "
@@ -2851,8 +2853,9 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 		return 0;
 
 	/* we have no idea which runlist we are using. lock all */
-	for (i = 0; i < g->fifo.max_runlists; i++)
+	for (i = 0; i < g->fifo.max_runlists; i++) {
 		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
+	}
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -2861,8 +2864,9 @@ int gk20a_fifo_preempt_channel(struct gk20a *g, u32 chid)
 	if (!mutex_ret)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	for (i = 0; i < g->fifo.max_runlists; i++)
+	for (i = 0; i < g->fifo.max_runlists; i++) {
 		nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
+	}
 
 	if (ret) {
 		if (nvgpu_platform_is_silicon(g)) {
@@ -2891,8 +2895,9 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 		return 0;
 
 	/* we have no idea which runlist we are using. lock all */
-	for (i = 0; i < g->fifo.max_runlists; i++)
+	for (i = 0; i < g->fifo.max_runlists; i++) {
 		nvgpu_mutex_acquire(&f->runlist_info[i].runlist_lock);
+	}
 
 	mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
@@ -2901,8 +2906,9 @@ int gk20a_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 	if (!mutex_ret)
 		nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
 
-	for (i = 0; i < g->fifo.max_runlists; i++)
+	for (i = 0; i < g->fifo.max_runlists; i++) {
 		nvgpu_mutex_release(&f->runlist_info[i].runlist_lock);
+	}
 
 	if (ret) {
 		if (nvgpu_platform_is_silicon(g)) {