summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
diff options
context:
space:
mode:
authorSrirangan <smadhavan@nvidia.com>2018-08-30 01:07:55 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-09-05 07:35:54 -0400
commit0f97bd4d44c8bcedf298f725fe0b6cfc70fa81ff (patch)
tree469b4746ebedb5843c631c547f102f72f5850ffa /drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
parent97aa9f705a84186ef0f7f31487988cfd5a8a94e8 (diff)
gpu: nvgpu: gk20a: Fix MISRA 15.6 violations
MISRA Rule-15.6 requires that all if-else blocks be enclosed in braces, including single statement blocks. Fix errors due to single statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: Icdeede22dd26fd70fae92aa791d35b115ef49e32
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1797691
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/tsg_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/tsg_gk20a.c25
1 file changed, 16 insertions, 9 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 506d4330..6dc2e282 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -50,16 +50,18 @@ int gk20a_enable_tsg(struct tsg_gk20a *tsg)
50 is_next = gk20a_fifo_channel_status_is_next(g, ch->chid); 50 is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
51 is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid); 51 is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);
52 52
53 if (is_next || is_ctx_reload) 53 if (is_next || is_ctx_reload) {
54 g->ops.fifo.enable_channel(ch); 54 g->ops.fifo.enable_channel(ch);
55 }
55 } 56 }
56 57
57 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { 58 nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
58 is_next = gk20a_fifo_channel_status_is_next(g, ch->chid); 59 is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
59 is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid); 60 is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);
60 61
61 if (is_next || is_ctx_reload) 62 if (is_next || is_ctx_reload) {
62 continue; 63 continue;
64 }
63 65
64 g->ops.fifo.enable_channel(ch); 66 g->ops.fifo.enable_channel(ch);
65 } 67 }
@@ -92,8 +94,9 @@ static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
92 94
93 for (i = 0; i < f->max_runlists; ++i) { 95 for (i = 0; i < f->max_runlists; ++i) {
94 runlist = &f->runlist_info[i]; 96 runlist = &f->runlist_info[i];
95 if (test_bit(ch->chid, runlist->active_channels)) 97 if (test_bit(ch->chid, runlist->active_channels)) {
96 return true; 98 return true;
99 }
97 } 100 }
98 101
99 return false; 102 return false;
@@ -124,9 +127,9 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
124 ch->tsgid = tsg->tsgid; 127 ch->tsgid = tsg->tsgid;
125 128
126 /* all the channel part of TSG should need to be same runlist_id */ 129 /* all the channel part of TSG should need to be same runlist_id */
127 if (tsg->runlist_id == FIFO_INVAL_TSG_ID) 130 if (tsg->runlist_id == FIFO_INVAL_TSG_ID) {
128 tsg->runlist_id = ch->runlist_id; 131 tsg->runlist_id = ch->runlist_id;
129 else if (tsg->runlist_id != ch->runlist_id) { 132 } else if (tsg->runlist_id != ch->runlist_id) {
130 nvgpu_err(tsg->g, 133 nvgpu_err(tsg->g,
131 "Error: TSG channel should be share same runlist ch[%d] tsg[%d]", 134 "Error: TSG channel should be share same runlist ch[%d] tsg[%d]",
132 ch->runlist_id, tsg->runlist_id); 135 ch->runlist_id, tsg->runlist_id);
@@ -180,8 +183,9 @@ int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
180 struct tsg_gk20a *tsg = NULL; 183 struct tsg_gk20a *tsg = NULL;
181 int err; 184 int err;
182 185
183 if (tsgid >= g->fifo.num_channels) 186 if (tsgid >= g->fifo.num_channels) {
184 return -EINVAL; 187 return -EINVAL;
188 }
185 189
186 tsg = &g->fifo.tsg[tsgid]; 190 tsg = &g->fifo.tsg[tsgid];
187 191
@@ -214,8 +218,9 @@ int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
214 case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH: 218 case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH:
215 ret = g->ops.fifo.set_runlist_interleave(g, tsg->tsgid, 219 ret = g->ops.fifo.set_runlist_interleave(g, tsg->tsgid,
216 0, level); 220 0, level);
217 if (!ret) 221 if (!ret) {
218 tsg->interleave_level = level; 222 tsg->interleave_level = level;
223 }
219 break; 224 break;
220 default: 225 default:
221 ret = -EINVAL; 226 ret = -EINVAL;
@@ -238,8 +243,9 @@ u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg)
238{ 243{
239 struct gk20a *g = tsg->g; 244 struct gk20a *g = tsg->g;
240 245
241 if (!tsg->timeslice_us) 246 if (!tsg->timeslice_us) {
242 return g->ops.fifo.default_timeslice_us(g); 247 return g->ops.fifo.default_timeslice_us(g);
248 }
243 249
244 return tsg->timeslice_us; 250 return tsg->timeslice_us;
245} 251}
@@ -306,8 +312,9 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
306 tsg->tgid = pid; 312 tsg->tgid = pid;
307 tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE; 313 tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE;
308 314
309 if (g->ops.fifo.init_eng_method_buffers) 315 if (g->ops.fifo.init_eng_method_buffers) {
310 g->ops.fifo.init_eng_method_buffers(g, tsg); 316 g->ops.fifo.init_eng_method_buffers(g, tsg);
317 }
311 318
312 if (g->ops.fifo.tsg_open) { 319 if (g->ops.fifo.tsg_open) {
313 err = g->ops.fifo.tsg_open(tsg); 320 err = g->ops.fifo.tsg_open(tsg);