author		Srirangan <smadhavan@nvidia.com>	2018-08-20 06:39:12 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-28 09:46:59 -0400
commit		07d3387ceb10cdc4d4413d04b1223cbd5181438b (patch)
tree		c86a661e6bff08c43f45fdb2b79be9ba1a6531b1 /drivers
parent		3e5e4804f9c2bf5b914012852b56dbbbc00f8253 (diff)
gpu: nvgpu: gv11b: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that the body of every if-else statement be
enclosed in braces, including single-statement bodies. Fix the
violations caused by single-statement if blocks without braces by
introducing the braces.
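
For example, the transformation applied throughout this change follows
this pattern (an illustrative sketch, not an excerpt from any one file):

        /* Non-compliant: single-statement if body without braces */
        if (err)
                return err;

        /* Compliant with MISRA Rule 15.6: body enclosed in braces */
        if (err) {
                return err;
        }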
JIRA NVGPU-671
Change-Id: I1562bd1b109a100af29bd147ed8b56463b6a8e63
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1796674
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Scott Long <scottl@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/nvgpu/gv11b/acr_gv11b.c	12
-rw-r--r--	drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c	3
-rw-r--r--	drivers/gpu/nvgpu/gv11b/fifo_gv11b.c	192
-rw-r--r--	drivers/gpu/nvgpu/gv11b/hal_gv11b.c	6
-rw-r--r--	drivers/gpu/nvgpu/gv11b/mm_gv11b.c	29
-rw-r--r--	drivers/gpu/nvgpu/gv11b/pmu_gv11b.c	27
-rw-r--r--	drivers/gpu/nvgpu/gv11b/subctx_gv11b.c	3
7 files changed, 179 insertions, 93 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
index fdd0f123..a6bbaa40 100644
--- a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c
@@ -187,8 +187,9 @@ int gv11b_bootstrap_hs_flcn(struct gk20a *g)
                         acr->acr_ucode.gpu_va +
                         acr_ucode_header_t210_load[2]);
                 bl_dmem_desc->data_size = acr_ucode_header_t210_load[3];
-        } else
+        } else {
                 acr->acr_dmem_desc_v1->nonwpr_ucode_blob_size = 0;
+        }
         status = pmu_exec_gen_bl(g, bl_dmem_desc, 1);
         if (status != 0) {
                 err = status;
@@ -277,10 +278,12 @@ int gv11b_init_pmu_setup_hw1(struct gk20a *g,
         pmu->isr_enabled = true;
         nvgpu_mutex_release(&pmu->isr_mutex);
 
-        if (g->ops.pmu.setup_apertures)
+        if (g->ops.pmu.setup_apertures) {
                 g->ops.pmu.setup_apertures(g);
-        if (g->ops.pmu.update_lspmu_cmdline_args)
+        }
+        if (g->ops.pmu.update_lspmu_cmdline_args) {
                 g->ops.pmu.update_lspmu_cmdline_args(g);
+        }
 
         /*disable irqs for hs falcon booting as we will poll for halt*/
         nvgpu_mutex_acquire(&pmu->isr_mutex);
@@ -290,7 +293,8 @@ int gv11b_init_pmu_setup_hw1(struct gk20a *g,
         /*Clearing mailbox register used to reflect capabilities*/
         gk20a_writel(g, pwr_falcon_mailbox1_r(), 0);
         err = bl_bootstrap(pmu, desc, bl_sz);
-        if (err)
+        if (err) {
                 return err;
+        }
         return 0;
 }
diff --git a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c
index 94d75b20..85db7aea 100644
--- a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c
@@ -68,8 +68,9 @@ int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size)
         }
 
         err = gk20a_alloc_inst_block(g, &mm->perfbuf.inst_block);
-        if (err)
+        if (err) {
                 return err;
+        }
 
         g->ops.mm.init_inst_block(&mm->perfbuf.inst_block, mm->perfbuf.vm, 0);
 
diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
index 0e0c1e36..893835a4 100644
--- a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c
@@ -66,16 +66,17 @@ void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist)
         struct gk20a *g = tsg->g;
         u32 runlist_entry_0 = ram_rl_entry_type_tsg_v();
 
-        if (tsg->timeslice_timeout)
+        if (tsg->timeslice_timeout) {
                 runlist_entry_0 |=
                         ram_rl_entry_tsg_timeslice_scale_f(tsg->timeslice_scale) |
                         ram_rl_entry_tsg_timeslice_timeout_f(tsg->timeslice_timeout);
-        else
+        } else {
                 runlist_entry_0 |=
                         ram_rl_entry_tsg_timeslice_scale_f(
                                 ram_rl_entry_tsg_timeslice_scale_3_v()) |
                         ram_rl_entry_tsg_timeslice_timeout_f(
                                 ram_rl_entry_tsg_timeslice_timeout_128_v());
+        }
 
         runlist[0] = runlist_entry_0;
         runlist[1] = ram_rl_entry_tsg_length_f(tsg->num_active_channels);
@@ -301,11 +302,13 @@ void gv11b_dump_channel_status_ramfc(struct gk20a *g,
         struct channel_gk20a *c = g->fifo.channel + chid;
         struct nvgpu_semaphore_int *hw_sema = NULL;
 
-        if (c->hw_sema)
+        if (c->hw_sema) {
                 hw_sema = c->hw_sema;
+        }
 
-        if (!ch_state)
+        if (!ch_state) {
                 return;
+        }
 
         inst_mem = &ch_state->inst_block[0];
 
@@ -335,11 +338,12 @@ void gv11b_dump_channel_status_ramfc(struct gk20a *g,
                 inst_mem[ram_fc_sem_addr_lo_w()],
                 inst_mem[ram_fc_sem_payload_lo_w()],
                 inst_mem[ram_fc_sem_execute_w()]);
-        if (hw_sema)
+        if (hw_sema) {
                 gk20a_debug_output(o, "SEMA STATE: value: 0x%08x next_val: 0x%08x addr: 0x%010llx\n",
                         __nvgpu_semaphore_read(hw_sema),
                         nvgpu_atomic_read(&hw_sema->next_value),
                         nvgpu_hw_sema_addr(hw_sema));
+        }
         gk20a_debug_output(o, "\n");
 }
 
@@ -365,12 +369,15 @@ void gv11b_dump_eng_status(struct gk20a *g,
                         "tsg" : "channel",
                         gk20a_decode_pbdma_chan_eng_ctx_status(ctx_status));
 
-                if (fifo_engine_status_eng_reload_v(status))
+                if (fifo_engine_status_eng_reload_v(status)) {
                         gk20a_debug_output(o, "ctx_reload ");
-                if (fifo_engine_status_faulted_v(status))
+                }
+                if (fifo_engine_status_faulted_v(status)) {
                         gk20a_debug_output(o, "faulted ");
-                if (fifo_engine_status_engine_v(status))
+                }
+                if (fifo_engine_status_engine_v(status)) {
                         gk20a_debug_output(o, "busy ");
+                }
                 gk20a_debug_output(o, "\n");
         }
         gk20a_debug_output(o, "\n");
@@ -477,9 +484,10 @@ static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id,
                         delay << 1, GR_IDLE_CHECK_MAX);
         } while (!nvgpu_timeout_expired(&timeout));
 
-        if (ret)
+        if (ret) {
                 nvgpu_err(g, "preempt timeout pbdma: %u pbdma_stat: %u "
                                 "tsgid: %u", pbdma_id, pbdma_stat, id);
+        }
         return ret;
 }
 
@@ -663,16 +671,19 @@ void gv11b_fifo_reset_pbdma_and_eng_faulted(struct gk20a *g,
                 struct tsg_gk20a *tsg,
                 u32 faulted_pbdma, u32 faulted_engine)
 {
-        if (!tsg)
+        if (!tsg) {
                 return;
+        }
 
         nvgpu_log(g, gpu_dbg_intr, "reset faulted pbdma:0x%x eng:0x%x",
                         faulted_pbdma, faulted_engine);
 
-        if (faulted_pbdma != FIFO_INVAL_PBDMA_ID)
+        if (faulted_pbdma != FIFO_INVAL_PBDMA_ID) {
                 gv11b_reset_pbdma_faulted_tsg(tsg);
-        if (faulted_engine != FIFO_INVAL_ENGINE_ID)
+        }
+        if (faulted_engine != FIFO_INVAL_ENGINE_ID) {
                 gv11b_reset_eng_faulted_tsg(tsg);
+        }
 }
 
 static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
@@ -685,29 +696,33 @@ static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
         u32 rlid, pbdma_bitmask = 0;
 
         if (id_type != ID_TYPE_UNKNOWN) {
-                if (id_type == ID_TYPE_TSG)
+                if (id_type == ID_TYPE_TSG) {
                         runlists_mask |= fifo_sched_disable_runlist_m(
                                                 f->tsg[id].runlist_id);
-                else
+                } else {
                         runlists_mask |= fifo_sched_disable_runlist_m(
                                                 f->channel[id].runlist_id);
+                }
         }
 
         if (rc_type == RC_TYPE_MMU_FAULT && mmfault) {
-                if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID)
+                if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID) {
                         pbdma_bitmask = BIT(mmfault->faulted_pbdma);
+                }
 
                 for (rlid = 0; rlid < f->max_runlists; rlid++) {
 
                         runlist = &f->runlist_info[rlid];
 
-                        if (runlist->eng_bitmask & act_eng_bitmask)
+                        if (runlist->eng_bitmask & act_eng_bitmask) {
                                 runlists_mask |=
                                         fifo_sched_disable_runlist_m(rlid);
+                        }
 
-                        if (runlist->pbdma_bitmask & pbdma_bitmask)
+                        if (runlist->pbdma_bitmask & pbdma_bitmask) {
                                 runlists_mask |=
                                         fifo_sched_disable_runlist_m(rlid);
+                        }
                 }
         }
 
@@ -716,9 +731,10 @@ static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask,
                 if (act_eng_bitmask) {
                         /* eng ids are known */
                         runlist = &f->runlist_info[rlid];
-                        if (runlist->eng_bitmask & act_eng_bitmask)
+                        if (runlist->eng_bitmask & act_eng_bitmask) {
                                 runlists_mask |=
                                         fifo_sched_disable_runlist_m(rlid);
+                        }
                 } else {
                         runlists_mask |=
                                 fifo_sched_disable_runlist_m(rlid);
@@ -778,9 +794,10 @@ static int gv11b_fifo_poll_runlist_preempt_pending(struct gk20a *g,
                         delay << 1, GR_IDLE_CHECK_MAX);
         } while (!nvgpu_timeout_expired(&timeout));
 
-        if (ret)
+        if (ret) {
                 nvgpu_err(g, "preempt runlist timeout, runlists_mask:0x%08x",
                                 runlists_mask);
+        }
         return ret;
 }
 
@@ -827,8 +844,9 @@ int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid)
         struct fifo_gk20a *f = &g->fifo;
         u32 tsgid;
 
-        if (chid == FIFO_INVAL_CHANNEL_ID)
+        if (chid == FIFO_INVAL_CHANNEL_ID) {
                 return 0;
+        }
 
         tsgid = f->channel[chid].tsgid;
         nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsgid);
@@ -857,8 +875,9 @@ static int __locked_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask)
          * difference to pending runlist_preempt.
          */
 
-        if (!ret)
+        if (!ret) {
                 gk20a_fifo_handle_runlist_event(g);
+        }
 
         return ret;
 }
@@ -877,8 +896,9 @@ int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg)
         }
         nvgpu_rwsem_up_read(&tsg->ch_list_lock);
 
-        if (last_ch)
+        if (last_ch) {
                 g->ops.fifo.ring_channel_doorbell(last_ch);
+        }
 
         return 0;
 }
@@ -892,13 +912,15 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
         u32 runlist_id;
 
         nvgpu_log_fn(g, "tsgid: %d", tsgid);
-        if (tsgid == FIFO_INVAL_TSG_ID)
+        if (tsgid == FIFO_INVAL_TSG_ID) {
                 return 0;
+        }
 
         runlist_id = f->tsg[tsgid].runlist_id;
         nvgpu_log_fn(g, "runlist_id: %d", runlist_id);
-        if (runlist_id == FIFO_INVAL_RUNLIST_ID)
+        if (runlist_id == FIFO_INVAL_RUNLIST_ID) {
                 return 0;
+        }
 
         nvgpu_mutex_acquire(&f->runlist_info[runlist_id].runlist_lock);
 
@@ -909,8 +931,9 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
 
         ret = __locked_fifo_preempt(g, tsgid, true);
 
-        if (!mutex_ret)
+        if (!mutex_ret) {
                 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+        }
 
         /* WAR for Bug 2065990 */
         gk20a_fifo_enable_tsg_sched(g, &f->tsg[tsgid]);
@@ -918,11 +941,12 @@ int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid)
         nvgpu_mutex_release(&f->runlist_info[runlist_id].runlist_lock);
 
         if (ret) {
-                if (nvgpu_platform_is_silicon(g))
+                if (nvgpu_platform_is_silicon(g)) {
                         nvgpu_err(g, "preempt timed out for tsgid: %u, "
                         "ctxsw timeout will trigger recovery if needed", tsgid);
-                else
+                } else {
                         gk20a_fifo_preempt_timeout_rc(g, tsgid, true);
+                }
         }
 
         return ret;
@@ -946,14 +970,16 @@ static void gv11b_fifo_locked_preempt_runlists(struct gk20a *g, u32 runlists_mask)
                 /* if preempt timed out, reset engs served by runlists */
                 for (rlid = 0; rlid < g->fifo.max_runlists; rlid++) {
                         if (runlists_mask &
-                                fifo_runlist_preempt_runlist_m(rlid))
+                                fifo_runlist_preempt_runlist_m(rlid)) {
                                 g->fifo.runlist_info[rlid].reset_eng_bitmask =
                                         g->fifo.runlist_info[rlid].eng_bitmask;
+                        }
                 }
         }
 
-        if (!mutex_ret)
+        if (!mutex_ret) {
                 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+        }
 }
 
 static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
@@ -975,8 +1001,9 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
 
         for (rlid = 0; rlid < g->fifo.max_runlists;
                                                 rlid++) {
-                if (!(runlists_mask & BIT(rlid)))
+                if (!(runlists_mask & BIT(rlid))) {
                         continue;
+                }
                 nvgpu_log(g, gpu_dbg_info, "abort runlist id %d",
                                 rlid);
                 runlist = &g->fifo.runlist_info[rlid];
@@ -1005,17 +1032,19 @@ static void gv11b_fifo_locked_abort_runlist_active_tsgs(struct gk20a *g,
                         /* (chid == ~0 && !add) remove all act ch from runlist*/
                         err = gk20a_fifo_update_runlist_locked(g, rlid,
                                         FIFO_INVAL_CHANNEL_ID, add, wait_for_finish);
-                        if (err)
+                        if (err) {
                                 nvgpu_err(g, "runlist id %d is not cleaned up",
                                         rlid);
+                        }
 
                         gk20a_fifo_abort_tsg(g, tsg->tsgid, false);
 
                         nvgpu_log(g, gpu_dbg_info, "aborted tsg id %d", tsgid);
                 }
         }
-        if (!mutex_ret)
+        if (!mutex_ret) {
                 nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token);
+        }
 }
 
 void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
@@ -1041,10 +1070,11 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
         if (id != FIFO_INVAL_TSG_ID) {
                 tsg = &g->fifo.tsg[id];
                 runlist_id = tsg->runlist_id;
-                if (runlist_id != FIFO_INVAL_RUNLIST_ID)
+                if (runlist_id != FIFO_INVAL_RUNLIST_ID) {
                         num_runlists++;
-                else
+                } else {
                         nvgpu_log_fn(g, "tsg runlist id is invalid");
+                }
         } else {
                 nvgpu_log_fn(g, "id type is tsg but tsg id is inval");
         }
@@ -1066,8 +1096,10 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
                                 break;
                         }
                 }
-                if (num_runlists > 1 ) /* abort all runlists */
+                if (num_runlists > 1) {
+                        /* abort all runlists */
                         runlist_id = FIFO_INVAL_RUNLIST_ID;
+                }
         }
 
         /* if runlist_id is valid and there is only single runlist to be
@@ -1099,18 +1131,22 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
 
         /* Disable power management */
         if (g->support_pmu && g->elpg_enabled) {
-                if (nvgpu_pmu_disable_elpg(g))
+                if (nvgpu_pmu_disable_elpg(g)) {
                         nvgpu_err(g, "failed to set disable elpg");
+                }
         }
-        if (g->ops.clock_gating.slcg_gr_load_gating_prod)
+        if (g->ops.clock_gating.slcg_gr_load_gating_prod) {
                 g->ops.clock_gating.slcg_gr_load_gating_prod(g,
                                 false);
-        if (g->ops.clock_gating.slcg_perf_load_gating_prod)
+        }
+        if (g->ops.clock_gating.slcg_perf_load_gating_prod) {
                 g->ops.clock_gating.slcg_perf_load_gating_prod(g,
                                 false);
-        if (g->ops.clock_gating.slcg_ltc_load_gating_prod)
+        }
+        if (g->ops.clock_gating.slcg_ltc_load_gating_prod) {
                 g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
                                 false);
+        }
 
         gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
 
@@ -1122,8 +1158,9 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
                                 mmfault->faulted_engine);
         }
 
-        if (tsg)
+        if (tsg) {
                 gk20a_disable_tsg(tsg);
+        }
 
         /*
          * Even though TSG preempt timed out, the RC sequence would by design
@@ -1187,8 +1224,9 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
         if (g->fifo.deferred_reset_pending) {
                 gk20a_disable_tsg(tsg);
         } else {
-                if (rc_type == RC_TYPE_MMU_FAULT)
+                if (rc_type == RC_TYPE_MMU_FAULT) {
                         gk20a_fifo_set_ctx_mmu_error_tsg(g, tsg);
+                }
 
                 gk20a_fifo_abort_tsg(g, tsg->tsgid, false);
         }
@@ -1200,8 +1238,9 @@ void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask,
         gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_ENABLED);
 
         /* It is safe to enable ELPG again. */
-        if (g->support_pmu && g->elpg_enabled)
+        if (g->support_pmu && g->elpg_enabled) {
                 nvgpu_pmu_enable_elpg(g);
+        }
 
         /* release runlist_lock */
         if (runlist_id != FIFO_INVAL_RUNLIST_ID) {
@@ -1289,15 +1328,18 @@ int gv11b_init_fifo_reset_enable_hw(struct gk20a *g)
         /* enable pmc pfifo */
         g->ops.mc.reset(g, mc_enable_pfifo_enabled_f());
 
-        if (g->ops.clock_gating.slcg_ce2_load_gating_prod)
+        if (g->ops.clock_gating.slcg_ce2_load_gating_prod) {
                 g->ops.clock_gating.slcg_ce2_load_gating_prod(g,
                                 g->slcg_enabled);
-        if (g->ops.clock_gating.slcg_fifo_load_gating_prod)
+        }
+        if (g->ops.clock_gating.slcg_fifo_load_gating_prod) {
                 g->ops.clock_gating.slcg_fifo_load_gating_prod(g,
                                 g->slcg_enabled);
-        if (g->ops.clock_gating.blcg_fifo_load_gating_prod)
+        }
+        if (g->ops.clock_gating.blcg_fifo_load_gating_prod) {
                 g->ops.clock_gating.blcg_fifo_load_gating_prod(g,
                                 g->blcg_enabled);
+        }
 
         timeout = gk20a_readl(g, fifo_fb_timeout_r());
         nvgpu_log_info(g, "fifo_fb_timeout reg val = 0x%08x", timeout);
@@ -1425,11 +1467,12 @@ bool gv11b_fifo_handle_sched_error(struct gk20a *g)
 
         sched_error = gk20a_readl(g, fifo_intr_sched_error_r());
 
-        if (sched_error < ARRAY_SIZE(gv11b_sched_error_str))
+        if (sched_error < ARRAY_SIZE(gv11b_sched_error_str)) {
                 nvgpu_err(g, "fifo sched error :%s",
                         gv11b_sched_error_str[sched_error]);
-        else
+        } else {
                 nvgpu_err(g, "fifo sched error code not supported");
+        }
 
         if (sched_error == SCHED_ERROR_CODE_BAD_TSG ) {
                 /* id is unknown, preempt all runlists and do recovery */
@@ -1543,8 +1586,9 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
         const char *info_status_str;
 
 
-        if (!(fifo_intr & fifo_intr_0_ctxsw_timeout_pending_f()))
+        if (!(fifo_intr & fifo_intr_0_ctxsw_timeout_pending_f())) {
                 return ret;
+        }
 
         /* get ctxsw timedout engines */
         ctxsw_timeout_engines = gk20a_readl(g, fifo_intr_ctxsw_timeout_r());
@@ -1572,8 +1616,9 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
                 tsgid = gv11b_fifo_ctxsw_timeout_info(g, active_eng_id,
                                 &info_status);
 
-                if (tsgid == FIFO_INVAL_TSG_ID)
+                if (tsgid == FIFO_INVAL_TSG_ID) {
                         continue;
+                }
 
                 if (g->ops.fifo.check_tsg_ctxsw_timeout(
                         &f->tsg[tsgid], &verbose, &ms)) {
@@ -1581,9 +1626,10 @@ bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
 
                         info_status_str = invalid_str;
                         if (info_status <
-                                ARRAY_SIZE(ctxsw_timeout_status_desc))
+                                ARRAY_SIZE(ctxsw_timeout_status_desc)) {
                                 info_status_str =
                                         ctxsw_timeout_status_desc[info_status];
+                        }
 
                         nvgpu_err(g, "ctxsw timeout error: "
                                 "active engine id =%u, %s=%d, info: %s ms=%u",
@@ -1665,11 +1711,13 @@ unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g,
 
         /* minimize race with the gpu clearing the pending interrupt */
         if (!(pbdma_intr_1_current &
-                        pbdma_intr_1_ctxnotvalid_pending_f()))
+                        pbdma_intr_1_ctxnotvalid_pending_f())) {
                 pbdma_intr_1 &= ~pbdma_intr_1_ctxnotvalid_pending_f();
+        }
 
-        if (pbdma_intr_1 == 0)
+        if (pbdma_intr_1 == 0) {
                 return RC_TYPE_NO_RC;
+        }
 
         if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) {
                 nvgpu_log(g, gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d",
@@ -1706,12 +1754,13 @@ void gv11b_fifo_init_ramfc_eng_method_buffer(struct gk20a *g,
                 nvgpu_log_info(g, "eng method buffer NULL");
                 return;
         }
-        if (tsg->runlist_id == gk20a_fifo_get_fast_ce_runlist_id(g))
+        if (tsg->runlist_id == gk20a_fifo_get_fast_ce_runlist_id(g)) {
                 method_buffer_per_runque =
                         &tsg->eng_method_buffers[ASYNC_CE_RUNQUE];
-        else
+        } else {
                 method_buffer_per_runque =
                         &tsg->eng_method_buffers[GR_RUNQUE];
+        }
 
         nvgpu_mem_wr32(g, mem, ram_in_eng_method_buffer_addr_lo_w(),
                         u64_lo32(method_buffer_per_runque->gpu_va));
@@ -1742,8 +1791,9 @@ void gv11b_fifo_init_eng_method_buffers(struct gk20a *g,
         unsigned int runque, method_buffer_size;
         unsigned int num_pbdma = g->fifo.num_pbdma;
 
-        if (tsg->eng_method_buffers != NULL)
+        if (tsg->eng_method_buffers != NULL) {
                 return;
+        }
 
         method_buffer_size = gv11b_fifo_get_eng_method_buffer_size(g);
         if (method_buffer_size == 0) {
@@ -1757,8 +1807,9 @@ void gv11b_fifo_init_eng_method_buffers(struct gk20a *g,
         for (runque = 0; runque < num_pbdma; runque++) {
                 err = nvgpu_dma_alloc_map_sys(vm, method_buffer_size,
                                 &tsg->eng_method_buffers[runque]);
-                if (err)
+                if (err) {
                         break;
+                }
         }
         if (err) {
                 for (i = (runque - 1); i >= 0; i--) {
@@ -1781,8 +1832,9 @@ void gv11b_fifo_deinit_eng_method_buffers(struct gk20a *g,
         struct vm_gk20a *vm = g->mm.bar2.vm;
         unsigned int runque;
 
-        if (tsg->eng_method_buffers == NULL)
+        if (tsg->eng_method_buffers == NULL) {
                 return;
+        }
 
         for (runque = 0; runque < g->fifo.num_pbdma; runque++) {
                 nvgpu_dma_unmap_free(vm, &tsg->eng_method_buffers[runque]);
@@ -2020,8 +2072,9 @@ static u32 gv11b_mmu_fault_id_to_gr_veid(struct gk20a *g, u32 gr_eng_fault_id,
         num_subctx = f->max_subctx_count;
 
         if (mmu_fault_id >= gr_eng_fault_id &&
-                        mmu_fault_id < (gr_eng_fault_id + num_subctx))
+                        mmu_fault_id < (gr_eng_fault_id + num_subctx)) {
                 veid = mmu_fault_id - gr_eng_fault_id;
+        }
 
         return veid;
 }
@@ -2043,11 +2096,13 @@ static u32 gv11b_mmu_fault_id_to_eng_id_and_veid(struct gk20a *g,
                         /* get faulted subctx id */
                         *veid = gv11b_mmu_fault_id_to_gr_veid(g,
                                         engine_info->fault_id, mmu_fault_id);
-                        if (*veid != FIFO_INVAL_VEID)
+                        if (*veid != FIFO_INVAL_VEID) {
                                 break;
+                        }
                 } else {
-                        if (engine_info->fault_id == mmu_fault_id)
+                        if (engine_info->fault_id == mmu_fault_id) {
                                 break;
+                        }
                 }
 
                 active_engine_id = FIFO_INVAL_ENGINE_ID;
@@ -2064,8 +2119,9 @@ static u32 gv11b_mmu_fault_id_to_pbdma_id(struct gk20a *g, u32 mmu_fault_id)
         fault_id_pbdma0 = fifo_cfg0_pbdma_fault_id_v(reg_val);
 
         if (mmu_fault_id >= fault_id_pbdma0 &&
-                        mmu_fault_id <= fault_id_pbdma0 + num_pbdma - 1)
+                        mmu_fault_id <= fault_id_pbdma0 + num_pbdma - 1) {
                 return mmu_fault_id - fault_id_pbdma0;
+        }
 
         return FIFO_INVAL_PBDMA_ID;
 }
@@ -2076,10 +2132,11 @@ void gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(struct gk20a *g,
         *active_engine_id = gv11b_mmu_fault_id_to_eng_id_and_veid(g,
                                 mmu_fault_id, veid);
 
-        if (*active_engine_id == FIFO_INVAL_ENGINE_ID)
+        if (*active_engine_id == FIFO_INVAL_ENGINE_ID) {
                 *pbdma_id = gv11b_mmu_fault_id_to_pbdma_id(g, mmu_fault_id);
-        else
+        } else {
                 *pbdma_id = FIFO_INVAL_PBDMA_ID;
+        }
 }
 
 static bool gk20a_fifo_channel_status_is_eng_faulted(struct gk20a *g, u32 chid)
@@ -2099,11 +2156,13 @@ void gv11b_fifo_tsg_verify_status_faulted(struct channel_gk20a *ch)
          * If channel has FAULTED set, clear the CE method buffer
          * if saved out channel is same as faulted channel
          */
-        if (!gk20a_fifo_channel_status_is_eng_faulted(g, ch->chid))
+        if (!gk20a_fifo_channel_status_is_eng_faulted(g, ch->chid)) {
                 return;
+        }
 
-        if (tsg->eng_method_buffers == NULL)
+        if (tsg->eng_method_buffers == NULL) {
                 return;
+        }
 
         /*
          * CE method buffer format :
@@ -2113,6 +2172,7 @@ void gv11b_fifo_tsg_verify_status_faulted(struct channel_gk20a *ch)
          * It is sufficient to write 0 to method count to invalidate
          */
         if ((u32)ch->chid ==
-                nvgpu_mem_rd32(g, &tsg->eng_method_buffers[ASYNC_CE_RUNQUE], 1))
+                nvgpu_mem_rd32(g, &tsg->eng_method_buffers[ASYNC_CE_RUNQUE], 1)) {
                 nvgpu_mem_wr32(g, &tsg->eng_method_buffers[ASYNC_CE_RUNQUE], 0, 0);
+        }
 }
diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
index 0989e00a..cf669aa7 100644
--- a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
@@ -856,8 +856,10 @@ int gv11b_init_hal(struct gk20a *g)
         __nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false);
 
         /* Read fuses to check if gpu needs to boot in secure/non-secure mode */
-        if (gops->fuse.check_priv_security(g))
-                return -EINVAL; /* Do not boot gpu */
+        if (gops->fuse.check_priv_security(g)) {
+                /* Do not boot gpu */
+                return -EINVAL;
+        }
 
         /* priv security dependent ops */
         if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
index b8272a92..73b7dae7 100644
--- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
@@ -56,8 +56,9 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
 
         g->ops.mm.init_pdb(g, inst_block, vm);
 
-        if (big_page_size && g->ops.mm.set_big_page_size)
+        if (big_page_size && g->ops.mm.set_big_page_size) {
                 g->ops.mm.set_big_page_size(g, inst_block, big_page_size);
+        }
 
         gv11b_init_subcontext_pdb(vm, inst_block, false);
 }
@@ -97,12 +98,14 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
         nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
 
         if (nvgpu_mem_is_valid(
-                &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]))
+                &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) {
                 nvgpu_dma_unmap_free(vm,
                         &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]);
-        if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]))
+        }
+        if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) {
                 nvgpu_dma_unmap_free(vm,
                         &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]);
+        }
 
         nvgpu_mutex_release(&g->mm.hub_isr_mutex);
         nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
@@ -152,12 +155,14 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
 {
         if (nvgpu_mem_is_valid(
-                &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]))
+                &g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) {
                 g->ops.fb.fault_buf_configure_hw(g,
                         NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
-        if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]))
+        }
+        if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) {
                 g->ops.fb.fault_buf_configure_hw(g,
                         NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
+        }
 }
 
 static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
@@ -170,8 +175,9 @@ static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
 
         err = gv11b_mm_mmu_fault_info_buf_init(g);
 
-        if (!err)
+        if (!err) {
                 gv11b_mm_mmu_hw_fault_buf_init(g);
+        }
 
         return err;
 }
@@ -185,8 +191,9 @@ int gv11b_init_mm_setup_hw(struct gk20a *g)
         err = gk20a_init_mm_setup_hw(g);
 
         err = gv11b_mm_mmu_fault_setup_sw(g);
-        if (!err)
+        if (!err) {
                 gv11b_mm_mmu_fault_setup_hw(g);
+        }
 
         nvgpu_log_fn(g, "end");
 
@@ -199,11 +206,12 @@ void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate)
 
         g->ops.mm.fb_flush(g);
         gk20a_mm_l2_flush(g, invalidate);
-        if (g->ops.bus.bar1_bind)
+        if (g->ops.bus.bar1_bind) {
                 g->ops.fb.tlb_invalidate(g,
                                 g->mm.bar1.vm->pdb.mem);
-        else
+        } else {
                 g->ops.mm.fb_flush(g);
+        }
 }
 
 /*
@@ -214,8 +222,9 @@ void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate)
 u64 gv11b_gpu_phys_addr(struct gk20a *g,
                         struct nvgpu_gmmu_attrs *attrs, u64 phys)
 {
-        if (attrs && attrs->l3_alloc)
+        if (attrs && attrs->l3_alloc) {
                 return phys | NVGPU_L3_ALLOC_BIT;
+        }
 
         return phys;
 }
diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
index 80d6be3d..60ffdb98 100644
--- a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
@@ -340,10 +340,12 @@ void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0)
                         pwr_pmu_falcon_ecc_status_reset_task_f());
 
                 /* update counters per slice */
-                if (corrected_overflow)
+                if (corrected_overflow) {
                         corrected_delta += (0x1UL << pwr_pmu_falcon_ecc_corrected_err_count_total_s());
-                if (uncorrected_overflow)
+                }
+                if (uncorrected_overflow) {
                         uncorrected_delta += (0x1UL << pwr_pmu_falcon_ecc_uncorrected_err_count_total_s());
+                }
 
                 g->ecc.pmu.pmu_ecc_corrected_err_count[0].counter += corrected_delta;
                 g->ecc.pmu.pmu_ecc_uncorrected_err_count[0].counter += uncorrected_delta;
@@ -351,21 +353,26 @@ void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0)
                 nvgpu_log(g, gpu_dbg_intr,
                         "pmu ecc interrupt intr1: 0x%x", intr1);
 
-                if (ecc_status & pwr_pmu_falcon_ecc_status_corrected_err_imem_m())
+                if (ecc_status & pwr_pmu_falcon_ecc_status_corrected_err_imem_m()) {
                         nvgpu_log(g, gpu_dbg_intr,
                                 "imem ecc error corrected");
-                if (ecc_status & pwr_pmu_falcon_ecc_status_uncorrected_err_imem_m())
+                }
+                if (ecc_status & pwr_pmu_falcon_ecc_status_uncorrected_err_imem_m()) {
                         nvgpu_log(g, gpu_dbg_intr,
                                 "imem ecc error uncorrected");
-                if (ecc_status & pwr_pmu_falcon_ecc_status_corrected_err_dmem_m())
+                }
+                if (ecc_status & pwr_pmu_falcon_ecc_status_corrected_err_dmem_m()) {
                         nvgpu_log(g, gpu_dbg_intr,
                                 "dmem ecc error corrected");
-                if (ecc_status & pwr_pmu_falcon_ecc_status_uncorrected_err_dmem_m())
+                }
+                if (ecc_status & pwr_pmu_falcon_ecc_status_uncorrected_err_dmem_m()) {
                         nvgpu_log(g, gpu_dbg_intr,
                                 "dmem ecc error uncorrected");
+                }
 
-                if (corrected_overflow || uncorrected_overflow)
+                if (corrected_overflow || uncorrected_overflow) {
                         nvgpu_info(g, "ecc counter overflow!");
+                }
 
                 nvgpu_log(g, gpu_dbg_intr,
                         "ecc error row address: 0x%x",
@@ -456,8 +463,9 @@ int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
                 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
                         pmu_handle_pg_param_msg, pmu, &seq, ~0);
 
-        } else
+        } else {
                 return -EINVAL;
+        }
 
         return 0;
 }
@@ -493,8 +501,9 @@ int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id)
                 gv11b_dbg_pmu(g, "cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n");
                 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
                         pmu_handle_pg_sub_feature_msg, pmu, &seq, ~0);
-        } else
+        } else {
                 return -EINVAL;
+        }
 
         return 0;
 }
diff --git a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c
index f0cf920c..d742e8dc 100644
--- a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c
@@ -105,8 +105,9 @@ int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va)
         struct nvgpu_gr_ctx *gr_ctx;
 
         tsg = tsg_gk20a_from_ch(c);
-        if (!tsg)
+        if (!tsg) {
                 return -EINVAL;
+        }
 
         gr_ctx = &tsg->gr_ctx;
 