path: root/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
author		Srirangan <smadhavan@nvidia.com>	2018-08-16 02:03:55 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-08-20 08:46:25 -0400
commit		9e69e0cf978b53706f55ffb873e3966b4bb3a7a8 (patch)
tree		2437cda373f2c37419e14b89772fb3c5f6d234e4 /drivers/gpu/nvgpu/common/fb/fb_gv11b.c
parent		de10cedf8caca9fd01f1b85031e538843da23252 (diff)
gpu: nvgpu: common: Fix MISRA 15.6 violations
MISRA Rule 15.6 requires that all if-else blocks be enclosed in braces, including single-statement blocks. Fix the violations caused by single-statement if blocks without braces by introducing the braces.

JIRA NVGPU-671

Change-Id: I599cce2af1d6cdc24efefba4ec42abfe998aec47
Signed-off-by: Srirangan <smadhavan@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1795845
Reviewed-by: Adeel Raza <araza@nvidia.com>
Reviewed-by: svc-misra-checker <svc-misra-checker@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
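The change applied throughout the diff below is mechanical: every single-statement if/else body gains braces. A minimal, self-contained C sketch of the before/after shape follows; the helper and variable names here are illustrative only and are not taken from the nvgpu sources.

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-in for a driver's overflow-bit check (hypothetical name). */
static bool counter_overflowed(unsigned int status)
{
	return (status & 0x1u) != 0u;
}

static void handle_status(unsigned int status, unsigned int *delta)
{
	/* Before (MISRA Rule 15.6 violation): single-statement blocks without braces.
	 *
	 *	if (counter_overflowed(status))
	 *		*delta += 1u;
	 *	else
	 *		printf("no overflow\n");
	 */

	/* After: every if and else block is enclosed in braces. */
	if (counter_overflowed(status)) {
		*delta += 1u;
	} else {
		printf("no overflow\n");
	}
}

int main(void)
{
	unsigned int delta = 0u;

	handle_status(0x1u, &delta);	/* overflow bit set: delta becomes 1 */
	handle_status(0x0u, &delta);	/* no overflow: prints a message */
	printf("delta = %u\n", delta);
	return 0;
}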
Diffstat (limited to 'drivers/gpu/nvgpu/common/fb/fb_gv11b.c')
-rw-r--r--	drivers/gpu/nvgpu/common/fb/fb_gv11b.c	162
1 file changed, 109 insertions(+), 53 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gv11b.c b/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
index b6121f4d..d5ad495a 100644
--- a/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
@@ -97,12 +97,13 @@ void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 	u64 compbit_store_iova;
 	u64 compbit_base_post_divide64;
 
-	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
+	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) {
 		compbit_store_iova = nvgpu_mem_get_phys_addr(g,
 				&gr->compbit_store.mem);
-	else
+	} else {
 		compbit_store_iova = nvgpu_mem_get_addr(g,
 				&gr->compbit_store.mem);
+	}
 	/* must be aligned to 64 KB */
 	compbit_store_iova = roundup(compbit_store_iova, (u64)SZ_64K);
 
@@ -115,12 +116,14 @@ void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
 	compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
 		g->ltc_count) << fb_mmu_cbc_base_address_alignment_shift_v();
 
-	if (compbit_base_post_multiply64 < compbit_store_iova)
+	if (compbit_base_post_multiply64 < compbit_store_iova) {
 		compbit_base_post_divide++;
+	}
 
-	if (g->ops.ltc.cbc_fix_config)
+	if (g->ops.ltc.cbc_fix_config) {
 		compbit_base_post_divide =
 			g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);
+	}
 
 	gk20a_writel(g, fb_mmu_cbc_base_r(),
 		fb_mmu_cbc_base_address_f(compbit_base_post_divide));
@@ -250,8 +253,9 @@ static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g,
 	/* while the fault is being handled it is possible for overflow
 	 * to happen,
 	 */
-	if (reg_val & fb_mmu_fault_buffer_get_overflow_m())
+	if (reg_val & fb_mmu_fault_buffer_get_overflow_m()) {
 		reg_val |= fb_mmu_fault_buffer_get_overflow_clear_f();
+	}
 
 	g->ops.fb.write_mmu_fault_buffer_get(g, index, reg_val);
 
@@ -341,8 +345,10 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 	fault_status = g->ops.fb.read_mmu_fault_status(g);
 
 	do {
-		if (!(fault_status & fb_mmu_fault_status_busy_true_f()))
+		if (!(fault_status &
+				fb_mmu_fault_status_busy_true_f())) {
 			break;
+		}
 		/*
 		 * Make sure fault buffer is disabled.
 		 * This is to avoid accessing fault buffer by hw
@@ -435,19 +441,23 @@ void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_l2tlb_ecc_status_uncorrected_err_total_counter_overflow_m();
 
 	/* clear the interrupt */
-	if ((corrected_delta > 0) || corrected_overflow)
+	if ((corrected_delta > 0) || corrected_overflow) {
 		gk20a_writel(g, fb_mmu_l2tlb_ecc_corrected_err_count_r(), 0);
-	if ((uncorrected_delta > 0) || uncorrected_overflow)
+	}
+	if ((uncorrected_delta > 0) || uncorrected_overflow) {
 		gk20a_writel(g, fb_mmu_l2tlb_ecc_uncorrected_err_count_r(), 0);
+	}
 
 	gk20a_writel(g, fb_mmu_l2tlb_ecc_status_r(),
 		fb_mmu_l2tlb_ecc_status_reset_clear_f());
 
 	/* Handle overflow */
-	if (corrected_overflow)
+	if (corrected_overflow) {
 		corrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_corrected_err_count_total_s());
-	if (uncorrected_overflow)
+	}
+	if (uncorrected_overflow) {
 		uncorrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s());
+	}
 
 
 	g->ecc.fb.mmu_l2tlb_ecc_corrected_err_count[0].counter +=
@@ -455,12 +465,17 @@ void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 	g->ecc.fb.mmu_l2tlb_ecc_uncorrected_err_count[0].counter +=
 		uncorrected_delta;
 
-	if (ecc_status & fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m())
+	if (ecc_status &
+		fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
-	if (ecc_status & fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m())
+	}
+	if (ecc_status &
+		fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
-	if (corrected_overflow || uncorrected_overflow)
+	}
+	if (corrected_overflow || uncorrected_overflow) {
 		nvgpu_info(g, "mmu l2tlb ecc counter overflow!");
+	}
 
 	nvgpu_log(g, gpu_dbg_intr,
 		"ecc error address: 0x%x", ecc_addr);
@@ -493,19 +508,23 @@ void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_hubtlb_ecc_status_uncorrected_err_total_counter_overflow_m();
 
 	/* clear the interrupt */
-	if ((corrected_delta > 0) || corrected_overflow)
+	if ((corrected_delta > 0) || corrected_overflow) {
 		gk20a_writel(g, fb_mmu_hubtlb_ecc_corrected_err_count_r(), 0);
-	if ((uncorrected_delta > 0) || uncorrected_overflow)
+	}
+	if ((uncorrected_delta > 0) || uncorrected_overflow) {
 		gk20a_writel(g, fb_mmu_hubtlb_ecc_uncorrected_err_count_r(), 0);
+	}
 
 	gk20a_writel(g, fb_mmu_hubtlb_ecc_status_r(),
 		fb_mmu_hubtlb_ecc_status_reset_clear_f());
 
 	/* Handle overflow */
-	if (corrected_overflow)
+	if (corrected_overflow) {
 		corrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_corrected_err_count_total_s());
-	if (uncorrected_overflow)
+	}
+	if (uncorrected_overflow) {
 		uncorrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s());
+	}
 
 
 	g->ecc.fb.mmu_hubtlb_ecc_corrected_err_count[0].counter +=
@@ -513,12 +532,15 @@ void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
 	g->ecc.fb.mmu_hubtlb_ecc_uncorrected_err_count[0].counter +=
 		uncorrected_delta;
 
-	if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m())
+	if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
-	if (ecc_status & fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m())
+	}
+	if (ecc_status & fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
-	if (corrected_overflow || uncorrected_overflow)
+	}
+	if (corrected_overflow || uncorrected_overflow) {
 		nvgpu_info(g, "mmu hubtlb ecc counter overflow!");
+	}
 
 	nvgpu_log(g, gpu_dbg_intr,
 		"ecc error address: 0x%x", ecc_addr);
@@ -551,19 +573,23 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
 		fb_mmu_fillunit_ecc_status_uncorrected_err_total_counter_overflow_m();
 
 	/* clear the interrupt */
-	if ((corrected_delta > 0) || corrected_overflow)
+	if ((corrected_delta > 0) || corrected_overflow) {
 		gk20a_writel(g, fb_mmu_fillunit_ecc_corrected_err_count_r(), 0);
-	if ((uncorrected_delta > 0) || uncorrected_overflow)
+	}
+	if ((uncorrected_delta > 0) || uncorrected_overflow) {
 		gk20a_writel(g, fb_mmu_fillunit_ecc_uncorrected_err_count_r(), 0);
+	}
 
 	gk20a_writel(g, fb_mmu_fillunit_ecc_status_r(),
 		fb_mmu_fillunit_ecc_status_reset_clear_f());
 
 	/* Handle overflow */
-	if (corrected_overflow)
+	if (corrected_overflow) {
 		corrected_delta += (0x1UL << fb_mmu_fillunit_ecc_corrected_err_count_total_s());
-	if (uncorrected_overflow)
+	}
+	if (uncorrected_overflow) {
 		uncorrected_delta += (0x1UL << fb_mmu_fillunit_ecc_uncorrected_err_count_total_s());
+	}
 
 
 	g->ecc.fb.mmu_fillunit_ecc_corrected_err_count[0].counter +=
@@ -571,17 +597,26 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
 	g->ecc.fb.mmu_fillunit_ecc_uncorrected_err_count[0].counter +=
 		uncorrected_delta;
 
-	if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m())
+	if (ecc_status &
+		fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "corrected ecc pte data error");
-	if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m())
+	}
+	if (ecc_status &
+		fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pte data error");
-	if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m())
+	}
+	if (ecc_status &
+		fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "corrected ecc pde0 data error");
-	if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m())
+	}
+	if (ecc_status &
+		fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m()) {
 		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pde0 data error");
+	}
 
-	if (corrected_overflow || uncorrected_overflow)
+	if (corrected_overflow || uncorrected_overflow) {
 		nvgpu_info(g, "mmu fillunit ecc counter overflow!");
+	}
 
 	nvgpu_log(g, gpu_dbg_intr,
 		"ecc error address: 0x%x", ecc_addr);
@@ -594,33 +629,37 @@ void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
 static void gv11b_fb_parse_mmfault(struct mmu_fault_info *mmfault)
 {
 	if (WARN_ON(mmfault->fault_type >=
-			ARRAY_SIZE(fault_type_descs_gv11b)))
+			ARRAY_SIZE(fault_type_descs_gv11b))) {
 		mmfault->fault_type_desc = invalid_str;
-	else
+	} else {
 		mmfault->fault_type_desc =
 			fault_type_descs_gv11b[mmfault->fault_type];
+	}
 
 	if (WARN_ON(mmfault->client_type >=
-			ARRAY_SIZE(fault_client_type_descs_gv11b)))
+			ARRAY_SIZE(fault_client_type_descs_gv11b))) {
 		mmfault->client_type_desc = invalid_str;
-	else
+	} else {
 		mmfault->client_type_desc =
 			fault_client_type_descs_gv11b[mmfault->client_type];
+	}
 
 	mmfault->client_id_desc = invalid_str;
 	if (mmfault->client_type ==
 			gmmu_fault_client_type_hub_v()) {
 
 		if (!(WARN_ON(mmfault->client_id >=
-				ARRAY_SIZE(hub_client_descs_gv11b))))
+				ARRAY_SIZE(hub_client_descs_gv11b)))) {
 			mmfault->client_id_desc =
 				hub_client_descs_gv11b[mmfault->client_id];
+		}
 	} else if (mmfault->client_type ==
 			gmmu_fault_client_type_gpc_v()) {
 		if (!(WARN_ON(mmfault->client_id >=
-				ARRAY_SIZE(gpc_client_descs_gv11b))))
+				ARRAY_SIZE(gpc_client_descs_gv11b)))) {
 			mmfault->client_id_desc =
 				gpc_client_descs_gv11b[mmfault->client_id];
+		}
 	}
 
 }
@@ -719,8 +758,9 @@ static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
 
 	/* refch will be put back after fault is handled */
 	refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
-	if (refch)
+	if (refch) {
 		chid = refch->chid;
+	}
 
 	/* it is ok to continue even if refch is NULL */
 	mmfault->refch = refch;
@@ -803,8 +843,9 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 	u32 id = FIFO_INVAL_TSG_ID;
 	unsigned int rc_type = RC_TYPE_NO_RC;
 
-	if (!mmfault->valid)
+	if (!mmfault->valid) {
 		return;
+	}
 
 	gv11b_fb_print_fault_info(g, mmfault);
 
@@ -877,8 +918,9 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 			rc_type = RC_TYPE_MMU_FAULT;
 			if (gk20a_is_channel_marked_as_tsg(mmfault->refch)) {
 				id = mmfault->refch->tsgid;
-				if (id != FIFO_INVAL_TSG_ID)
+				if (id != FIFO_INVAL_TSG_ID) {
 					id_type = ID_TYPE_TSG;
+				}
 			} else {
 				nvgpu_err(g, "bare channels not supported");
 			}
@@ -898,19 +940,21 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 			mmfault->refch = NULL;
 		}
 
-		if (rc_type != RC_TYPE_NO_RC)
+		if (rc_type != RC_TYPE_NO_RC) {
 			g->ops.fifo.teardown_ch_tsg(g, act_eng_bitmask,
 				id, id_type, rc_type, mmfault);
+		}
 	} else {
 		if (mmfault->fault_type == gmmu_fault_type_pte_v()) {
 			nvgpu_log(g, gpu_dbg_intr, "invalid pte! try to fix");
 			err = gv11b_fb_fix_page_fault(g, mmfault);
-			if (err)
+			if (err) {
 				*invalidate_replay_val |=
 					fb_mmu_invalidate_replay_cancel_global_f();
-			else
+			} else {
 				*invalidate_replay_val |=
 					fb_mmu_invalidate_replay_start_ack_all_f();
+			}
 		} else {
 			/* cancel faults other than invalid pte */
 			*invalidate_replay_val |=
@@ -1026,8 +1070,9 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
 
 	}
 	if (index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX &&
-			invalidate_replay_val != 0U)
+			invalidate_replay_val != 0U) {
 		gv11b_fb_replay_or_cancel_faults(g, invalidate_replay_val);
+	}
 }
 
 static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
@@ -1057,8 +1102,9 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
 
 	/* refch will be put back after fault is handled */
 	refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
-	if (refch)
+	if (refch) {
 		chid = refch->chid;
+	}
 
 	/* It is still ok to continue if refch is NULL */
 	mmfault->refch = refch;
@@ -1180,15 +1226,17 @@ static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
 {
 	if (fault_status & fb_mmu_fault_status_non_replayable_error_m()) {
 		if (gv11b_fb_is_fault_buf_enabled(g,
-				NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX))
+				NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
 			gv11b_fb_fault_buf_configure_hw(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
+		}
 	}
 
 	if (fault_status & fb_mmu_fault_status_replayable_error_m()) {
 		if (gv11b_fb_is_fault_buf_enabled(g,
-				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX))
+				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) {
 			gv11b_fb_fault_buf_configure_hw(g,
 				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
+		}
 	}
 	gv11b_ce_mthd_buffer_fault_in_bar2_fault(g);
 
@@ -1224,9 +1272,10 @@ void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
 		gv11b_fb_handle_mmu_fault_common(g, mmfault,
 			&invalidate_replay_val);
 
-		if (invalidate_replay_val)
+		if (invalidate_replay_val) {
 			gv11b_fb_replay_or_cancel_faults(g,
 				invalidate_replay_val);
+		}
 	}
 }
 
@@ -1254,8 +1303,9 @@ void gv11b_fb_handle_replayable_mmu_fault(struct gk20a *g)
 {
 	u32 fault_status = gk20a_readl(g, fb_mmu_fault_status_r());
 
-	if (!(fault_status & fb_mmu_fault_status_replayable_m()))
+	if (!(fault_status & fb_mmu_fault_status_replayable_m())) {
 		return;
+	}
 
 	if (gv11b_fb_is_fault_buf_enabled(g,
 			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
@@ -1349,16 +1399,19 @@ void gv11b_fb_hub_isr(struct gk20a *g)
 		nvgpu_info(g, "ecc uncorrected error notify");
 
 		status = gk20a_readl(g, fb_mmu_l2tlb_ecc_status_r());
-		if (status)
+		if (status) {
 			gv11b_handle_l2tlb_ecc_isr(g, status);
+		}
 
 		status = gk20a_readl(g, fb_mmu_hubtlb_ecc_status_r());
-		if (status)
+		if (status) {
 			gv11b_handle_hubtlb_ecc_isr(g, status);
+		}
 
 		status = gk20a_readl(g, fb_mmu_fillunit_ecc_status_r());
-		if (status)
+		if (status) {
 			gv11b_handle_fillunit_ecc_isr(g, status);
+		}
 	}
 	if (niso_intr &
 		(fb_niso_intr_mmu_other_fault_notify_m() |
@@ -1382,8 +1435,9 @@ bool gv11b_fb_mmu_fault_pending(struct gk20a *g)
 		fb_niso_intr_mmu_replayable_fault_notify_m() |
 		fb_niso_intr_mmu_replayable_fault_overflow_m() |
 		fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
-		fb_niso_intr_mmu_nonreplayable_fault_overflow_m()))
+		fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) {
 		return true;
+	}
 
 	return false;
 }
@@ -1420,8 +1474,9 @@ int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
 		nvgpu_udelay(5);
 	} while (!nvgpu_timeout_expired_msg(&timeout,
 			"invalidate replay failed on 0x%llx"));
-	if (err)
+	if (err) {
 		nvgpu_err(g, "invalidate replay timedout");
+	}
 
 	nvgpu_mutex_release(&g->mm.tlb_lock);
 
@@ -1460,8 +1515,9 @@ static int gv11b_fb_fix_page_fault(struct gk20a *g,
 	}
 
 	pte[0] |= gmmu_new_pte_valid_true_f();
-	if (pte[0] & gmmu_new_pte_read_only_true_f())
+	if (pte[0] & gmmu_new_pte_read_only_true_f()) {
 		pte[0] &= ~(gmmu_new_pte_read_only_true_f());
+	}
 	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
 		"new pte: %#08x %#08x", pte[1], pte[0]);
 