author     Terje Bergstrom <tbergstrom@nvidia.com>            2018-07-03 17:00:40 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com> 2018-07-11 04:43:26 -0400
commit     a801c897df1e5e7ac498e1531ce4bbdabdca1c3d (patch)
tree       c461329d88878803636a268844a7a05acc2ae1d5
parent     572fba2c52a6d63dbc785b48ad845e55f0b7eac0 (diff)
gpu: nvgpu: Simplify FB hub intr enable
Hard code flags for enabling and disabling FB hub interrupts.

JIRA NVGPU-714

Change-Id: I806ef443cb9e27e221d407d633ca91d8fb40d075
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1769853
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
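For readers skimming the diff, here is a minimal sketch of the call pattern this change produces (drawn from the gv11b/gv100 hunks below, not itself part of the patch): the hub-interrupt HAL ops drop the index/intr_type parameters, each chip enables its own hard-coded interrupt mask from fb.init_hw, and callers that may run on chips without the op NULL-check it. The example_* function names are hypothetical wrappers used only for illustration.

/* Sketch only: mirrors the new call sites introduced by this patch. */
void example_fb_init_hw(struct gk20a *g)
{
        gk20a_fb_init_hw(g);

        /* Enable the chip's fixed set of FB hub interrupts. */
        g->ops.fb.enable_hub_intr(g);
}

void example_mm_suspend(struct gk20a *g)
{
        /* Not every chip provides the op, so callers NULL-check it. */
        if (g->ops.fb.disable_hub_intr != NULL) {
                g->ops.fb.disable_hub_intr(g);
        }
}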
-rw-r--r--  drivers/gpu/nvgpu/common/mm/mm.c          4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h           6
-rw-r--r--  drivers/gpu/nvgpu/gv100/fb_gv100.c       28
-rw-r--r--  drivers/gpu/nvgpu/gv100/fb_gv100.h        2
-rw-r--r--  drivers/gpu/nvgpu/gv100/hal_gv100.c       6
-rw-r--r--  drivers/gpu/nvgpu/gv100/mc_gv100.c        4
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fb_gv11b.c      143
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fb_gv11b.h       22
-rw-r--r--  drivers/gpu/nvgpu/gv11b/hal_gv11b.c       2
-rw-r--r--  drivers/gpu/nvgpu/gv11b/mc_gv11b.c        4
-rw-r--r--  drivers/gpu/nvgpu/gv11b/mm_gv11b.c       23
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/mm.h      1
12 files changed, 75 insertions, 170 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 30e7351c..2663ab43 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -113,6 +113,10 @@ int nvgpu_mm_suspend(struct gk20a *g)
 	g->ops.mm.cbc_clean(g);
 	g->ops.mm.l2_flush(g, false);
 
+	if (g->ops.fb.disable_hub_intr != NULL) {
+		g->ops.fb.disable_hub_intr(g);
+	}
+
 	nvgpu_log_info(g, "MM suspend done!");
 
 	return 0;
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index b677419c..b59baa78 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -542,10 +542,8 @@ struct gpu_ops {
 		int (*mem_unlock)(struct gk20a *g);
 		int (*init_nvlink)(struct gk20a *g);
 		int (*enable_nvlink)(struct gk20a *g);
-		void (*enable_hub_intr)(struct gk20a *g, unsigned int index,
-				unsigned int intr_type);
-		void (*disable_hub_intr)(struct gk20a *g, unsigned int index,
-				unsigned int intr_type);
+		void (*enable_hub_intr)(struct gk20a *g);
+		void (*disable_hub_intr)(struct gk20a *g);
 		int (*init_fbpa)(struct gk20a *g);
 		void (*fbpa_isr)(struct gk20a *g);
 		void (*write_mmu_fault_buffer_lo_hi)(struct gk20a *g, u32 index,
diff --git a/drivers/gpu/nvgpu/gv100/fb_gv100.c b/drivers/gpu/nvgpu/gv100/fb_gv100.c
index defb6642..1889268f 100644
--- a/drivers/gpu/nvgpu/gv100/fb_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/fb_gv100.c
@@ -70,6 +70,34 @@ void gv100_fb_reset(struct gk20a *g)
 	gk20a_writel(g, fb_mmu_priv_level_mask_r(), val);
 }
 
+void gv100_fb_enable_hub_intr(struct gk20a *g)
+{
+	u32 mask = 0;
+
+	mask = fb_niso_intr_en_set_mmu_other_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
+
+	gk20a_writel(g, fb_niso_intr_en_set_r(0),
+		mask);
+}
+
+void gv100_fb_disable_hub_intr(struct gk20a *g)
+{
+	u32 mask = 0;
+
+	mask = fb_niso_intr_en_set_mmu_other_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
+
+	gk20a_writel(g, fb_niso_intr_en_clr_r(0),
+		mask);
+}
+
 int gv100_fb_memory_unlock(struct gk20a *g)
 {
 	struct nvgpu_firmware *mem_unlock_fw = NULL;
diff --git a/drivers/gpu/nvgpu/gv100/fb_gv100.h b/drivers/gpu/nvgpu/gv100/fb_gv100.h
index 5b99fe5e..195baccf 100644
--- a/drivers/gpu/nvgpu/gv100/fb_gv100.h
+++ b/drivers/gpu/nvgpu/gv100/fb_gv100.h
@@ -28,6 +28,8 @@
 struct gk20a;
 
 void gv100_fb_reset(struct gk20a *g);
+void gv100_fb_enable_hub_intr(struct gk20a *g);
+void gv100_fb_disable_hub_intr(struct gk20a *g);
 int gv100_fb_memory_unlock(struct gk20a *g);
 int gv100_fb_init_nvlink(struct gk20a *g);
 int gv100_fb_enable_nvlink(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.c b/drivers/gpu/nvgpu/gv100/hal_gv100.c
index 3cfda7ca..ea131822 100644
--- a/drivers/gpu/nvgpu/gv100/hal_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/hal_gv100.c
@@ -459,7 +459,7 @@ static const struct gpu_ops gv100_ops = {
 	},
 	.fb = {
 		.reset = gv100_fb_reset,
-		.init_hw = gk20a_fb_init_hw,
+		.init_hw = gv11b_fb_init_hw,
 		.init_fs_state = NULL,
 		.set_mmu_page_size = gm20b_fb_set_mmu_page_size,
 		.set_use_full_comp_tag_line =
@@ -481,8 +481,8 @@ static const struct gpu_ops gv100_ops = {
 		.mem_unlock = gv100_fb_memory_unlock,
 		.init_nvlink = gv100_fb_init_nvlink,
 		.enable_nvlink = gv100_fb_enable_nvlink,
-		.enable_hub_intr = gv11b_fb_enable_hub_intr,
-		.disable_hub_intr = gv11b_fb_disable_hub_intr,
+		.enable_hub_intr = gv100_fb_enable_hub_intr,
+		.disable_hub_intr = gv100_fb_disable_hub_intr,
 		.write_mmu_fault_buffer_lo_hi =
 			fb_gv11b_write_mmu_fault_buffer_lo_hi,
 		.write_mmu_fault_buffer_get =
diff --git a/drivers/gpu/nvgpu/gv100/mc_gv100.c b/drivers/gpu/nvgpu/gv100/mc_gv100.c
index 7ed9e6da..3d6f0bd3 100644
--- a/drivers/gpu/nvgpu/gv100/mc_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/mc_gv100.c
@@ -41,7 +41,6 @@ void mc_gv100_intr_enable(struct gk20a *g)
 		0xffffffffU);
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
 		0xffffffffU);
-	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL);
 
 	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
 		mc_intr_pfifo_pending_f() |
@@ -56,9 +55,6 @@ void mc_gv100_intr_enable(struct gk20a *g)
 		mc_intr_pfifo_pending_f()
 		| eng_intr_mask;
 
-	/* TODO: Enable PRI faults for HUB ECC err intr */
-	g->ops.fb.enable_hub_intr(g, STALL_REG_INDEX, g->mm.hub_intr_types);
-
 	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
 		g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);
 
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
index d6e621eb..2c2c4898 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
@@ -32,6 +32,7 @@
 
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
+#include "gk20a/fb_gk20a.h"
 
 #include "gp10b/fb_gp10b.h"
 
@@ -58,6 +59,13 @@ static void gv11b_init_nvlink_soc_credits(struct gk20a *g)
 	}
 }
 
+void gv11b_fb_init_hw(struct gk20a *g)
+{
+	gk20a_fb_init_hw(g);
+
+	g->ops.fb.enable_hub_intr(g);
+}
+
 void gv11b_fb_init_fs_state(struct gk20a *g)
 {
 	nvgpu_log(g, gpu_dbg_fn, "initialize gv11b fb");
@@ -374,118 +382,34 @@ void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
 	gv11b_fb_fault_buf_set_state_hw(g, index, FAULT_BUF_ENABLED);
 }
 
-static void gv11b_fb_intr_en_set(struct gk20a *g,
-			unsigned int index, u32 mask)
-{
-	u32 reg_val;
-
-	reg_val = gk20a_readl(g, fb_niso_intr_en_set_r(index));
-	reg_val |= mask;
-	gk20a_writel(g, fb_niso_intr_en_set_r(index), reg_val);
-}
-
-static void gv11b_fb_intr_en_clr(struct gk20a *g,
-			unsigned int index, u32 mask)
-{
-	u32 reg_val;
-
-	reg_val = gk20a_readl(g, fb_niso_intr_en_clr_r(index));
-	reg_val |= mask;
-	gk20a_writel(g, fb_niso_intr_en_clr_r(index), reg_val);
-}
-
-static u32 gv11b_fb_get_hub_intr_clr_mask(struct gk20a *g,
-			unsigned int intr_type)
-{
-	u32 mask = 0;
-
-	if (intr_type & HUB_INTR_TYPE_OTHER) {
-		mask |=
-			fb_niso_intr_en_clr_mmu_other_fault_notify_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
-		mask |=
-			fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m() |
-			fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_REPLAY) {
-		mask |=
-			fb_niso_intr_en_clr_mmu_replayable_fault_notify_m() |
-			fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
-		mask |=
-			fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
-		mask |=
-			fb_niso_intr_en_clr_hub_access_counter_notify_m() |
-			fb_niso_intr_en_clr_hub_access_counter_error_m();
-	}
-
-	return mask;
-}
-
-static u32 gv11b_fb_get_hub_intr_en_mask(struct gk20a *g,
-			unsigned int intr_type)
-{
-	u32 mask = 0;
-
-	if (intr_type & HUB_INTR_TYPE_OTHER) {
-		mask |=
-			fb_niso_intr_en_set_mmu_other_fault_notify_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
-		mask |=
-			fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
-			fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_REPLAY) {
-		mask |=
-			fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
-			fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
-		mask |=
-			fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
-	}
-
-	if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
-		mask |=
-			fb_niso_intr_en_set_hub_access_counter_notify_m() |
-			fb_niso_intr_en_set_hub_access_counter_error_m();
-	}
-
-	return mask;
-}
-
-void gv11b_fb_enable_hub_intr(struct gk20a *g,
-			unsigned int index, unsigned int intr_type)
-{
-	u32 mask = 0;
-
-	mask = gv11b_fb_get_hub_intr_en_mask(g, intr_type);
-
-	if (mask)
-		gv11b_fb_intr_en_set(g, index, mask);
-}
-
-void gv11b_fb_disable_hub_intr(struct gk20a *g,
-			unsigned int index, unsigned int intr_type)
-{
-	u32 mask = 0;
-
-	mask = gv11b_fb_get_hub_intr_clr_mask(g, intr_type);
-
-	if (mask)
-		gv11b_fb_intr_en_clr(g, index, mask);
+void gv11b_fb_enable_hub_intr(struct gk20a *g)
+{
+	u32 mask = 0;
+
+	mask = fb_niso_intr_en_set_mmu_other_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_overflow_m() |
+		fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
+
+	gk20a_writel(g, fb_niso_intr_en_set_r(0),
+		mask);
+}
+
+void gv11b_fb_disable_hub_intr(struct gk20a *g)
+{
+	u32 mask = 0;
+
+	mask = fb_niso_intr_en_set_mmu_other_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
+		fb_niso_intr_en_set_mmu_replayable_fault_overflow_m() |
+		fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
+
+	gk20a_writel(g, fb_niso_intr_en_clr_r(0),
+		mask);
 }
 
 void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
@@ -1226,10 +1150,6 @@ void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
 static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
 			struct mmu_fault_info *mmfault, u32 fault_status)
 {
-	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX,
-		HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
-
-
 	if (fault_status & fb_mmu_fault_status_non_replayable_error_m()) {
 		if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))
 			gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX);
@@ -1247,8 +1167,6 @@ static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
 		gk20a_channel_put(mmfault->refch);
 		mmfault->refch = NULL;
 	}
-	g->ops.fb.enable_hub_intr(g, STALL_REG_INDEX,
-		HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
 }
 
 void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
@@ -1395,10 +1313,6 @@ void gv11b_fb_hub_isr(struct gk20a *g)
 
 		nvgpu_info(g, "ecc uncorrected error notify");
 
-		/* disable interrupts during handling */
-		g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX,
-				HUB_INTR_TYPE_ECC_UNCORRECTED);
-
 		status = gk20a_readl(g, fb_mmu_l2tlb_ecc_status_r());
 		if (status)
 			gv11b_handle_l2tlb_ecc_isr(g, status);
@@ -1410,11 +1324,6 @@ void gv11b_fb_hub_isr(struct gk20a *g)
 		status = gk20a_readl(g, fb_mmu_fillunit_ecc_status_r());
 		if (status)
 			gv11b_handle_fillunit_ecc_isr(g, status);
-
-		/* re-enable interrupts after handling */
-		g->ops.fb.enable_hub_intr(g, STALL_REG_INDEX,
-				HUB_INTR_TYPE_ECC_UNCORRECTED);
-
 	}
 	if (niso_intr &
 		(fb_niso_intr_mmu_other_fault_notify_m() |
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.h b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
index 0431568d..34988bd2 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
@@ -25,9 +25,6 @@
 #ifndef _NVGPU_GV11B_FB
 #define _NVGPU_GV11B_FB
 
-#define STALL_REG_INDEX 0
-#define NONSTALL_REG_INDEX 1
-
 #define NONREPLAY_REG_INDEX 0
 #define REPLAY_REG_INDEX 1
 
@@ -37,22 +34,13 @@
 #define FAULT_BUF_INVALID 0
 #define FAULT_BUF_VALID 1
 
-#define HUB_INTR_TYPE_OTHER 1U /* bit 0 */
-#define HUB_INTR_TYPE_NONREPLAY 2U /* bit 1 */
-#define HUB_INTR_TYPE_REPLAY 4U /* bit 2 */
-#define HUB_INTR_TYPE_ECC_UNCORRECTED 8U /* bit 3 */
-#define HUB_INTR_TYPE_ACCESS_COUNTER 16U /* bit 4 */
-#define HUB_INTR_TYPE_ALL (HUB_INTR_TYPE_OTHER | \
-		HUB_INTR_TYPE_NONREPLAY | \
-		HUB_INTR_TYPE_REPLAY | \
-		HUB_INTR_TYPE_ECC_UNCORRECTED | \
-		HUB_INTR_TYPE_ACCESS_COUNTER)
-
 #define FAULT_TYPE_OTHER_AND_NONREPLAY 0
 #define FAULT_TYPE_REPLAY 1
 
 struct gk20a;
 
+void gv11b_fb_init_hw(struct gk20a *g);
+
 void gv11b_fb_init_fs_state(struct gk20a *g);
 void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr);
 void gv11b_fb_reset(struct gk20a *g);
@@ -63,10 +51,8 @@ u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
 void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 		unsigned int index, unsigned int state);
 void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index);
-void gv11b_fb_enable_hub_intr(struct gk20a *g,
-		unsigned int index, unsigned int intr_type);
-void gv11b_fb_disable_hub_intr(struct gk20a *g,
-		unsigned int index, unsigned int intr_type);
+void gv11b_fb_enable_hub_intr(struct gk20a *g);
+void gv11b_fb_disable_hub_intr(struct gk20a *g);
 bool gv11b_fb_mmu_fault_pending(struct gk20a *g);
 void gv11b_fb_handle_dropped_mmu_fault(struct gk20a *g, u32 fault_status);
 void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
index 6132a2dd..270d4dd4 100644
--- a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
@@ -423,7 +423,7 @@ static const struct gpu_ops gv11b_ops = {
 	},
 	.fb = {
 		.reset = gv11b_fb_reset,
-		.init_hw = gk20a_fb_init_hw,
+		.init_hw = gv11b_fb_init_hw,
 		.init_fs_state = gv11b_fb_init_fs_state,
 		.init_cbc = gv11b_fb_init_cbc,
 		.set_mmu_page_size = gm20b_fb_set_mmu_page_size,
diff --git a/drivers/gpu/nvgpu/gv11b/mc_gv11b.c b/drivers/gpu/nvgpu/gv11b/mc_gv11b.c
index 31600828..b7a3ce62 100644
--- a/drivers/gpu/nvgpu/gv11b/mc_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/mc_gv11b.c
@@ -41,7 +41,6 @@ void mc_gv11b_intr_enable(struct gk20a *g)
 		0xffffffffU);
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
 		0xffffffffU);
-	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL);
 
 	g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] =
 		mc_intr_pfifo_pending_f() |
@@ -55,9 +54,6 @@ void mc_gv11b_intr_enable(struct gk20a *g)
 		mc_intr_pfifo_pending_f()
 		| eng_intr_mask;
 
-	/* TODO: Enable PRI faults for HUB ECC err intr */
-	g->ops.fb.enable_hub_intr(g, STALL_REG_INDEX, g->mm.hub_intr_types);
-
 	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
 		g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]);
 
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
index 5dd43c34..394ff0ed 100644
--- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
@@ -77,11 +77,7 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 
 	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
 
-	g->ops.fb.disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_OTHER |
-			HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
-
-	g->mm.hub_intr_types &= (~(HUB_INTR_TYPE_NONREPLAY |
-			HUB_INTR_TYPE_REPLAY));
+	g->ops.fb.disable_hub_intr(g);
 
 	if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) {
 		gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
@@ -105,15 +101,12 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 	nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
 }
 
-static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g,
-		u32 *hub_intr_types)
+static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g)
 {
-	*hub_intr_types |= HUB_INTR_TYPE_OTHER;
 	return 0;
 }
 
-static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g,
-		u32 *hub_intr_types)
+static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 {
 	struct vm_gk20a *vm = g->mm.bar2.vm;
 	int err = 0;
@@ -136,8 +129,6 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g,
 		}
 	}
 
-	*hub_intr_types |= HUB_INTR_TYPE_NONREPLAY;
-
 	if (!nvgpu_mem_is_valid(
 		&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY])) {
 		err = nvgpu_dma_alloc_map_sys(vm, fb_size,
@@ -149,8 +140,6 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g,
 			return;
 		}
 	}
-
-	*hub_intr_types |= HUB_INTR_TYPE_REPLAY;
 }
 
 static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
@@ -170,12 +159,10 @@ static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
 
 	nvgpu_mutex_init(&g->mm.hub_isr_mutex);
 
-	g->mm.hub_intr_types = HUB_INTR_TYPE_ECC_UNCORRECTED;
-
-	err = gv11b_mm_mmu_fault_info_buf_init(g, &g->mm.hub_intr_types);
+	err = gv11b_mm_mmu_fault_info_buf_init(g);
 
 	if (!err)
-		gv11b_mm_mmu_hw_fault_buf_init(g, &g->mm.hub_intr_types);
+		gv11b_mm_mmu_hw_fault_buf_init(g);
 
 	return err;
 }
diff --git a/drivers/gpu/nvgpu/include/nvgpu/mm.h b/drivers/gpu/nvgpu/include/nvgpu/mm.h
index ace22742..b06d0361 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/mm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/mm.h
@@ -131,7 +131,6 @@ struct mm_gk20a {
 	struct nvgpu_mem hw_fault_buf[FAULT_TYPE_NUM];
 	struct mmu_fault_info fault_info[FAULT_TYPE_NUM];
 	struct nvgpu_mutex hub_isr_mutex;
-	u32 hub_intr_types;
 
 	/*
 	 * Separate function to cleanup the CE since it requires a channel to