summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Nieto <dmartineznie@nvidia.com>2017-12-05 18:20:18 -0500
committermobile promotions <svcmobile_promotions@nvidia.com>2017-12-11 19:42:01 -0500
commit258ae4471296bcee03987778e3b7c79d3a027e53 (patch)
treea4890fa3a54b1857ba5c6ff3d770f84733b95154
parentba69628aafefcf4567f2f3b1459ccc4ebd8e203f (diff)
gpu: nvgpu: gv11b: PMU parity HWW ECC support
Adding support for ISR handling of ECC parity errors for PMU unit and
setting the initial IRQDST mask to deliver ECC interrupts to host in the
non-stall PMU irq path

JIRA: GPUT19X-83

Change-Id: I8efae6777811893ecce79d0e32ba81b62c27b1ef
Signed-off-by: David Nieto <dmartineznie@nvidia.com>
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1611625
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--drivers/gpu/nvgpu/common/linux/platform_gv11b_tegra.c28
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a.h1
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.c52
-rw-r--r--drivers/gpu/nvgpu/gk20a/pmu_gk20a.h2
-rw-r--r--drivers/gpu/nvgpu/gm20b/hal_gm20b.c1
-rw-r--r--drivers/gpu/nvgpu/gp106/hal_gp106.c1
-rw-r--r--drivers/gpu/nvgpu/gp10b/hal_gp10b.c1
-rw-r--r--drivers/gpu/nvgpu/gv100/hal_gv100.c1
-rw-r--r--drivers/gpu/nvgpu/gv11b/ecc_gv11b.h2
-rw-r--r--drivers/gpu/nvgpu/gv11b/hal_gv11b.c2
-rw-r--r--drivers/gpu/nvgpu/gv11b/pmu_gv11b.c117
-rw-r--r--drivers/gpu/nvgpu/gv11b/pmu_gv11b.h2
-rw-r--r--drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h208
13 files changed, 397 insertions, 21 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/platform_gv11b_tegra.c b/drivers/gpu/nvgpu/common/linux/platform_gv11b_tegra.c
index 3c6eac77..a2a60aaa 100644
--- a/drivers/gpu/nvgpu/common/linux/platform_gv11b_tegra.c
+++ b/drivers/gpu/nvgpu/common/linux/platform_gv11b_tegra.c
@@ -243,6 +243,9 @@ static struct device_attribute *dev_attr_mmu_hubtlb_ecc_uncorrected_err_count_ar
243static struct device_attribute *dev_attr_mmu_fillunit_ecc_corrected_err_count_array; 243static struct device_attribute *dev_attr_mmu_fillunit_ecc_corrected_err_count_array;
244static struct device_attribute *dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array; 244static struct device_attribute *dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array;
245 245
246static struct device_attribute *dev_attr_pmu_ecc_corrected_err_count_array;
247static struct device_attribute *dev_attr_pmu_ecc_uncorrected_err_count_array;
248
246void gr_gv11b_create_sysfs(struct gk20a *g) 249void gr_gv11b_create_sysfs(struct gk20a *g)
247{ 250{
248 struct device *dev = dev_from_gk20a(g); 251 struct device *dev = dev_from_gk20a(g);
@@ -414,6 +417,21 @@ void gr_gv11b_create_sysfs(struct gk20a *g)
414 &g->ecc.eng.t19x.mmu_fillunit_corrected_err_count, 417 &g->ecc.eng.t19x.mmu_fillunit_corrected_err_count,
415 &dev_attr_mmu_fillunit_ecc_corrected_err_count_array); 418 &dev_attr_mmu_fillunit_ecc_corrected_err_count_array);
416 419
420 error |= gp10b_ecc_stat_create(dev,
421 1,
422 "eng",
423 "pmu_ecc_uncorrected_err_count",
424 &g->ecc.eng.t19x.pmu_uncorrected_err_count,
425 &dev_attr_pmu_ecc_uncorrected_err_count_array);
426
427 error |= gp10b_ecc_stat_create(dev,
428 1,
429 "eng",
430 "pmu_ecc_corrected_err_count",
431 &g->ecc.eng.t19x.pmu_corrected_err_count,
432 &dev_attr_pmu_ecc_corrected_err_count_array);
433
434
417 if (error) 435 if (error)
418 dev_err(dev, "Failed to create gv11b sysfs attributes!\n"); 436 dev_err(dev, "Failed to create gv11b sysfs attributes!\n");
419} 437}
@@ -541,4 +559,14 @@ static void gr_gv11b_remove_sysfs(struct device *dev)
541 1, 559 1,
542 &g->ecc.eng.t19x.mmu_fillunit_corrected_err_count, 560 &g->ecc.eng.t19x.mmu_fillunit_corrected_err_count,
543 dev_attr_mmu_fillunit_ecc_corrected_err_count_array); 561 dev_attr_mmu_fillunit_ecc_corrected_err_count_array);
562
563 gp10b_ecc_stat_remove(dev,
564 1,
565 &g->ecc.eng.t19x.pmu_uncorrected_err_count,
566 dev_attr_pmu_ecc_uncorrected_err_count_array);
567
568 gp10b_ecc_stat_remove(dev,
569 1,
570 &g->ecc.eng.t19x.pmu_corrected_err_count,
571 dev_attr_pmu_ecc_corrected_err_count_array);
544} 572}
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index f577f93c..2ae1b758 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -888,6 +888,7 @@ struct gpu_ops {
888 void (*set_irqmask)(struct gk20a *g); 888 void (*set_irqmask)(struct gk20a *g);
889 void (*update_lspmu_cmdline_args)(struct gk20a *g); 889 void (*update_lspmu_cmdline_args)(struct gk20a *g);
890 void (*setup_apertures)(struct gk20a *g); 890 void (*setup_apertures)(struct gk20a *g);
891 u32 (*get_irqdest)(struct gk20a *g);
891 } pmu; 892 } pmu;
892 struct { 893 struct {
893 int (*init_debugfs)(struct gk20a *g); 894 int (*init_debugfs)(struct gk20a *g);
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index f9f85219..9c2f72fb 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -110,6 +110,33 @@ static void print_pmu_trace(struct nvgpu_pmu *pmu)
110 nvgpu_kfree(g, tracebuffer); 110 nvgpu_kfree(g, tracebuffer);
111} 111}
112 112
113u32 gk20a_pmu_get_irqdest(struct gk20a *g)
114{
115 u32 intr_dest;
116
117 /* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
118 intr_dest = pwr_falcon_irqdest_host_gptmr_f(0) |
119 pwr_falcon_irqdest_host_wdtmr_f(1) |
120 pwr_falcon_irqdest_host_mthd_f(0) |
121 pwr_falcon_irqdest_host_ctxsw_f(0) |
122 pwr_falcon_irqdest_host_halt_f(1) |
123 pwr_falcon_irqdest_host_exterr_f(0) |
124 pwr_falcon_irqdest_host_swgen0_f(1) |
125 pwr_falcon_irqdest_host_swgen1_f(0) |
126 pwr_falcon_irqdest_host_ext_f(0xff) |
127 pwr_falcon_irqdest_target_gptmr_f(1) |
128 pwr_falcon_irqdest_target_wdtmr_f(0) |
129 pwr_falcon_irqdest_target_mthd_f(0) |
130 pwr_falcon_irqdest_target_ctxsw_f(0) |
131 pwr_falcon_irqdest_target_halt_f(0) |
132 pwr_falcon_irqdest_target_exterr_f(0) |
133 pwr_falcon_irqdest_target_swgen0_f(0) |
134 pwr_falcon_irqdest_target_swgen1_f(0) |
135 pwr_falcon_irqdest_target_ext_f(0xff);
136
137 return intr_dest;
138}
139
113void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) 140void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
114{ 141{
115 struct gk20a *g = gk20a_from_pmu(pmu); 142 struct gk20a *g = gk20a_from_pmu(pmu);
@@ -126,26 +153,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
126 nvgpu_flcn_set_irq(pmu->flcn, false, 0x0, 0x0); 153 nvgpu_flcn_set_irq(pmu->flcn, false, 0x0, 0x0);
127 154
128 if (enable) { 155 if (enable) {
129 /* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */ 156 intr_dest = g->ops.pmu.get_irqdest(g);
130 intr_dest = pwr_falcon_irqdest_host_gptmr_f(0) |
131 pwr_falcon_irqdest_host_wdtmr_f(1) |
132 pwr_falcon_irqdest_host_mthd_f(0) |
133 pwr_falcon_irqdest_host_ctxsw_f(0) |
134 pwr_falcon_irqdest_host_halt_f(1) |
135 pwr_falcon_irqdest_host_exterr_f(0) |
136 pwr_falcon_irqdest_host_swgen0_f(1) |
137 pwr_falcon_irqdest_host_swgen1_f(0) |
138 pwr_falcon_irqdest_host_ext_f(0xff) |
139 pwr_falcon_irqdest_target_gptmr_f(1) |
140 pwr_falcon_irqdest_target_wdtmr_f(0) |
141 pwr_falcon_irqdest_target_mthd_f(0) |
142 pwr_falcon_irqdest_target_ctxsw_f(0) |
143 pwr_falcon_irqdest_target_halt_f(0) |
144 pwr_falcon_irqdest_target_exterr_f(0) |
145 pwr_falcon_irqdest_target_swgen0_f(0) |
146 pwr_falcon_irqdest_target_swgen1_f(0) |
147 pwr_falcon_irqdest_target_ext_f(0xff);
148
149 /* 0=disable, 1=enable */ 157 /* 0=disable, 1=enable */
150 intr_mask = pwr_falcon_irqmset_gptmr_f(1) | 158 intr_mask = pwr_falcon_irqmset_gptmr_f(1) |
151 pwr_falcon_irqmset_wdtmr_f(1) | 159 pwr_falcon_irqmset_wdtmr_f(1) |
@@ -729,6 +737,10 @@ void gk20a_pmu_isr(struct gk20a *g)
729 gk20a_readl(g, pwr_falcon_exterrstat_r()) & 737 gk20a_readl(g, pwr_falcon_exterrstat_r()) &
730 ~pwr_falcon_exterrstat_valid_m()); 738 ~pwr_falcon_exterrstat_valid_m());
731 } 739 }
740
741 if (g->ops.pmu.handle_ext_irq)
742 g->ops.pmu.handle_ext_irq(g, intr);
743
732 if (intr & pwr_falcon_irqstat_swgen0_true_f()) { 744 if (intr & pwr_falcon_irqstat_swgen0_true_f()) {
733 nvgpu_pmu_process_message(pmu); 745 nvgpu_pmu_process_message(pmu);
734 recheck = true; 746 recheck = true;
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index f1d9498c..5bd43510 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -76,5 +76,5 @@ void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
76 struct pmu_pg_stats_data *pg_stat_data); 76 struct pmu_pg_stats_data *pg_stat_data);
77bool gk20a_pmu_is_engine_in_reset(struct gk20a *g); 77bool gk20a_pmu_is_engine_in_reset(struct gk20a *g);
78int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset); 78int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset);
79 79u32 gk20a_pmu_get_irqdest(struct gk20a *g);
80#endif /*__PMU_GK20A_H__*/ 80#endif /*__PMU_GK20A_H__*/
diff --git a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
index 3ef11d11..d89ff2fe 100644
--- a/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
+++ b/drivers/gpu/nvgpu/gm20b/hal_gm20b.c
@@ -495,6 +495,7 @@ static const struct gpu_ops gm20b_ops = {
495 .dump_secure_fuses = pmu_dump_security_fuses_gm20b, 495 .dump_secure_fuses = pmu_dump_security_fuses_gm20b,
496 .reset_engine = gk20a_pmu_engine_reset, 496 .reset_engine = gk20a_pmu_engine_reset,
497 .is_engine_in_reset = gk20a_pmu_is_engine_in_reset, 497 .is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
498 .get_irqdest = gk20a_pmu_get_irqdest,
498 }, 499 },
499 .clk = { 500 .clk = {
500 .init_clk_support = gm20b_init_clk_support, 501 .init_clk_support = gm20b_init_clk_support,
diff --git a/drivers/gpu/nvgpu/gp106/hal_gp106.c b/drivers/gpu/nvgpu/gp106/hal_gp106.c
index a3a58903..2150b1ec 100644
--- a/drivers/gpu/nvgpu/gp106/hal_gp106.c
+++ b/drivers/gpu/nvgpu/gp106/hal_gp106.c
@@ -598,6 +598,7 @@ static const struct gpu_ops gp106_ops = {
598 .is_engine_in_reset = gp106_pmu_is_engine_in_reset, 598 .is_engine_in_reset = gp106_pmu_is_engine_in_reset,
599 .pmu_get_queue_tail = pwr_pmu_queue_tail_r, 599 .pmu_get_queue_tail = pwr_pmu_queue_tail_r,
600 .pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg, 600 .pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg,
601 .get_irqdest = gk20a_pmu_get_irqdest,
601 }, 602 },
602 .clk = { 603 .clk = {
603 .init_clk_support = gp106_init_clk_support, 604 .init_clk_support = gp106_init_clk_support,
diff --git a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
index 0186ba00..06311347 100644
--- a/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/hal_gp10b.c
@@ -547,6 +547,7 @@ static const struct gpu_ops gp10b_ops = {
547 .dump_secure_fuses = pmu_dump_security_fuses_gp10b, 547 .dump_secure_fuses = pmu_dump_security_fuses_gp10b,
548 .reset_engine = gk20a_pmu_engine_reset, 548 .reset_engine = gk20a_pmu_engine_reset,
549 .is_engine_in_reset = gk20a_pmu_is_engine_in_reset, 549 .is_engine_in_reset = gk20a_pmu_is_engine_in_reset,
550 .get_irqdest = gk20a_pmu_get_irqdest,
550 }, 551 },
551 .regops = { 552 .regops = {
552 .get_global_whitelist_ranges = 553 .get_global_whitelist_ranges =
diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.c b/drivers/gpu/nvgpu/gv100/hal_gv100.c
index 36ac029b..648a3b39 100644
--- a/drivers/gpu/nvgpu/gv100/hal_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/hal_gv100.c
@@ -603,6 +603,7 @@ static const struct gpu_ops gv100_ops = {
603 .pmu_mutex_size = pwr_pmu_mutex__size_1_v, 603 .pmu_mutex_size = pwr_pmu_mutex__size_1_v,
604 .is_engine_in_reset = gp106_pmu_is_engine_in_reset, 604 .is_engine_in_reset = gp106_pmu_is_engine_in_reset,
605 .pmu_get_queue_tail = pwr_pmu_queue_tail_r, 605 .pmu_get_queue_tail = pwr_pmu_queue_tail_r,
606 .get_irqdest = gk20a_pmu_get_irqdest,
606 }, 607 },
607 .clk = { 608 .clk = {
608 .init_clk_support = gp106_init_clk_support, 609 .init_clk_support = gp106_init_clk_support,
diff --git a/drivers/gpu/nvgpu/gv11b/ecc_gv11b.h b/drivers/gpu/nvgpu/gv11b/ecc_gv11b.h
index 94b25c02..ebce46ce 100644
--- a/drivers/gpu/nvgpu/gv11b/ecc_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/ecc_gv11b.h
@@ -59,6 +59,8 @@ struct ecc_eng_t19x {
59 struct gk20a_ecc_stat mmu_fillunit_corrected_err_count; 59 struct gk20a_ecc_stat mmu_fillunit_corrected_err_count;
60 struct gk20a_ecc_stat mmu_fillunit_uncorrected_err_count; 60 struct gk20a_ecc_stat mmu_fillunit_uncorrected_err_count;
61 /* PMU */ 61 /* PMU */
62 struct gk20a_ecc_stat pmu_corrected_err_count;
63 struct gk20a_ecc_stat pmu_uncorrected_err_count;
62}; 64};
63 65
64#endif 66#endif
diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
index 6a21eb2d..f6bdf6e5 100644
--- a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
@@ -611,6 +611,8 @@ static const struct gpu_ops gv11b_ops = {
611 .pmu_nsbootstrap = gv11b_pmu_bootstrap, 611 .pmu_nsbootstrap = gv11b_pmu_bootstrap,
612 .pmu_pg_set_sub_feature_mask = gv11b_pg_set_subfeature_mask, 612 .pmu_pg_set_sub_feature_mask = gv11b_pg_set_subfeature_mask,
613 .is_pmu_supported = gv11b_is_pmu_supported, 613 .is_pmu_supported = gv11b_is_pmu_supported,
614 .get_irqdest = gv11b_pmu_get_irqdest,
615 .handle_ext_irq = gv11b_pmu_handle_ext_irq,
614 }, 616 },
615 .regops = { 617 .regops = {
616 .get_global_whitelist_ranges = 618 .get_global_whitelist_ranges =
diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
index 4b244f5a..a972510f 100644
--- a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c
@@ -287,6 +287,123 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)
287 return 0; 287 return 0;
288} 288}
289 289
290void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0)
291{
292 u32 intr1;
293 u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
294 u32 corrected_delta, uncorrected_delta;
295 u32 corrected_overflow, uncorrected_overflow;
296
297 /*
298 * handle the ECC interrupt
299 */
300 if (intr0 & pwr_falcon_irqstat_ext_ecc_parity_true_f()) {
301 intr1 = gk20a_readl(g, pwr_pmu_ecc_intr_status_r());
302 if (intr1 & (pwr_pmu_ecc_intr_status_corrected_m() |
303 pwr_pmu_ecc_intr_status_uncorrected_m())) {
304
305 ecc_status = gk20a_readl(g,
306 pwr_pmu_falcon_ecc_status_r());
307 ecc_addr = gk20a_readl(g,
308 pwr_pmu_falcon_ecc_address_r());
309 corrected_cnt = gk20a_readl(g,
310 pwr_pmu_falcon_ecc_corrected_err_count_r());
311 uncorrected_cnt = gk20a_readl(g,
312 pwr_pmu_falcon_ecc_uncorrected_err_count_r());
313
314 corrected_delta =
315 pwr_pmu_falcon_ecc_corrected_err_count_total_v(corrected_cnt);
316 uncorrected_delta =
317 pwr_pmu_falcon_ecc_uncorrected_err_count_total_v(uncorrected_cnt);
318 corrected_overflow = ecc_status &
319 pwr_pmu_falcon_ecc_status_corrected_err_total_counter_overflow_m();
320
321 uncorrected_overflow = ecc_status &
322 pwr_pmu_falcon_ecc_status_uncorrected_err_total_counter_overflow_m();
323 corrected_overflow = ecc_status &
324 pwr_pmu_falcon_ecc_status_corrected_err_total_counter_overflow_m();
325
326 /* clear the interrupt */
327 if ((intr1 & pwr_pmu_ecc_intr_status_corrected_m()) ||
328 corrected_overflow) {
329 gk20a_writel(g, pwr_pmu_falcon_ecc_corrected_err_count_r(), 0);
330 }
331 if ((intr1 & pwr_pmu_ecc_intr_status_uncorrected_m()) ||
332 uncorrected_overflow) {
333 gk20a_writel(g,
334 pwr_pmu_falcon_ecc_uncorrected_err_count_r(), 0);
335 }
336
337 gk20a_writel(g, pwr_pmu_falcon_ecc_status_r(),
338 pwr_pmu_falcon_ecc_status_reset_task_f());
339
340 /* update counters per slice */
341 if (corrected_overflow)
342 corrected_delta += (0x1UL << pwr_pmu_falcon_ecc_corrected_err_count_total_s());
343 if (uncorrected_overflow)
344 uncorrected_delta += (0x1UL << pwr_pmu_falcon_ecc_uncorrected_err_count_total_s());
345
346 g->ecc.eng.t19x.pmu_corrected_err_count.counters[0] += corrected_delta;
347 g->ecc.eng.t19x.pmu_uncorrected_err_count.counters[0] += uncorrected_delta;
348
349 nvgpu_log(g, gpu_dbg_intr,
350 "pmu ecc interrupt intr1: 0x%x", intr1);
351
352 if (ecc_status & pwr_pmu_falcon_ecc_status_corrected_err_imem_m())
353 nvgpu_log(g, gpu_dbg_intr,
354 "imem ecc error corrected");
355 if (ecc_status & pwr_pmu_falcon_ecc_status_uncorrected_err_imem_m())
356 nvgpu_log(g, gpu_dbg_intr,
357 "imem ecc error uncorrected");
358 if (ecc_status & pwr_pmu_falcon_ecc_status_corrected_err_dmem_m())
359 nvgpu_log(g, gpu_dbg_intr,
360 "dmem ecc error corrected");
361 if (ecc_status & pwr_pmu_falcon_ecc_status_uncorrected_err_dmem_m())
362 nvgpu_log(g, gpu_dbg_intr,
363 "dmem ecc error uncorrected");
364
365 if (corrected_overflow || uncorrected_overflow)
366 nvgpu_info(g, "ecc counter overflow!");
367
368 nvgpu_log(g, gpu_dbg_intr,
369 "ecc error row address: 0x%x",
370 pwr_pmu_falcon_ecc_address_row_address_v(ecc_addr));
371
372 nvgpu_log(g, gpu_dbg_intr,
373 "ecc error count corrected: %d, uncorrected %d",
374 g->ecc.eng.t19x.pmu_corrected_err_count.counters[0],
375 g->ecc.eng.t19x.pmu_uncorrected_err_count.counters[0]);
376 }
377 }
378}
379
380u32 gv11b_pmu_get_irqdest(struct gk20a *g)
381{
382 u32 intr_dest;
383
384 /* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
385 intr_dest = pwr_falcon_irqdest_host_gptmr_f(0) |
386 pwr_falcon_irqdest_host_wdtmr_f(1) |
387 pwr_falcon_irqdest_host_mthd_f(0) |
388 pwr_falcon_irqdest_host_ctxsw_f(0) |
389 pwr_falcon_irqdest_host_halt_f(1) |
390 pwr_falcon_irqdest_host_exterr_f(0) |
391 pwr_falcon_irqdest_host_swgen0_f(1) |
392 pwr_falcon_irqdest_host_swgen1_f(0) |
393 pwr_falcon_irqdest_host_ext_ecc_parity_f(1) |
394 pwr_falcon_irqdest_target_gptmr_f(1) |
395 pwr_falcon_irqdest_target_wdtmr_f(0) |
396 pwr_falcon_irqdest_target_mthd_f(0) |
397 pwr_falcon_irqdest_target_ctxsw_f(0) |
398 pwr_falcon_irqdest_target_halt_f(0) |
399 pwr_falcon_irqdest_target_exterr_f(0) |
400 pwr_falcon_irqdest_target_swgen0_f(0) |
401 pwr_falcon_irqdest_target_swgen1_f(0) |
402 pwr_falcon_irqdest_target_ext_ecc_parity_f(0);
403
404 return intr_dest;
405}
406
290static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg, 407static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg,
291 void *param, u32 handle, u32 status) 408 void *param, u32 handle, u32 status)
292{ 409{
diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.h b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.h
index e917188d..dd6db10c 100644
--- a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.h
@@ -35,4 +35,6 @@ bool gv11b_is_lazy_bootstrap(u32 falcon_id);
35bool gv11b_is_priv_load(u32 falcon_id); 35bool gv11b_is_priv_load(u32 falcon_id);
36int gv11b_pmu_setup_elpg(struct gk20a *g); 36int gv11b_pmu_setup_elpg(struct gk20a *g);
37 37
38u32 gv11b_pmu_get_irqdest(struct gk20a *g);
39void gv11b_pmu_handle_ext_irq(struct gk20a *g, u32 intr0);
38#endif /*__PMU_GV11B_H_*/ 40#endif /*__PMU_GV11B_H_*/
diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h
index eba6d806..c16d44f1 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h
@@ -88,6 +88,30 @@ static inline u32 pwr_falcon_irqstat_ext_second_true_f(void)
88{ 88{
89 return 0x800U; 89 return 0x800U;
90} 90}
91static inline u32 pwr_falcon_irqstat_ext_ecc_parity_true_f(void)
92{
93 return 0x400U;
94}
95static inline u32 pwr_pmu_ecc_intr_status_r(void)
96{
97 return 0x0010abfcU;
98}
99static inline u32 pwr_pmu_ecc_intr_status_corrected_f(u32 v)
100{
101 return (v & 0x1U) << 0U;
102}
103static inline u32 pwr_pmu_ecc_intr_status_corrected_m(void)
104{
105 return 0x1U << 0U;
106}
107static inline u32 pwr_pmu_ecc_intr_status_uncorrected_f(u32 v)
108{
109 return (v & 0x1U) << 1U;
110}
111static inline u32 pwr_pmu_ecc_intr_status_uncorrected_m(void)
112{
113 return 0x1U << 1U;
114}
91static inline u32 pwr_falcon_irqmode_r(void) 115static inline u32 pwr_falcon_irqmode_r(void)
92{ 116{
93 return 0x0010a00cU; 117 return 0x0010a00cU;
@@ -160,6 +184,10 @@ static inline u32 pwr_falcon_irqmset_ext_rsvd8_f(u32 v)
160{ 184{
161 return (v & 0x1U) << 15U; 185 return (v & 0x1U) << 15U;
162} 186}
187static inline u32 pwr_falcon_irqmset_ext_ecc_parity_f(u32 v)
188{
189 return (v & 0x1U) << 10U;
190}
163static inline u32 pwr_falcon_irqmclr_r(void) 191static inline u32 pwr_falcon_irqmclr_r(void)
164{ 192{
165 return 0x0010a014U; 193 return 0x0010a014U;
@@ -228,6 +256,10 @@ static inline u32 pwr_falcon_irqmclr_ext_rsvd8_f(u32 v)
228{ 256{
229 return (v & 0x1U) << 15U; 257 return (v & 0x1U) << 15U;
230} 258}
259static inline u32 pwr_falcon_irqmclr_ext_ecc_parity_f(u32 v)
260{
261 return (v & 0x1U) << 10U;
262}
231static inline u32 pwr_falcon_irqmask_r(void) 263static inline u32 pwr_falcon_irqmask_r(void)
232{ 264{
233 return 0x0010a018U; 265 return 0x0010a018U;
@@ -300,6 +332,10 @@ static inline u32 pwr_falcon_irqdest_host_ext_rsvd8_f(u32 v)
300{ 332{
301 return (v & 0x1U) << 15U; 333 return (v & 0x1U) << 15U;
302} 334}
335static inline u32 pwr_falcon_irqdest_host_ext_ecc_parity_f(u32 v)
336{
337 return (v & 0x1U) << 10U;
338}
303static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v) 339static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v)
304{ 340{
305 return (v & 0x1U) << 16U; 341 return (v & 0x1U) << 16U;
@@ -364,6 +400,10 @@ static inline u32 pwr_falcon_irqdest_target_ext_rsvd8_f(u32 v)
364{ 400{
365 return (v & 0x1U) << 31U; 401 return (v & 0x1U) << 31U;
366} 402}
403static inline u32 pwr_falcon_irqdest_target_ext_ecc_parity_f(u32 v)
404{
405 return (v & 0x1U) << 26U;
406}
367static inline u32 pwr_falcon_curctx_r(void) 407static inline u32 pwr_falcon_curctx_r(void)
368{ 408{
369 return 0x0010a050U; 409 return 0x0010a050U;
@@ -908,6 +948,174 @@ static inline u32 pwr_pmu_pg_intren_r(u32 i)
908{ 948{
909 return 0x0010a760U + i*4U; 949 return 0x0010a760U + i*4U;
910} 950}
951static inline u32 pwr_pmu_falcon_ecc_status_r(void)
952{
953 return 0x0010a6b0U;
954}
955static inline u32 pwr_pmu_falcon_ecc_status_corrected_err_imem_f(u32 v)
956{
957 return (v & 0x1U) << 0U;
958}
959static inline u32 pwr_pmu_falcon_ecc_status_corrected_err_imem_m(void)
960{
961 return 0x1U << 0U;
962}
963static inline u32 pwr_pmu_falcon_ecc_status_corrected_err_dmem_f(u32 v)
964{
965 return (v & 0x1U) << 1U;
966}
967static inline u32 pwr_pmu_falcon_ecc_status_corrected_err_dmem_m(void)
968{
969 return 0x1U << 1U;
970}
971static inline u32 pwr_pmu_falcon_ecc_status_uncorrected_err_imem_f(u32 v)
972{
973 return (v & 0x1U) << 8U;
974}
975static inline u32 pwr_pmu_falcon_ecc_status_uncorrected_err_imem_m(void)
976{
977 return 0x1U << 8U;
978}
979static inline u32 pwr_pmu_falcon_ecc_status_uncorrected_err_dmem_f(u32 v)
980{
981 return (v & 0x1U) << 9U;
982}
983static inline u32 pwr_pmu_falcon_ecc_status_uncorrected_err_dmem_m(void)
984{
985 return 0x1U << 9U;
986}
987static inline u32 pwr_pmu_falcon_ecc_status_corrected_err_total_counter_overflow_f(u32 v)
988{
989 return (v & 0x1U) << 16U;
990}
991static inline u32 pwr_pmu_falcon_ecc_status_corrected_err_total_counter_overflow_m(void)
992{
993 return 0x1U << 16U;
994}
995static inline u32 pwr_pmu_falcon_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v)
996{
997 return (v & 0x1U) << 18U;
998}
999static inline u32 pwr_pmu_falcon_ecc_status_uncorrected_err_total_counter_overflow_m(void)
1000{
1001 return 0x1U << 18U;
1002}
1003static inline u32 pwr_pmu_falcon_ecc_status_reset_f(u32 v)
1004{
1005 return (v & 0x1U) << 31U;
1006}
1007static inline u32 pwr_pmu_falcon_ecc_status_reset_task_f(void)
1008{
1009 return 0x80000000U;
1010}
1011static inline u32 pwr_pmu_falcon_ecc_address_r(void)
1012{
1013 return 0x0010a6b4U;
1014}
1015static inline u32 pwr_pmu_falcon_ecc_address_index_f(u32 v)
1016{
1017 return (v & 0xffffffU) << 0U;
1018}
1019static inline u32 pwr_pmu_falcon_ecc_address_type_f(u32 v)
1020{
1021 return (v & 0xfU) << 20U;
1022}
1023static inline u32 pwr_pmu_falcon_ecc_address_type_imem_f(void)
1024{
1025 return 0x0U;
1026}
1027static inline u32 pwr_pmu_falcon_ecc_address_type_dmem_f(void)
1028{
1029 return 0x100000U;
1030}
1031static inline u32 pwr_pmu_falcon_ecc_address_row_address_s(void)
1032{
1033 return 16U;
1034}
1035static inline u32 pwr_pmu_falcon_ecc_address_row_address_f(u32 v)
1036{
1037 return (v & 0xffffU) << 0U;
1038}
1039static inline u32 pwr_pmu_falcon_ecc_address_row_address_m(void)
1040{
1041 return 0xffffU << 0U;
1042}
1043static inline u32 pwr_pmu_falcon_ecc_address_row_address_v(u32 r)
1044{
1045 return (r >> 0U) & 0xffffU;
1046}
1047static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_r(void)
1048{
1049 return 0x0010a6b8U;
1050}
1051static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_total_s(void)
1052{
1053 return 16U;
1054}
1055static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_total_f(u32 v)
1056{
1057 return (v & 0xffffU) << 0U;
1058}
1059static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_total_m(void)
1060{
1061 return 0xffffU << 0U;
1062}
1063static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_total_v(u32 r)
1064{
1065 return (r >> 0U) & 0xffffU;
1066}
1067static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_unique_total_s(void)
1068{
1069 return 16U;
1070}
1071static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_unique_total_f(u32 v)
1072{
1073 return (v & 0xffffU) << 16U;
1074}
1075static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_unique_total_m(void)
1076{
1077 return 0xffffU << 16U;
1078}
1079static inline u32 pwr_pmu_falcon_ecc_corrected_err_count_unique_total_v(u32 r)
1080{
1081 return (r >> 16U) & 0xffffU;
1082}
1083static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_r(void)
1084{
1085 return 0x0010a6bcU;
1086}
1087static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_total_s(void)
1088{
1089 return 16U;
1090}
1091static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_total_f(u32 v)
1092{
1093 return (v & 0xffffU) << 0U;
1094}
1095static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_total_m(void)
1096{
1097 return 0xffffU << 0U;
1098}
1099static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_total_v(u32 r)
1100{
1101 return (r >> 0U) & 0xffffU;
1102}
1103static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_unique_total_s(void)
1104{
1105 return 16U;
1106}
1107static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_unique_total_f(u32 v)
1108{
1109 return (v & 0xffffU) << 16U;
1110}
1111static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_unique_total_m(void)
1112{
1113 return 0xffffU << 16U;
1114}
1115static inline u32 pwr_pmu_falcon_ecc_uncorrected_err_count_unique_total_v(u32 r)
1116{
1117 return (r >> 16U) & 0xffffU;
1118}
911static inline u32 pwr_fbif_transcfg_r(u32 i) 1119static inline u32 pwr_fbif_transcfg_r(u32 i)
912{ 1120{
913 return 0x0010ae00U + i*4U; 1121 return 0x0010ae00U + i*4U;