diff options
author | David Nieto <dmartineznie@nvidia.com> | 2017-12-05 18:20:18 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-12-11 19:42:01 -0500 |
commit | 258ae4471296bcee03987778e3b7c79d3a027e53 (patch) | |
tree | a4890fa3a54b1857ba5c6ff3d770f84733b95154 /drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | |
parent | ba69628aafefcf4567f2f3b1459ccc4ebd8e203f (diff) |
gpu: nvgpu: gv11b: PMU parity HWW ECC support
Adding support for ISR handling of ECC parity errors for PMU unit and setting
the initial IRQDST mask to deliver ECC interrupts to host in the non-stall
PMU irq path
JIRA: GPUT19X-83
Change-Id: I8efae6777811893ecce79d0e32ba81b62c27b1ef
Signed-off-by: David Nieto <dmartineznie@nvidia.com>
Signed-off-by: Richard Zhao <rizhao@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1611625
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 52 |
1 file changed, 32 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index f9f85219..9c2f72fb 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | |||
@@ -110,6 +110,33 @@ static void print_pmu_trace(struct nvgpu_pmu *pmu) | |||
110 | nvgpu_kfree(g, tracebuffer); | 110 | nvgpu_kfree(g, tracebuffer); |
111 | } | 111 | } |
112 | 112 | ||
113 | u32 gk20a_pmu_get_irqdest(struct gk20a *g) | ||
114 | { | ||
115 | u32 intr_dest; | ||
116 | |||
117 | /* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */ | ||
118 | intr_dest = pwr_falcon_irqdest_host_gptmr_f(0) | | ||
119 | pwr_falcon_irqdest_host_wdtmr_f(1) | | ||
120 | pwr_falcon_irqdest_host_mthd_f(0) | | ||
121 | pwr_falcon_irqdest_host_ctxsw_f(0) | | ||
122 | pwr_falcon_irqdest_host_halt_f(1) | | ||
123 | pwr_falcon_irqdest_host_exterr_f(0) | | ||
124 | pwr_falcon_irqdest_host_swgen0_f(1) | | ||
125 | pwr_falcon_irqdest_host_swgen1_f(0) | | ||
126 | pwr_falcon_irqdest_host_ext_f(0xff) | | ||
127 | pwr_falcon_irqdest_target_gptmr_f(1) | | ||
128 | pwr_falcon_irqdest_target_wdtmr_f(0) | | ||
129 | pwr_falcon_irqdest_target_mthd_f(0) | | ||
130 | pwr_falcon_irqdest_target_ctxsw_f(0) | | ||
131 | pwr_falcon_irqdest_target_halt_f(0) | | ||
132 | pwr_falcon_irqdest_target_exterr_f(0) | | ||
133 | pwr_falcon_irqdest_target_swgen0_f(0) | | ||
134 | pwr_falcon_irqdest_target_swgen1_f(0) | | ||
135 | pwr_falcon_irqdest_target_ext_f(0xff); | ||
136 | |||
137 | return intr_dest; | ||
138 | } | ||
139 | |||
113 | void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) | 140 | void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) |
114 | { | 141 | { |
115 | struct gk20a *g = gk20a_from_pmu(pmu); | 142 | struct gk20a *g = gk20a_from_pmu(pmu); |
@@ -126,26 +153,7 @@ void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable) | |||
126 | nvgpu_flcn_set_irq(pmu->flcn, false, 0x0, 0x0); | 153 | nvgpu_flcn_set_irq(pmu->flcn, false, 0x0, 0x0); |
127 | 154 | ||
128 | if (enable) { | 155 | if (enable) { |
129 | /* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */ | 156 | intr_dest = g->ops.pmu.get_irqdest(g); |
130 | intr_dest = pwr_falcon_irqdest_host_gptmr_f(0) | | ||
131 | pwr_falcon_irqdest_host_wdtmr_f(1) | | ||
132 | pwr_falcon_irqdest_host_mthd_f(0) | | ||
133 | pwr_falcon_irqdest_host_ctxsw_f(0) | | ||
134 | pwr_falcon_irqdest_host_halt_f(1) | | ||
135 | pwr_falcon_irqdest_host_exterr_f(0) | | ||
136 | pwr_falcon_irqdest_host_swgen0_f(1) | | ||
137 | pwr_falcon_irqdest_host_swgen1_f(0) | | ||
138 | pwr_falcon_irqdest_host_ext_f(0xff) | | ||
139 | pwr_falcon_irqdest_target_gptmr_f(1) | | ||
140 | pwr_falcon_irqdest_target_wdtmr_f(0) | | ||
141 | pwr_falcon_irqdest_target_mthd_f(0) | | ||
142 | pwr_falcon_irqdest_target_ctxsw_f(0) | | ||
143 | pwr_falcon_irqdest_target_halt_f(0) | | ||
144 | pwr_falcon_irqdest_target_exterr_f(0) | | ||
145 | pwr_falcon_irqdest_target_swgen0_f(0) | | ||
146 | pwr_falcon_irqdest_target_swgen1_f(0) | | ||
147 | pwr_falcon_irqdest_target_ext_f(0xff); | ||
148 | |||
149 | /* 0=disable, 1=enable */ | 157 | /* 0=disable, 1=enable */ |
150 | intr_mask = pwr_falcon_irqmset_gptmr_f(1) | | 158 | intr_mask = pwr_falcon_irqmset_gptmr_f(1) | |
151 | pwr_falcon_irqmset_wdtmr_f(1) | | 159 | pwr_falcon_irqmset_wdtmr_f(1) | |
@@ -729,6 +737,10 @@ void gk20a_pmu_isr(struct gk20a *g) | |||
729 | gk20a_readl(g, pwr_falcon_exterrstat_r()) & | 737 | gk20a_readl(g, pwr_falcon_exterrstat_r()) & |
730 | ~pwr_falcon_exterrstat_valid_m()); | 738 | ~pwr_falcon_exterrstat_valid_m()); |
731 | } | 739 | } |
740 | |||
741 | if (g->ops.pmu.handle_ext_irq) | ||
742 | g->ops.pmu.handle_ext_irq(g, intr); | ||
743 | |||
732 | if (intr & pwr_falcon_irqstat_swgen0_true_f()) { | 744 | if (intr & pwr_falcon_irqstat_swgen0_true_f()) { |
733 | nvgpu_pmu_process_message(pmu); | 745 | nvgpu_pmu_process_message(pmu); |
734 | recheck = true; | 746 | recheck = true; |