summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/ltc_gv11b.c')
-rw-r--r--  drivers/gpu/nvgpu/gv11b/ltc_gv11b.c | 205
1 files changed, 205 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
new file mode 100644
index 00000000..a199e024
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c
@@ -0,0 +1,205 @@
1/*
2 * GV11B LTC
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "gk20a/gk20a.h"
26#include "gp10b/ltc_gp10b.h"
27
28#include "ltc_gv11b.h"
29
30#include <nvgpu/hw/gv11b/hw_ltc_gv11b.h>
31#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
32#include <nvgpu/hw/gv11b/hw_top_gv11b.h>
33#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
34#include <nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h>
35
36/*
37 * Sets the ZBC stencil for the passed index.
38 */
39void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g,
40 struct zbc_entry *stencil_val,
41 u32 index)
42{
43 u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;
44
45 gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
46 ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));
47
48 gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(),
49 stencil_val->depth);
50
51 gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r());
52}
53
/*
 * Bring the GV11B L2 (LTC) to a known initial state.
 *
 * Resets the PFB and L2 units, enables L2 ELPG, caches the LTC counts
 * in @g, sets the VDC 4to2 disable bit, and configures the LTC
 * interrupt enables (evicted-CB / illegal-compstat off, ECC SEC and
 * DED on).
 *
 * NOTE(review): the register access order below is preserved exactly;
 * hardware init sequences are typically order-sensitive.
 */
void gv11b_ltc_init_fs_state(struct gk20a *g)
{
	u32 ltc_intr;
	u32 reg;

	gk20a_dbg_info("initialize gv11b l2");

	/* Reset PFB and L2 together through the MC reset hook. */
	g->ops.mc.reset(g, mc_enable_pfb_enabled_f() |
			mc_enable_l2_enabled_f());

	/* Enable engine-level power gating (ELPG) for L2. */
	reg = gk20a_readl(g, mc_elpg_enable_r());
	reg |= mc_elpg_enable_l2_enabled_f();
	gk20a_writel(g, mc_elpg_enable_r(), reg);

	/* Cache the physical LTC count and the enabled (floorswept) count. */
	g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
	g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r());
	gk20a_dbg_info("%u ltcs out of %u", g->ltc_count, g->max_ltc_count);

	/*
	 * Set the VDC 4to2 disable bit. The per-unit LTC0/LTS0 copy is
	 * read and the modified value is written back through the
	 * broadcast register so every LTC/LTS gets the same setting.
	 */
	gk20a_writel(g, ltc_ltcs_ltss_dstg_cfg0_r(),
		gk20a_readl(g, ltc_ltc0_lts0_dstg_cfg0_r()) |
		ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m());

	/* Disable LTC interrupts */
	reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
	reg &= ~ltc_ltcs_ltss_intr_en_evicted_cb_m();
	reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m();
	gk20a_writel(g, ltc_ltcs_ltss_intr_r(), reg);

	/* Enable ECC interrupts */
	ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
	ltc_intr |= ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f() |
		ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f();
	gk20a_writel(g, ltc_ltcs_ltss_intr_r(),
		ltc_intr);
}
89
90void gv11b_ltc_isr(struct gk20a *g)
91{
92 u32 mc_intr, ltc_intr3;
93 unsigned int ltc, slice;
94 u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
95 u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
96 u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
97 u32 corrected_delta, uncorrected_delta;
98 u32 corrected_overflow, uncorrected_overflow;
99 u32 ltc_corrected, ltc_uncorrected;
100
101 mc_intr = gk20a_readl(g, mc_intr_ltc_r());
102 for (ltc = 0; ltc < g->ltc_count; ltc++) {
103 if ((mc_intr & 1 << ltc) == 0)
104 continue;
105 ltc_corrected = ltc_uncorrected = 0;
106
107 for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
108 u32 offset = ltc_stride * ltc + lts_stride * slice;
109 ltc_intr3 = gk20a_readl(g, ltc_ltc0_lts0_intr3_r() +
110 offset);
111
112 /* Detect and handle ECC PARITY errors */
113
114 if (ltc_intr3 &
115 (ltc_ltcs_ltss_intr3_ecc_uncorrected_m() |
116 ltc_ltcs_ltss_intr3_ecc_corrected_m())) {
117
118 ecc_status = gk20a_readl(g,
119 ltc_ltc0_lts0_l2_cache_ecc_status_r() +
120 offset);
121 ecc_addr = gk20a_readl(g,
122 ltc_ltc0_lts0_l2_cache_ecc_address_r() +
123 offset);
124 corrected_cnt = gk20a_readl(g,
125 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset);
126 uncorrected_cnt = gk20a_readl(g,
127 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset);
128
129 corrected_delta =
130 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_v(corrected_cnt);
131 uncorrected_delta =
132 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_v(uncorrected_cnt);
133 corrected_overflow = ecc_status &
134 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_m();
135
136 uncorrected_overflow = ecc_status &
137 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m();
138
139 /* clear the interrupt */
140 if ((corrected_delta > 0) || corrected_overflow) {
141 gk20a_writel(g, ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset, 0);
142 }
143 if ((uncorrected_delta > 0) || uncorrected_overflow) {
144 gk20a_writel(g,
145 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset, 0);
146 }
147
148 gk20a_writel(g, ltc_ltc0_lts0_l2_cache_ecc_status_r() + offset,
149 ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f());
150
151 /* update counters per slice */
152 if (corrected_overflow)
153 corrected_delta += (0x1UL << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
154 if (uncorrected_overflow)
155 uncorrected_delta += (0x1UL << ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());
156
157 ltc_corrected += corrected_delta;
158 ltc_uncorrected += uncorrected_delta;
159 nvgpu_log(g, gpu_dbg_intr,
160 "ltc:%d lts: %d cache ecc interrupt intr: 0x%x", ltc, slice, ltc_intr3);
161
162 if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m())
163 nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected");
164 if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m())
165 nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected");
166 if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m())
167 nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected");
168 if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m())
169 nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected");
170 if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m())
171 nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected");
172 if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m())
173 nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected");
174
175 if (corrected_overflow || uncorrected_overflow)
176 nvgpu_info(g, "ecc counter overflow!");
177
178 nvgpu_log(g, gpu_dbg_intr,
179 "ecc error address: 0x%x", ecc_addr);
180
181 }
182
183 }
184 g->ecc.ltc.t19x.l2_cache_corrected_err_count.counters[ltc] +=
185 ltc_corrected;
186 g->ecc.ltc.t19x.l2_cache_uncorrected_err_count.counters[ltc] +=
187 ltc_uncorrected;
188
189 }
190
191 /* fallback to other interrupts */
192 gp10b_ltc_isr(g);
193}
194
195u32 gv11b_ltc_cbc_fix_config(struct gk20a *g, int base)
196{
197 u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r());
198
199 if (ltc_ltcs_ltss_cbc_num_active_ltcs__v(val) == 2)
200 return base * 2;
201 else if (ltc_ltcs_ltss_cbc_num_active_ltcs__v(val) != 1) {
202 nvgpu_err(g, "Invalid number of active ltcs: %08x", val);
203 }
204 return base;
205}