Diffstat (limited to 'drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c')
-rw-r--r--  drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c | 207
 1 file changed, 207 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
new file mode 100644
index 00000000..98306079
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/ltc/ltc_gv11b.c
@@ -0,0 +1,207 @@
/*
 * GV11B LTC
 *
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/io.h>
#include "gk20a/gk20a.h"

#include "ltc_gp10b.h"
#include "ltc_gv11b.h"

#include <nvgpu/hw/gv11b/hw_ltc_gv11b.h>
#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
#include <nvgpu/hw/gv11b/hw_top_gv11b.h>

#include <nvgpu/utils.h>

/*
 * Sets the ZBC stencil clear value for the passed index. The stencil
 * value is carried in the depth field of struct zbc_entry.
 */
void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g,
                                     struct zbc_entry *stencil_val,
                                     u32 index)
{
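        /* Entry 0 of the ZBC table is reserved, so user indices are
         * offset by GK20A_STARTOF_ZBC_TABLE. */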
        u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;

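        /*
         * Two-step index/data write: select the table entry, then program
         * its stencil clear value. nvgpu_writel_check() reads the register
         * back to verify that the write landed.
         */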
        nvgpu_writel_check(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
                           ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));

        nvgpu_writel_check(g,
                           ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(),
                           stencil_val->depth);
}

void gv11b_ltc_init_fs_state(struct gk20a *g)
{
        struct gr_gk20a *gr = &g->gr;
        u32 ltc_intr;
        u32 reg;

        nvgpu_log_info(g, "initialize gv11b l2");

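        /*
         * top_num_ltcs_r() reports the chip's physical LTC count; the priv
         * ring enumeration reflects the count after floorsweeping.
         */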
        g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r());
        g->ltc_count = g->ops.priv_ring.enum_ltc(g);
        nvgpu_log_info(g, "%u ltcs out of %u", g->ltc_count, g->max_ltc_count);

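        /*
         * Decode the CBC parameters: slices per LTC, and the cache line
         * size, which is encoded as a shift applied to 512 bytes.
         */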
        reg = gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
        gr->slices_per_ltc = ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(reg);
        gr->cacheline_size =
                512U << ltc_ltcs_ltss_cbc_param_cache_line_size_v(reg);

        /* Disable the evicted_cb and illegal_compstat_access interrupts */
        reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
        reg &= ~ltc_ltcs_ltss_intr_en_evicted_cb_m();
        reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m();
        nvgpu_writel_check(g, ltc_ltcs_ltss_intr_r(), reg);

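        /* Honor the platform's illegal compstat interrupt setting, if the
         * HAL provides the hook. */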
        if (g->ops.ltc.intr_en_illegal_compstat)
                g->ops.ltc.intr_en_illegal_compstat(g,
                                g->ltc_intr_en_illegal_compstat);

        /* Enable ECC interrupts */
        ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
        ltc_intr |= ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f() |
                    ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f();
        nvgpu_writel_check(g, ltc_ltcs_ltss_intr_r(), ltc_intr);
}

void gv11b_ltc_intr_en_illegal_compstat(struct gk20a *g, bool enable)
{
        u32 val;

        /* disable/enable the illegal_compstat interrupt */
        val = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
        if (enable)
                val = set_field(val,
                        ltc_ltcs_ltss_intr_en_illegal_compstat_m(),
                        ltc_ltcs_ltss_intr_en_illegal_compstat_enabled_f());
        else
                val = set_field(val,
                        ltc_ltcs_ltss_intr_en_illegal_compstat_m(),
                        ltc_ltcs_ltss_intr_en_illegal_compstat_disabled_f());
        gk20a_writel(g, ltc_ltcs_ltss_intr_r(), val);
}
void gv11b_ltc_isr(struct gk20a *g)
{
        u32 mc_intr, ltc_intr3;
        unsigned int ltc, slice;
        u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
        u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);
        u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt;
        u32 corrected_delta, uncorrected_delta;
        u32 corrected_overflow, uncorrected_overflow;

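        /*
         * mc_intr_ltc_r() carries one pending bit per LTC; walk every LTC
         * with a pending interrupt and check each of its slices.
         */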
        mc_intr = gk20a_readl(g, mc_intr_ltc_r());
        for (ltc = 0; ltc < g->ltc_count; ltc++) {
                if ((mc_intr & (1U << ltc)) == 0U)
                        continue;

                for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
                        u32 offset = ltc_stride * ltc + lts_stride * slice;

                        ltc_intr3 = gk20a_readl(g, ltc_ltc0_lts0_intr3_r() +
                                                offset);

                        /* Detect and handle ECC parity errors */
                        if (ltc_intr3 &
                            (ltc_ltcs_ltss_intr3_ecc_uncorrected_m() |
                             ltc_ltcs_ltss_intr3_ecc_corrected_m())) {
                                ecc_status = gk20a_readl(g,
                                        ltc_ltc0_lts0_l2_cache_ecc_status_r() + offset);
                                ecc_addr = gk20a_readl(g,
                                        ltc_ltc0_lts0_l2_cache_ecc_address_r() + offset);
                                corrected_cnt = gk20a_readl(g,
                                        ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset);
                                uncorrected_cnt = gk20a_readl(g,
                                        ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset);

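                                /*
                                 * The hardware counters accumulate since they
                                 * were last cleared; the status overflow bits
                                 * flag a wrap of the running totals.
                                 */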
                                corrected_delta =
                                        ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_v(corrected_cnt);
                                uncorrected_delta =
                                        ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_v(uncorrected_cnt);
                                corrected_overflow = ecc_status &
                                        ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_m();
                                uncorrected_overflow = ecc_status &
                                        ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m();

                                /* Zero any counter that recorded errors so the
                                 * next delta starts fresh */
                                if ((corrected_delta > 0U) || corrected_overflow) {
                                        nvgpu_writel_check(g,
                                                ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset, 0);
                                }
                                if ((uncorrected_delta > 0U) || uncorrected_overflow) {
                                        nvgpu_writel_check(g,
                                                ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset, 0);
                                }

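                                /* Resetting the ECC status register clears the
                                 * interrupt condition */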
                                nvgpu_writel_check(g,
                                        ltc_ltc0_lts0_l2_cache_ecc_status_r() + offset,
                                        ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f());

                                /* Update per-slice counters; a wrapped counter
                                 * contributes its full range on top of the
                                 * delta */
                                if (corrected_overflow)
                                        corrected_delta += (0x1U <<
                                                ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s());
                                if (uncorrected_overflow)
                                        uncorrected_delta += (0x1U <<
                                                ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s());

                                g->ecc.ltc.ecc_sec_count[ltc][slice].counter +=
                                        corrected_delta;
                                g->ecc.ltc.ecc_ded_count[ltc][slice].counter +=
                                        uncorrected_delta;
                                nvgpu_log(g, gpu_dbg_intr,
                                        "ltc:%d lts:%d cache ecc interrupt intr: 0x%x",
                                        ltc, slice, ltc_intr3);

                                if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m())
                                        nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected");
                                if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m())
                                        nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected");
                                if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m())
                                        nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected");
                                if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m())
                                        nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected");
                                if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m())
                                        nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected");
                                if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m())
                                        nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected");

                                if (corrected_overflow || uncorrected_overflow)
                                        nvgpu_info(g, "ecc counter overflow!");

                                nvgpu_log(g, gpu_dbg_intr,
                                        "ecc error address: 0x%x", ecc_addr);
                        }
                }
        }

        /* Hand any remaining (non-ECC) LTC interrupts to the gp10b handler */
        gp10b_ltc_isr(g);
}
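
For reference, these entry points are not called directly; they are installed into the per-chip HAL during gv11b initialization. The sketch below shows how such wiring typically looks. It is a hypothetical excerpt, not the actual hal_gv11b.c: the op names are inferred from the callers in this file (g->ops.ltc.intr_en_illegal_compstat, the gp10b fallback) and may differ in a given nvgpu snapshot.

        /* Hypothetical HAL wiring sketch; field names are assumptions. */
        static const struct gpu_ops gv11b_ops = {
                .ltc = {
                        .init_fs_state = gv11b_ltc_init_fs_state,
                        .set_zbc_s_entry = gv11b_ltc_set_zbc_stencil_entry,
                        .intr_en_illegal_compstat =
                                gv11b_ltc_intr_en_illegal_compstat,
                        .isr = gv11b_ltc_isr,
                        /* ops not overridden here would typically be
                         * inherited from the gp10b LTC implementation */
                },
        };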