Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/ltc_gp10b.c')
-rw-r--r--  drivers/gpu/nvgpu/gp10b/ltc_gp10b.c | 226
1 file changed, 226 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
new file mode 100644
index 00000000..92a899b8
--- /dev/null
+++ b/drivers/gpu/nvgpu/gp10b/ltc_gp10b.c
@@ -0,0 +1,226 @@
/*
 * GP10B L2
 *
 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <dt-bindings/memory/tegra-swgroup.h>

#include <nvgpu/ltc.h>
#include <nvgpu/log.h>
#include <nvgpu/enabled.h>

#include <nvgpu/hw/gp10b/hw_mc_gp10b.h>
#include <nvgpu/hw/gp10b/hw_ltc_gp10b.h>

#include "gk20a/gk20a.h"
#include "gm20b/ltc_gm20b.h"

#include "ltc_gp10b.h"

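/*
 * Compute the total L2 size in bytes from the per-slice geometry in
 * TSTG_INFO_1: ltc_count * slice_size_in_kb * 1024 * slices_per_l2.
 * For example, a part with 2 LTCs, 2 slices per L2 and 64 KB slices
 * would report 2 * 64 * 1024 * 2 = 256 KB (illustrative numbers only,
 * not a statement about any particular chip).
 */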
int gp10b_determine_L2_size_bytes(struct gk20a *g)
{
	u32 tmp;
	int ret;

	gk20a_dbg_fn("");

	tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_info_1_r());

	ret = g->ltc_count *
		ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(tmp) * 1024 *
		ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(tmp);

	gk20a_dbg(gpu_dbg_info, "L2 size: %d\n", ret);

	gk20a_dbg_fn("done");

	return ret;
}

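/*
 * Size and allocate the compression bit cache (CBC) backing store and
 * the comptag allocator. One comptag line covers 64 KB, so a budget of
 * max_comptag_mem megabytes needs max_comptag_mem * (1024 / 64), i.e.
 * max_comptag_mem << 4, comptag lines, clamped to the hardware limit
 * advertised in CBC_CTRL3.
 */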
int gp10b_ltc_init_comptags(struct gk20a *g, struct gr_gk20a *gr)
{
	/* max memory size (MB) to cover */
	u32 max_size = gr->max_comptag_mem;
	/* one tag line covers 64KB */
	u32 max_comptag_lines = max_size << 4;

	u32 hw_max_comptag_lines =
		ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v();

	u32 cbc_param =
		gk20a_readl(g, ltc_ltcs_ltss_cbc_param_r());
	u32 comptags_per_cacheline =
		ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(cbc_param);
	u32 cacheline_size =
		512 << ltc_ltcs_ltss_cbc_param_cache_line_size_v(cbc_param);
	u32 slices_per_ltc =
		ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(cbc_param);
	u32 cbc_param2 =
		gk20a_readl(g, ltc_ltcs_ltss_cbc_param2_r());
	u32 gobs_per_comptagline_per_slice =
		ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(cbc_param2);

	u32 compbit_backing_size;

	int err;

	gk20a_dbg_fn("");

	if (max_comptag_lines == 0)
		return 0;

	if (max_comptag_lines > hw_max_comptag_lines)
		max_comptag_lines = hw_max_comptag_lines;

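	/*
	 * Backing store sizing: bytes per slice (comptag lines times
	 * GOBs per comptag line per slice) rounded up to a whole
	 * cacheline, then scaled by slices per LTC and LTC count and
	 * rounded up to the compressible page size.
	 */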
	compbit_backing_size =
		roundup(max_comptag_lines * gobs_per_comptagline_per_slice,
			cacheline_size);
	compbit_backing_size =
		roundup(compbit_backing_size * slices_per_ltc * g->ltc_count,
			g->ops.fb.compressible_page_size(g));

	/* aligned to 2KB * ltc_count */
	compbit_backing_size +=
		g->ltc_count << ltc_ltcs_ltss_cbc_base_alignment_shift_v();

	/* must be a multiple of 64KB */
	compbit_backing_size = roundup(compbit_backing_size, 64 * 1024);

	gk20a_dbg_info("compbit backing store size : %d",
		compbit_backing_size);
	gk20a_dbg_info("max comptag lines : %d",
		max_comptag_lines);
	gk20a_dbg_info("gobs_per_comptagline_per_slice: %d",
		gobs_per_comptagline_per_slice);

	err = nvgpu_ltc_alloc_cbc(g, compbit_backing_size);
	if (err)
		return err;

	err = gk20a_comptag_allocator_init(g, &gr->comp_tags,
					   max_comptag_lines);
	if (err)
		return err;

	gr->comptags_per_cacheline = comptags_per_cacheline;
	gr->slices_per_ltc = slices_per_ltc;
	gr->cacheline_size = cacheline_size;
	gr->gobs_per_comptagline_per_slice = gobs_per_comptagline_per_slice;

	return 0;
}

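/*
 * LTC interrupt service routine. MC_INTR_LTC latches one pending bit
 * per LTC; for each pending LTC, every slice's interrupt register is
 * read, ECC single-bit (SEC) and double-bit (DED) error counts are
 * accumulated into the per-LTC ECC statistics and cleared in the
 * report register, and the slice interrupt is acknowledged by writing
 * the latched value back. A SEC error additionally triggers an L2
 * flush, presumably so the corrected line is written back to memory.
 */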
void gp10b_ltc_isr(struct gk20a *g)
{
	u32 mc_intr, ltc_intr;
	unsigned int ltc, slice;
	u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE);
	u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE);

	mc_intr = gk20a_readl(g, mc_intr_ltc_r());
	nvgpu_err(g, "mc_ltc_intr: %08x", mc_intr);
	for (ltc = 0; ltc < g->ltc_count; ltc++) {
		if ((mc_intr & (1U << ltc)) == 0)
			continue;
		for (slice = 0; slice < g->gr.slices_per_ltc; slice++) {
			u32 offset = ltc_stride * ltc + lts_stride * slice;

			ltc_intr = gk20a_readl(g,
					ltc_ltc0_lts0_intr_r() + offset);

			/* Detect and handle ECC errors */
			if (ltc_intr &
			    ltc_ltcs_ltss_intr_ecc_sec_error_pending_f()) {
				u32 ecc_stats_reg_val;

				nvgpu_err(g,
					"Single bit error detected in GPU L2!");

				ecc_stats_reg_val =
					gk20a_readl(g,
						ltc_ltc0_lts0_dstg_ecc_report_r() + offset);
				g->ecc.gr.t18x.l2_sec_count.counters[ltc] +=
					ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(ecc_stats_reg_val);
				ecc_stats_reg_val &=
					~(ltc_ltc0_lts0_dstg_ecc_report_sec_count_m());
				gk20a_writel(g,
					ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
					ecc_stats_reg_val);

				g->ops.mm.l2_flush(g, true);
			}
			if (ltc_intr &
			    ltc_ltcs_ltss_intr_ecc_ded_error_pending_f()) {
				u32 ecc_stats_reg_val;

				nvgpu_err(g,
					"Double bit error detected in GPU L2!");

				ecc_stats_reg_val =
					gk20a_readl(g,
						ltc_ltc0_lts0_dstg_ecc_report_r() + offset);
				g->ecc.gr.t18x.l2_ded_count.counters[ltc] +=
					ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(ecc_stats_reg_val);
				ecc_stats_reg_val &=
					~(ltc_ltc0_lts0_dstg_ecc_report_ded_count_m());
				gk20a_writel(g,
					ltc_ltc0_lts0_dstg_ecc_report_r() + offset,
					ecc_stats_reg_val);
			}

			nvgpu_err(g, "ltc%d, slice %d: %08x",
				  ltc, slice, ltc_intr);
			gk20a_writel(g, ltc_ltc0_lts0_intr_r() + offset,
				     ltc_intr);
		}
	}
}

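/*
 * Floorsweeping-state init: run the common gm20b setup, program the
 * AXI stream ID used for LTC-originated memory traffic (TEGRA_SID_GPUB),
 * and enable the ECC SEC/DED interrupts serviced by gp10b_ltc_isr().
 */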
void gp10b_ltc_init_fs_state(struct gk20a *g)
{
	u32 ltc_intr;

	gm20b_ltc_init_fs_state(g);

	gk20a_writel(g, ltc_ltca_g_axi_pctrl_r(),
		     ltc_ltca_g_axi_pctrl_user_sid_f(TEGRA_SID_GPUB));

	/* Enable ECC interrupts */
	ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r());
	ltc_intr |= ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f() |
		    ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f();
	gk20a_writel(g, ltc_ltcs_ltss_intr_r(), ltc_intr);
}

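/*
 * Toggle L2 caching through the bypass bit in TSTG_SET_MGMT_2:
 * clearing the bit restores normal caching, setting it makes all
 * requests bypass the L2.
 */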
void gp10b_ltc_set_enabled(struct gk20a *g, bool enabled)
{
	u32 reg_f = ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f();
	u32 reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r());

	if (enabled)
		/* bypass disabled (normal caching ops) */
		reg &= ~reg_f;
	else
		/* bypass enabled (no caching) */
		reg |= reg_f;

	gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_2_r(), reg);
}