Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/ltc_common.c')
 drivers/gpu/nvgpu/gk20a/ltc_common.c | 243
 1 file changed, 243 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c
new file mode 100644
index 00000000..cbb27cc7
--- /dev/null
+++ b/drivers/gpu/nvgpu/gk20a/ltc_common.c
@@ -0,0 +1,243 @@
/*
 * drivers/video/tegra/host/gk20a/ltc_common.c
 *
 * GK20A Graphics
 *
 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/delay.h>

#include "gk20a.h"
#include "gr_gk20a.h"

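/*
 * The L2 size follows from per-LTS geometry:
 *   size = active LTCs * LTSs per LTC * ways * sets * bytes per line.
 * As an illustrative example (assumed values, not read from hardware):
 * one LTC with one LTS, 16 active ways, 64 sets and 128-byte lines
 * gives 1 * 1 * 16 * 64 * 128 = 128 KiB.
 */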
static int gk20a_determine_L2_size_bytes(struct gk20a *g)
{
	const u32 gpuid = GK20A_GPUID(g->gpu_characteristics.arch,
				      g->gpu_characteristics.impl);
	u32 lts_per_ltc;
	u32 ways;
	u32 sets;
	u32 bytes_per_line;
	u32 active_ltcs;
	u32 cache_size;

	u32 tmp;
	u32 active_sets_value;

	tmp = gk20a_readl(g, ltc_ltc0_lts0_tstg_cfg1_r());
	ways = hweight32(ltc_ltc0_lts0_tstg_cfg1_active_ways_v(tmp));

	active_sets_value = ltc_ltc0_lts0_tstg_cfg1_active_sets_v(tmp);
	if (active_sets_value == ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v()) {
		sets = 64;
	} else if (active_sets_value ==
		   ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v()) {
		sets = 32;
	} else if (active_sets_value ==
		   ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v()) {
		sets = 16;
	} else {
		dev_err(dev_from_gk20a(g),
			"Unknown constant %u for active sets",
			(unsigned)active_sets_value);
		sets = 0;
	}

	active_ltcs = g->gr.num_fbps;

	/* chip-specific values */
	switch (gpuid) {
	case GK20A_GPUID_GK20A:
		lts_per_ltc = 1;
		bytes_per_line = 128;
		break;

	default:
		dev_err(dev_from_gk20a(g), "Unknown GPU id 0x%02x\n",
			(unsigned)gpuid);
		lts_per_ltc = 0;
		bytes_per_line = 0;
	}

	cache_size = active_ltcs * lts_per_ltc * ways * sets * bytes_per_line;

	return cache_size;
}

/*
 * Set the maximum number of ways that can have the "EVICT_LAST" class.
 */
static void gk20a_ltc_set_max_ways_evict_last(struct gk20a *g, u32 max_ways)
{
	u32 mgmt_reg;

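	/* Read-modify-write: the _f(~0) form builds a mask covering the
	 * whole max_ways_evict_last field, so the old value is cleared
	 * before the new limit is ORed in. */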
	mgmt_reg = gk20a_readl(g, ltc_ltcs_ltss_tstg_set_mgmt_r()) &
		~ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(~0);
	mgmt_reg |= ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(max_ways);

	gk20a_writel(g, ltc_ltcs_ltss_tstg_set_mgmt_r(), mgmt_reg);
}

/*
 * Sets the ZBC color for the passed index.
 */
static void gk20a_ltc_set_zbc_color_entry(struct gk20a *g,
					  struct zbc_entry *color_val,
					  u32 index)
{
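	/* Entries are offset by GK20A_STARTOF_ZBC_TABLE; hardware index 0
	 * appears to be reserved to mean "not ZBC'd". */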
	u32 i;
	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;

	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));

	for (i = 0;
	     i < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); i++)
		gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(i),
			     color_val->color_l2[i]);
}

/*
 * Sets the ZBC depth for the passed index.
 */
static void gk20a_ltc_set_zbc_depth_entry(struct gk20a *g,
					  struct zbc_entry *depth_val,
					  u32 index)
{
	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;

	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));

	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(),
		     depth_val->depth);
}

/*
 * Clear the L2 ZBC color table for the passed index.
 */
static void gk20a_ltc_clear_zbc_color_entry(struct gk20a *g, u32 index)
{
	u32 i;
	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;

	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));

	for (i = 0;
	     i < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); i++)
		gk20a_writel(g,
			     ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(i), 0);
}

/*
 * Clear the L2 ZBC depth entry for the passed index.
 */
static void gk20a_ltc_clear_zbc_depth_entry(struct gk20a *g, u32 index)
{
	u32 real_index = index + GK20A_STARTOF_ZBC_TABLE;

	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
		     ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index));

	gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(), 0);
}

static int gk20a_ltc_init_zbc(struct gk20a *g, struct gr_gk20a *gr)
{
	u32 i, j;

	/* Reset the ZBC clear tables: zero each color and depth entry. */
	for (i = 0; i < GK20A_SIZEOF_ZBC_TABLE -
		    GK20A_STARTOF_ZBC_TABLE; i++) {
		gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(),
			(gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r()) &
			 ~ltc_ltcs_ltss_dstg_zbc_index_address_f(~0)) |
			ltc_ltcs_ltss_dstg_zbc_index_address_f(
				i + GK20A_STARTOF_ZBC_TABLE));
		for (j = 0; j < ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(); j++)
			gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(j), 0);
		gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(), 0);
	}

	gr_gk20a_clear_zbc_table(g, gr);
	gr_gk20a_load_zbc_default_table(g, gr);

	return 0;
}

static void gk20a_ltc_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
{
	u32 compbit_base_post_divide;
	u64 compbit_base_post_multiply64;
	u64 compbit_store_base_iova =
		NV_MC_SMMU_VADDR_TRANSLATE(gr->compbit_store.base_iova);
	u64 compbit_base_post_divide64 = (compbit_store_base_iova >>
		ltc_ltcs_ltss_cbc_base_alignment_shift_v());

	do_div(compbit_base_post_divide64, gr->num_fbps);
	compbit_base_post_divide = u64_lo32(compbit_base_post_divide64);

	compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
		gr->num_fbps) << ltc_ltcs_ltss_cbc_base_alignment_shift_v();

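	/* do_div() truncates. Multiply the quotient back out and compare:
	 * if the reconstructed address falls below the real store base,
	 * round up so the CBC base never points below its backing memory. */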
	if (compbit_base_post_multiply64 < compbit_store_base_iova)
		compbit_base_post_divide++;

	gk20a_writel(g, ltc_ltcs_ltss_cbc_base_r(),
		     compbit_base_post_divide);

	gk20a_dbg(gpu_dbg_info | gpu_dbg_map | gpu_dbg_pte,
		  "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
		  (u32)(compbit_store_base_iova >> 32),
		  (u32)(compbit_store_base_iova & 0xffffffff),
		  compbit_base_post_divide);
}

/* Flushes the compression bit cache as well as "data".
 * Note: the name here is a bit of a misnomer. ELPG uses this
 * internally... but ELPG doesn't have to be on to do it manually.
 */
static void gk20a_mm_g_elpg_flush_locked(struct gk20a *g)
{
	u32 data;
	s32 retry = 100;

	gk20a_dbg_fn("");

	/* Make sure all previous writes are committed to the L2. There is
	 * no guarantee that the writes have reached DRAM. This acts as a
	 * sysmembar internal to the L2. */
	gk20a_writel(g, ltc_ltss_g_elpg_r(),
		     ltc_ltss_g_elpg_flush_pending_f());
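	/* Poll until the flush-pending bit deasserts. Timing on
	 * pre-silicon platforms is unbounded, so poll indefinitely there;
	 * on silicon, give up once the retry budget is exhausted. */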
	do {
		data = gk20a_readl(g, ltc_ltss_g_elpg_r());

		if (ltc_ltss_g_elpg_flush_v(data) ==
		    ltc_ltss_g_elpg_flush_pending_v()) {
			gk20a_dbg_info("g_elpg_flush 0x%x", data);
			retry--;
			usleep_range(20, 40);
		} else
			break;
	} while (retry >= 0 || !tegra_platform_is_silicon());

	if (retry < 0)
		gk20a_warn(dev_from_gk20a(g),
			   "g_elpg_flush too many retries");
}