summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gp10b/pmu_gp10b.c')
-rw-r--r--drivers/gpu/nvgpu/gp10b/pmu_gp10b.c399
1 file changed, 399 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
new file mode 100644
index 00000000..147cd020
--- /dev/null
+++ b/drivers/gpu/nvgpu/gp10b/pmu_gp10b.c
@@ -0,0 +1,399 @@
1/*
2 * GP10B PMU
3 *
4 * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <nvgpu/pmu.h>
26#include <nvgpu/log.h>
27#include <nvgpu/fuse.h>
28#include <nvgpu/enabled.h>
29
30#include "gk20a/gk20a.h"
31#include "gk20a/pmu_gk20a.h"
32#include "gm20b/acr_gm20b.h"
33#include "gm20b/pmu_gm20b.h"
34
35#include "pmu_gp10b.h"
36
37#include <nvgpu/hw/gp10b/hw_pwr_gp10b.h>
38#include <nvgpu/hw/gp10b/hw_fuse_gp10b.h>
39
40#define gp10b_dbg_pmu(fmt, arg...) \
41 gk20a_dbg(gpu_dbg_pmu, fmt, ##arg)
42/*!
43 * Structure/object which single register write need to be done during PG init
44 * sequence to set PROD values.
45 */
46struct pg_init_sequence_list {
47 u32 regaddr;
48 u32 writeval;
49};
50
51/* PROD settings for ELPG sequencing registers*/
52static struct pg_init_sequence_list _pginitseq_gp10b[] = {
53 {0x0010ab10, 0x0000868B} ,
54 {0x0010e118, 0x8590848F} ,
55 {0x0010e000, 0} ,
56 {0x0010e06c, 0x000000A3} ,
57 {0x0010e06c, 0x000000A0} ,
58 {0x0010e06c, 0x00000095} ,
59 {0x0010e06c, 0x000000A6} ,
60 {0x0010e06c, 0x0000008C} ,
61 {0x0010e06c, 0x00000080} ,
62 {0x0010e06c, 0x00000081} ,
63 {0x0010e06c, 0x00000087} ,
64 {0x0010e06c, 0x00000088} ,
65 {0x0010e06c, 0x0000008D} ,
66 {0x0010e06c, 0x00000082} ,
67 {0x0010e06c, 0x00000083} ,
68 {0x0010e06c, 0x00000089} ,
69 {0x0010e06c, 0x0000008A} ,
70 {0x0010e06c, 0x000000A2} ,
71 {0x0010e06c, 0x00000097} ,
72 {0x0010e06c, 0x00000092} ,
73 {0x0010e06c, 0x00000099} ,
74 {0x0010e06c, 0x0000009B} ,
75 {0x0010e06c, 0x0000009D} ,
76 {0x0010e06c, 0x0000009F} ,
77 {0x0010e06c, 0x000000A1} ,
78 {0x0010e06c, 0x00000096} ,
79 {0x0010e06c, 0x00000091} ,
80 {0x0010e06c, 0x00000098} ,
81 {0x0010e06c, 0x0000009A} ,
82 {0x0010e06c, 0x0000009C} ,
83 {0x0010e06c, 0x0000009E} ,
84 {0x0010ab14, 0x00000000} ,
85 {0x0010e024, 0x00000000} ,
86 {0x0010e028, 0x00000000} ,
87 {0x0010e11c, 0x00000000} ,
88 {0x0010ab1c, 0x140B0BFF} ,
89 {0x0010e020, 0x0E2626FF} ,
90 {0x0010e124, 0x251010FF} ,
91 {0x0010ab20, 0x89abcdef} ,
92 {0x0010ab24, 0x00000000} ,
93 {0x0010e02c, 0x89abcdef} ,
94 {0x0010e030, 0x00000000} ,
95 {0x0010e128, 0x89abcdef} ,
96 {0x0010e12c, 0x00000000} ,
97 {0x0010ab28, 0x7FFFFFFF} ,
98 {0x0010ab2c, 0x70000000} ,
99 {0x0010e034, 0x7FFFFFFF} ,
100 {0x0010e038, 0x70000000} ,
101 {0x0010e130, 0x7FFFFFFF} ,
102 {0x0010e134, 0x70000000} ,
103 {0x0010ab30, 0x00000000} ,
104 {0x0010ab34, 0x00000001} ,
105 {0x00020004, 0x00000000} ,
106 {0x0010e138, 0x00000000} ,
107 {0x0010e040, 0x00000000} ,
108 {0x0010e168, 0x00000000} ,
109 {0x0010e114, 0x0000A5A4} ,
110 {0x0010e110, 0x00000000} ,
111 {0x0010e10c, 0x8590848F} ,
112 {0x0010e05c, 0x00000000} ,
113 {0x0010e044, 0x00000000} ,
114 {0x0010a644, 0x0000868B} ,
115 {0x0010a648, 0x00000000} ,
116 {0x0010a64c, 0x00829493} ,
117 {0x0010a650, 0x00000000} ,
118 {0x0010e000, 0} ,
119 {0x0010e068, 0x000000A3} ,
120 {0x0010e068, 0x000000A0} ,
121 {0x0010e068, 0x00000095} ,
122 {0x0010e068, 0x000000A6} ,
123 {0x0010e068, 0x0000008C} ,
124 {0x0010e068, 0x00000080} ,
125 {0x0010e068, 0x00000081} ,
126 {0x0010e068, 0x00000087} ,
127 {0x0010e068, 0x00000088} ,
128 {0x0010e068, 0x0000008D} ,
129 {0x0010e068, 0x00000082} ,
130 {0x0010e068, 0x00000083} ,
131 {0x0010e068, 0x00000089} ,
132 {0x0010e068, 0x0000008A} ,
133 {0x0010e068, 0x000000A2} ,
134 {0x0010e068, 0x00000097} ,
135 {0x0010e068, 0x00000092} ,
136 {0x0010e068, 0x00000099} ,
137 {0x0010e068, 0x0000009B} ,
138 {0x0010e068, 0x0000009D} ,
139 {0x0010e068, 0x0000009F} ,
140 {0x0010e068, 0x000000A1} ,
141 {0x0010e068, 0x00000096} ,
142 {0x0010e068, 0x00000091} ,
143 {0x0010e068, 0x00000098} ,
144 {0x0010e068, 0x0000009A} ,
145 {0x0010e068, 0x0000009C} ,
146 {0x0010e068, 0x0000009E} ,
147 {0x0010e000, 0} ,
148 {0x0010e004, 0x0000008E},
149};
150
151static void gp10b_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask,
152 u32 flags)
153{
154 struct nvgpu_pmu *pmu = &g->pmu;
155 struct pmu_cmd cmd;
156 u32 seq;
157
158 gk20a_dbg_fn("");
159
160 gp10b_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done);
161 if (g->pmu_lsf_pmu_wpr_init_done) {
162 /* send message to load FECS falcon */
163 memset(&cmd, 0, sizeof(struct pmu_cmd));
164 cmd.hdr.unit_id = PMU_UNIT_ACR;
165 cmd.hdr.size = PMU_CMD_HDR_SIZE +
166 sizeof(struct pmu_acr_cmd_bootstrap_multiple_falcons);
167 cmd.cmd.acr.boot_falcons.cmd_type =
168 PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS;
169 cmd.cmd.acr.boot_falcons.flags = flags;
170 cmd.cmd.acr.boot_falcons.falconidmask =
171 falconidmask;
172 cmd.cmd.acr.boot_falcons.usevamask = 0;
173 cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0x0;
174 cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0x0;
175 gp10b_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n",
176 falconidmask);
177 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
178 pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0);
179 }
180
181 gk20a_dbg_fn("done");
182 return;
183}
184
185int gp10b_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
186{
187 u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
188
189 /* GM20B PMU supports loading FECS and GPCCS only */
190 if (falconidmask == 0)
191 return -EINVAL;
192 if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
193 (1 << LSF_FALCON_ID_GPCCS)))
194 return -EINVAL;
195 g->pmu_lsf_loaded_falcon_id = 0;
196 /* check whether pmu is ready to bootstrap lsf if not wait for it */
197 if (!g->pmu_lsf_pmu_wpr_init_done) {
198 pmu_wait_message_cond(&g->pmu,
199 gk20a_get_gr_idle_timeout(g),
200 &g->pmu_lsf_pmu_wpr_init_done, 1);
201 /* check again if it still not ready indicate an error */
202 if (!g->pmu_lsf_pmu_wpr_init_done) {
203 nvgpu_err(g, "PMU not ready to load LSF");
204 return -ETIMEDOUT;
205 }
206 }
207 /* load falcon(s) */
208 gp10b_pmu_load_multiple_falcons(g, falconidmask, flags);
209 pmu_wait_message_cond(&g->pmu,
210 gk20a_get_gr_idle_timeout(g),
211 &g->pmu_lsf_loaded_falcon_id, falconidmask);
212 if (g->pmu_lsf_loaded_falcon_id != falconidmask)
213 return -ETIMEDOUT;
214 return 0;
215}
216
217static void pmu_handle_gr_param_msg(struct gk20a *g, struct pmu_msg *msg,
218 void *param, u32 handle, u32 status)
219{
220 gk20a_dbg_fn("");
221
222 if (status != 0) {
223 nvgpu_err(g, "GR PARAM cmd aborted");
224 /* TBD: disable ELPG */
225 return;
226 }
227
228 gp10b_dbg_pmu("GR PARAM is acknowledged from PMU %x \n",
229 msg->msg.pg.msg_type);
230
231 return;
232}
233
234int gp10b_pg_gr_init(struct gk20a *g, u32 pg_engine_id)
235{
236 struct nvgpu_pmu *pmu = &g->pmu;
237 struct pmu_cmd cmd;
238 u32 seq;
239
240 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
241 memset(&cmd, 0, sizeof(struct pmu_cmd));
242 cmd.hdr.unit_id = PMU_UNIT_PG;
243 cmd.hdr.size = PMU_CMD_HDR_SIZE +
244 sizeof(struct pmu_pg_cmd_gr_init_param);
245 cmd.cmd.pg.gr_init_param.cmd_type =
246 PMU_PG_CMD_ID_PG_PARAM;
247 cmd.cmd.pg.gr_init_param.sub_cmd_id =
248 PMU_PG_PARAM_CMD_GR_INIT_PARAM;
249 cmd.cmd.pg.gr_init_param.featuremask =
250 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED;
251
252 gp10b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM ");
253 nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
254 pmu_handle_gr_param_msg, pmu, &seq, ~0);
255
256 } else
257 return -EINVAL;
258
259 return 0;
260}
261
262void gp10b_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
263 struct pmu_pg_stats_data *pg_stat_data)
264{
265 struct nvgpu_pmu *pmu = &g->pmu;
266 struct pmu_pg_stats_v1 stats;
267
268 nvgpu_flcn_copy_from_dmem(pmu->flcn,
269 pmu->stat_dmem_offset[pg_engine_id],
270 (u8 *)&stats, sizeof(struct pmu_pg_stats_v1), 0);
271
272 pg_stat_data->ingating_time = stats.total_sleep_timeus;
273 pg_stat_data->ungating_time = stats.total_nonsleep_timeus;
274 pg_stat_data->gating_cnt = stats.entry_count;
275 pg_stat_data->avg_entry_latency_us = stats.entrylatency_avgus;
276 pg_stat_data->avg_exit_latency_us = stats.exitlatency_avgus;
277}
278
279int gp10b_pmu_setup_elpg(struct gk20a *g)
280{
281 int ret = 0;
282 u32 reg_writes;
283 u32 index;
284
285 gk20a_dbg_fn("");
286
287 if (g->elpg_enabled) {
288 reg_writes = ((sizeof(_pginitseq_gp10b) /
289 sizeof((_pginitseq_gp10b)[0])));
290 /* Initialize registers with production values*/
291 for (index = 0; index < reg_writes; index++) {
292 gk20a_writel(g, _pginitseq_gp10b[index].regaddr,
293 _pginitseq_gp10b[index].writeval);
294 }
295 }
296
297 gk20a_dbg_fn("done");
298 return ret;
299}
300
301void gp10b_write_dmatrfbase(struct gk20a *g, u32 addr)
302{
303 gk20a_writel(g, pwr_falcon_dmatrfbase_r(),
304 addr);
305 gk20a_writel(g, pwr_falcon_dmatrfbase1_r(),
306 0x0);
307}
308
309int gp10b_init_pmu_setup_hw1(struct gk20a *g)
310{
311 struct nvgpu_pmu *pmu = &g->pmu;
312 int err;
313
314 gk20a_dbg_fn("");
315
316 nvgpu_mutex_acquire(&pmu->isr_mutex);
317 nvgpu_flcn_reset(pmu->flcn);
318 pmu->isr_enabled = true;
319 nvgpu_mutex_release(&pmu->isr_mutex);
320
321 /* setup apertures - virtual */
322 gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE),
323 pwr_fbif_transcfg_mem_type_virtual_f());
324 gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_VIRT),
325 pwr_fbif_transcfg_mem_type_virtual_f());
326
327 /* setup apertures - physical */
328 gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_VID),
329 pwr_fbif_transcfg_mem_type_physical_f() |
330 pwr_fbif_transcfg_target_local_fb_f());
331 gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_COH),
332 pwr_fbif_transcfg_mem_type_physical_f() |
333 pwr_fbif_transcfg_target_coherent_sysmem_f());
334 gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_NCOH),
335 pwr_fbif_transcfg_mem_type_physical_f() |
336 pwr_fbif_transcfg_target_noncoherent_sysmem_f());
337
338 err = g->ops.pmu.pmu_nsbootstrap(pmu);
339 if (err)
340 return err;
341
342 gk20a_dbg_fn("done");
343 return 0;
344
345}
346
347bool gp10b_is_lazy_bootstrap(u32 falcon_id)
348{
349 bool enable_status = false;
350
351 switch (falcon_id) {
352 case LSF_FALCON_ID_FECS:
353 enable_status = false;
354 break;
355 case LSF_FALCON_ID_GPCCS:
356 enable_status = true;
357 break;
358 default:
359 break;
360 }
361
362 return enable_status;
363}
364
365bool gp10b_is_priv_load(u32 falcon_id)
366{
367 bool enable_status = false;
368
369 switch (falcon_id) {
370 case LSF_FALCON_ID_FECS:
371 enable_status = false;
372 break;
373 case LSF_FALCON_ID_GPCCS:
374 enable_status = true;
375 break;
376 default:
377 break;
378 }
379
380 return enable_status;
381}
382
383/*Dump Security related fuses*/
384void pmu_dump_security_fuses_gp10b(struct gk20a *g)
385{
386 u32 val;
387
388 nvgpu_err(g, "FUSE_OPT_SEC_DEBUG_EN_0: 0x%x",
389 gk20a_readl(g, fuse_opt_sec_debug_en_r()));
390 nvgpu_err(g, "FUSE_OPT_PRIV_SEC_EN_0: 0x%x",
391 gk20a_readl(g, fuse_opt_priv_sec_en_r()));
392 nvgpu_tegra_fuse_read_gcplex_config_fuse(g, &val);
393 nvgpu_err(g, "FUSE_GCPLEX_CONFIG_FUSE_0: 0x%x", val);
394}
395
/* GP10B always carries an on-chip PMU, so this is unconditionally true. */
bool gp10b_is_pmu_supported(struct gk20a *g)
{
	return true;
}