summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gp106/pmu_gp106.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gp106/pmu_gp106.c')
-rw-r--r--  drivers/gpu/nvgpu/gp106/pmu_gp106.c  296
1 files changed, 296 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
new file mode 100644
index 00000000..eecd7351
--- /dev/null
+++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c
@@ -0,0 +1,296 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include <linux/delay.h> /* for udelay */
15#include "gk20a/gk20a.h"
16#include "gk20a/pmu_gk20a.h"
17
18#include "gm206/pmu_gm206.h"
19#include "gm20b/pmu_gm20b.h"
20#include "gp10b/pmu_gp10b.h"
21#include "gp106/pmu_gp106.h"
22#include "gp106/acr_gp106.h"
23#include "gp106/hw_psec_gp106.h"
24#include "clk/clk_mclk.h"
25#include "hw_mc_gp106.h"
26#include "hw_pwr_gp106.h"
27#include "lpwr/lpwr.h"
28#include "lpwr/rppg.h"
29
30#define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000
31#define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10
32
/*
 * Bring the PMU falcon engine out of (or put it into) reset.
 *
 * @pmu:    PMU state embedded in the gk20a device
 * @enable: true  - deassert reset, reload PMU clock-gating prods, and wait
 *                  for IMEM/DMEM scrubbing to finish
 *          false - assert reset and return immediately
 *
 * Returns 0 on success, -ETIMEDOUT if memory scrubbing does not complete
 * within PMU_MEM_SCRUBBING_TIMEOUT_MAX microseconds (on silicon; the
 * timeout is not enforced on pre-silicon platforms).
 */
static int gp106_pmu_enable_hw(struct pmu_gk20a *pmu, bool enable)
{
	struct gk20a *g = gk20a_from_pmu(pmu);

	gk20a_dbg_fn("");

	/*
	 * From GP10X onwards, we are using PPWR_FALCON_ENGINE for reset. And as
	 * it may come into same behaviour, reading NV_PPWR_FALCON_ENGINE again
	 * after Reset.
	 */

	if (enable) {
		/* total poll budget expressed as number of udelay() steps */
		int retries = PMU_MEM_SCRUBBING_TIMEOUT_MAX /
				PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT;
		gk20a_writel(g, pwr_falcon_engine_r(),
			pwr_falcon_engine_reset_false_f());
		/* read back to flush the write before continuing */
		gk20a_readl(g, pwr_falcon_engine_r());

		/* make sure ELPG is in a good state */
		if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
			g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
					g->slcg_enabled);
		if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
			g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
					g->blcg_enabled);

		/* wait for Scrubbing to complete */
		do {
			/* nonzero while IMEM or DMEM scrubbing is in flight */
			u32 w = gk20a_readl(g, pwr_falcon_dmactl_r()) &
				(pwr_falcon_dmactl_dmem_scrubbing_m() |
				 pwr_falcon_dmactl_imem_scrubbing_m());

			if (!w) {
				gk20a_dbg_fn("done");
				return 0;
			}
			udelay(PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT);
		} while (--retries || !tegra_platform_is_silicon());

		/* If scrubbing timeout, keep PMU in reset state */
		gk20a_writel(g, pwr_falcon_engine_r(),
			pwr_falcon_engine_reset_true_f());
		gk20a_readl(g, pwr_falcon_engine_r());
		gk20a_err(dev_from_gk20a(g), "Falcon mem scrubbing timeout");
		return -ETIMEDOUT;
	} else {
		/* DISABLE: hold the falcon in reset */
		gk20a_writel(g, pwr_falcon_engine_r(),
			pwr_falcon_engine_reset_true_f());
		gk20a_readl(g, pwr_falcon_engine_r());
		return 0;
	}
}
87
88static int pmu_enable(struct pmu_gk20a *pmu, bool enable)
89{
90 struct gk20a *g = gk20a_from_pmu(pmu);
91 u32 reg_reset;
92 int err;
93
94 gk20a_dbg_fn("");
95
96 if (!enable) {
97 reg_reset = gk20a_readl(g, pwr_falcon_engine_r());
98 if (reg_reset !=
99 pwr_falcon_engine_reset_true_f()) {
100
101 pmu_enable_irq(pmu, false);
102 gp106_pmu_enable_hw(pmu, false);
103 udelay(10);
104 }
105 } else {
106 gp106_pmu_enable_hw(pmu, true);
107 /* TBD: post reset */
108
109 /*idle the PMU and enable interrupts on the Falcon*/
110 err = pmu_idle(pmu);
111 if (err)
112 return err;
113 udelay(5);
114 pmu_enable_irq(pmu, true);
115 }
116
117 gk20a_dbg_fn("done");
118 return 0;
119}
120
121static int gp106_pmu_reset(struct gk20a *g)
122{
123 struct pmu_gk20a *pmu = &g->pmu;
124 int err = 0;
125
126 gk20a_dbg_fn("");
127
128 err = pmu_idle(pmu);
129 if (err)
130 return err;
131
132 /* TBD: release pmu hw mutex */
133
134 err = pmu_enable(pmu, false);
135 if (err)
136 return err;
137
138 /* TBD: cancel all sequences */
139 /* TBD: init all sequences and state tables */
140 /* TBD: restore pre-init message handler */
141
142 err = pmu_enable(pmu, true);
143 if (err)
144 return err;
145
146 return err;
147}
148
/*
 * Pulse the SEC2 falcon engine reset: assert, hold for 10us, deassert.
 *
 * Fix: the PSEC engine register must be written with the PSEC field
 * helpers (psec_falcon_engine_reset_*_f(), from hw_psec_gp106.h, already
 * included); the original mixed in the PWR unit's field helpers, which
 * only works by coincidence of the two units sharing a field layout.
 *
 * Always returns 0.
 */
static int gp106_sec2_reset(struct gk20a *g)
{
	gk20a_dbg_fn("");

	gk20a_writel(g, psec_falcon_engine_r(),
		psec_falcon_engine_reset_true_f());
	udelay(10);
	gk20a_writel(g, psec_falcon_engine_r(),
		psec_falcon_engine_reset_false_f());

	gk20a_dbg_fn("done");
	return 0;
}
162
/*
 * Reset both falcons used on GP106: the PMU and then SEC2.
 *
 * Fix: the original discarded the return value of gp106_pmu_reset() and
 * always reported success; propagate the error instead so callers see a
 * failed PMU reset. Backward compatible: still returns 0 on success.
 */
static int gp106_falcon_reset(struct gk20a *g)
{
	int err;

	gk20a_dbg_fn("");

	err = gp106_pmu_reset(g);
	if (err)
		return err;

	err = gp106_sec2_reset(g);

	gk20a_dbg_fn("done");
	return err;
}
173
/* The PMU is always supported on GP106; the device handle is unused. */
static bool gp106_is_pmu_supported(struct gk20a *g)
{
	(void)g;
	return true;
}
178
179static u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
180{
181 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
182 return PMU_PG_FEATURE_GR_RPPG_ENABLED;
183
184 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
185 return NVGPU_PMU_MS_FEATURE_MASK_ALL;
186
187 return 0;
188}
189
190static u32 gp106_pmu_pg_engines_list(struct gk20a *g)
191{
192 return BIT(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) |
193 BIT(PMU_PG_ELPG_ENGINE_ID_MS);
194}
195
196static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg,
197 void *param, u32 handle, u32 status)
198{
199 gk20a_dbg_fn("");
200
201 if (status != 0) {
202 gk20a_err(dev_from_gk20a(g), "PG PARAM cmd aborted");
203 return;
204 }
205
206 gp106_dbg_pmu("PG PARAM is acknowledged from PMU %x",
207 msg->msg.pg.msg_type);
208}
209
/*
 * Post the PG PARAM init command for a PG engine to the PMU.
 *
 * For GRAPHICS: initializes RPPG first, then posts a GR init-param
 * command advertising RPPG support. For MS: posts an MS init-param
 * command advertising clock gating, SW-ASR, RPPG and FB training.
 * Any other engine id is silently ignored.
 *
 * Returns 0 on success (or unknown engine id), -1 if RPPG init fails.
 * NOTE(review): the gk20a_pmu_cmd_post() return value is not checked;
 * failures there are only surfaced via the pmu_handle_param_msg callback.
 */
static int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id)
{
	struct pmu_gk20a *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;
	u32 status;

	memset(&cmd, 0, sizeof(struct pmu_cmd));
	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {

		status = init_rppg(g);
		if (status != 0) {
			gk20a_err(dev_from_gk20a(g), "RPPG init Failed");
			return -1;
		}

		/* header size covers only the GR init-param payload */
		cmd.hdr.unit_id = PMU_UNIT_PG;
		cmd.hdr.size = PMU_CMD_HDR_SIZE +
				sizeof(struct pmu_pg_cmd_gr_init_param);
		cmd.cmd.pg.gr_init_param.cmd_type =
				PMU_PG_CMD_ID_PG_PARAM;
		cmd.cmd.pg.gr_init_param.sub_cmd_id =
				PMU_PG_PARAM_CMD_GR_INIT_PARAM;
		cmd.cmd.pg.gr_init_param.featuremask =
				PMU_PG_FEATURE_GR_RPPG_ENABLED;

		gp106_dbg_pmu("cmd post GR PMU_PG_CMD_ID_PG_PARAM");
		gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
			pmu_handle_param_msg, pmu, &seq, ~0);
	} else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) {
		cmd.hdr.unit_id = PMU_UNIT_PG;
		cmd.hdr.size = PMU_CMD_HDR_SIZE +
			sizeof(struct pmu_pg_cmd_ms_init_param);
		cmd.cmd.pg.ms_init_param.cmd_type =
			PMU_PG_CMD_ID_PG_PARAM;
		cmd.cmd.pg.ms_init_param.cmd_id =
			PMU_PG_PARAM_CMD_MS_INIT_PARAM;
		/* MS feature set advertised to the PMU */
		cmd.cmd.pg.ms_init_param.support_mask =
			NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING |
			NVGPU_PMU_MS_FEATURE_MASK_SW_ASR |
			NVGPU_PMU_MS_FEATURE_MASK_RPPG |
			NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING;

		gp106_dbg_pmu("cmd post MS PMU_PG_CMD_ID_PG_PARAM");
		gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
			pmu_handle_param_msg, pmu, &seq, ~0);
	}

	return 0;
}
260
/*
 * Populate the PMU HAL for GP106.
 *
 * With priv security: use the secure-PMU/ACR boot path and the gm206
 * LS-falcon ucode loaders. Without it: fall back to the generic gk20a
 * ops with the gm20b non-secure setup. The gp106-specific overrides
 * below must run after those base-init calls, as they replace entries
 * the base init installed.
 */
void gp106_init_pmu_ops(struct gpu_ops *gops)
{
	gk20a_dbg_fn("");

	if (gops->privsecurity) {
		gp106_init_secure_pmu(gops);
		gops->pmu.init_wpr_region = gm20b_pmu_init_acr;
		gops->pmu.load_lsfalcon_ucode = gm206_load_falcon_ucode;
		gops->pmu.is_lazy_bootstrap = gm206_is_lazy_bootstrap;
		gops->pmu.is_priv_load = gm206_is_priv_load;
	} else {
		gk20a_init_pmu_ops(gops);
		gops->pmu.pmu_setup_hw_and_bootstrap =
			gm20b_init_nspmu_setup_hw1;
		gops->pmu.load_lsfalcon_ucode = NULL;
		gops->pmu.init_wpr_region = NULL;
	}
	/* common overrides, applied on both security paths */
	gops->pmu.pmu_setup_elpg = NULL;
	gops->pmu.lspmuwprinitdone = 0;
	gops->pmu.fecsbootstrapdone = false;
	gops->pmu.write_dmatrfbase = gp10b_write_dmatrfbase;
	gops->pmu.pmu_elpg_statistics = gp10b_pmu_elpg_statistics;
	gops->pmu.pmu_pg_init_param = gp106_pg_param_init;
	gops->pmu.pmu_pg_supported_engines_list = gp106_pmu_pg_engines_list;
	gops->pmu.pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list;
	gops->pmu.pmu_lpwr_enable_pg = nvgpu_lpwr_enable_pg;
	gops->pmu.pmu_lpwr_disable_pg = nvgpu_lpwr_disable_pg;
	gops->pmu.pmu_pg_param_post_init = nvgpu_lpwr_post_init;
	gops->pmu.send_lrf_tex_ltc_dram_overide_en_dis_cmd = NULL;
	gops->pmu.dump_secure_fuses = NULL;
	gops->pmu.reset = gp106_falcon_reset;
	gops->pmu.mclk_init = clk_mclkseq_init_mclk_gddr5;
	gops->pmu.is_pmu_supported = gp106_is_pmu_supported;

	gk20a_dbg_fn("done");
}