diff options
Diffstat (limited to 'drivers/gpu/nvgpu/gp106/pmu_gp106.c')
-rw-r--r-- | drivers/gpu/nvgpu/gp106/pmu_gp106.c | 300 |
1 files changed, 300 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/gp106/pmu_gp106.c b/drivers/gpu/nvgpu/gp106/pmu_gp106.c new file mode 100644 index 00000000..de26ecf2 --- /dev/null +++ b/drivers/gpu/nvgpu/gp106/pmu_gp106.c | |||
@@ -0,0 +1,300 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
20 | * DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #include <nvgpu/pmu.h> | ||
24 | #include <nvgpu/enabled.h> | ||
25 | |||
26 | #include "gk20a/gk20a.h" | ||
27 | #include "gk20a/pmu_gk20a.h" | ||
28 | |||
29 | #include "gm20b/acr_gm20b.h" | ||
30 | #include "gm20b/pmu_gm20b.h" | ||
31 | #include "gp10b/pmu_gp10b.h" | ||
32 | #include "gp106/pmu_gp106.h" | ||
33 | #include "gp106/acr_gp106.h" | ||
34 | |||
35 | #include "clk/clk_mclk.h" | ||
36 | |||
37 | #include "lpwr/lpwr.h" | ||
38 | #include "lpwr/rppg.h" | ||
39 | |||
40 | #include <nvgpu/hw/gp106/hw_psec_gp106.h> | ||
41 | #include <nvgpu/hw/gp106/hw_mc_gp106.h> | ||
42 | #include <nvgpu/hw/gp106/hw_pwr_gp106.h> | ||
43 | |||
44 | bool gp106_is_pmu_supported(struct gk20a *g) | ||
45 | { | ||
46 | return true; | ||
47 | } | ||
48 | |||
49 | bool gp106_pmu_is_engine_in_reset(struct gk20a *g) | ||
50 | { | ||
51 | u32 reg_reset; | ||
52 | bool status = false; | ||
53 | |||
54 | reg_reset = gk20a_readl(g, pwr_falcon_engine_r()); | ||
55 | if (reg_reset == pwr_falcon_engine_reset_true_f()) | ||
56 | status = true; | ||
57 | |||
58 | return status; | ||
59 | } | ||
60 | |||
61 | int gp106_pmu_engine_reset(struct gk20a *g, bool do_reset) | ||
62 | { | ||
63 | /* | ||
64 | * From GP10X onwards, we are using PPWR_FALCON_ENGINE for reset. And as | ||
65 | * it may come into same behavior, reading NV_PPWR_FALCON_ENGINE again | ||
66 | * after Reset. | ||
67 | */ | ||
68 | if (do_reset) { | ||
69 | gk20a_writel(g, pwr_falcon_engine_r(), | ||
70 | pwr_falcon_engine_reset_false_f()); | ||
71 | gk20a_readl(g, pwr_falcon_engine_r()); | ||
72 | } else { | ||
73 | gk20a_writel(g, pwr_falcon_engine_r(), | ||
74 | pwr_falcon_engine_reset_true_f()); | ||
75 | gk20a_readl(g, pwr_falcon_engine_r()); | ||
76 | } | ||
77 | |||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | u32 gp106_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id) | ||
82 | { | ||
83 | if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) | ||
84 | return PMU_PG_FEATURE_GR_RPPG_ENABLED; | ||
85 | |||
86 | if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) | ||
87 | return NVGPU_PMU_MS_FEATURE_MASK_ALL; | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | u32 gp106_pmu_pg_engines_list(struct gk20a *g) | ||
93 | { | ||
94 | return BIT(PMU_PG_ELPG_ENGINE_ID_GRAPHICS) | | ||
95 | BIT(PMU_PG_ELPG_ENGINE_ID_MS); | ||
96 | } | ||
97 | |||
98 | static void pmu_handle_param_msg(struct gk20a *g, struct pmu_msg *msg, | ||
99 | void *param, u32 handle, u32 status) | ||
100 | { | ||
101 | gk20a_dbg_fn(""); | ||
102 | |||
103 | if (status != 0) { | ||
104 | nvgpu_err(g, "PG PARAM cmd aborted"); | ||
105 | return; | ||
106 | } | ||
107 | |||
108 | gp106_dbg_pmu("PG PARAM is acknowledged from PMU %x", | ||
109 | msg->msg.pg.msg_type); | ||
110 | } | ||
111 | |||
112 | int gp106_pg_param_init(struct gk20a *g, u32 pg_engine_id) | ||
113 | { | ||
114 | struct nvgpu_pmu *pmu = &g->pmu; | ||
115 | struct pmu_cmd cmd; | ||
116 | u32 seq; | ||
117 | u32 status; | ||
118 | |||
119 | memset(&cmd, 0, sizeof(struct pmu_cmd)); | ||
120 | if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { | ||
121 | |||
122 | status = init_rppg(g); | ||
123 | if (status != 0) { | ||
124 | nvgpu_err(g, "RPPG init Failed"); | ||
125 | return -1; | ||
126 | } | ||
127 | |||
128 | cmd.hdr.unit_id = PMU_UNIT_PG; | ||
129 | cmd.hdr.size = PMU_CMD_HDR_SIZE + | ||
130 | sizeof(struct pmu_pg_cmd_gr_init_param); | ||
131 | cmd.cmd.pg.gr_init_param.cmd_type = | ||
132 | PMU_PG_CMD_ID_PG_PARAM; | ||
133 | cmd.cmd.pg.gr_init_param.sub_cmd_id = | ||
134 | PMU_PG_PARAM_CMD_GR_INIT_PARAM; | ||
135 | cmd.cmd.pg.gr_init_param.featuremask = | ||
136 | PMU_PG_FEATURE_GR_RPPG_ENABLED; | ||
137 | |||
138 | gp106_dbg_pmu("cmd post GR PMU_PG_CMD_ID_PG_PARAM"); | ||
139 | nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, | ||
140 | pmu_handle_param_msg, pmu, &seq, ~0); | ||
141 | } else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS) { | ||
142 | cmd.hdr.unit_id = PMU_UNIT_PG; | ||
143 | cmd.hdr.size = PMU_CMD_HDR_SIZE + | ||
144 | sizeof(struct pmu_pg_cmd_ms_init_param); | ||
145 | cmd.cmd.pg.ms_init_param.cmd_type = | ||
146 | PMU_PG_CMD_ID_PG_PARAM; | ||
147 | cmd.cmd.pg.ms_init_param.cmd_id = | ||
148 | PMU_PG_PARAM_CMD_MS_INIT_PARAM; | ||
149 | cmd.cmd.pg.ms_init_param.support_mask = | ||
150 | NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING | | ||
151 | NVGPU_PMU_MS_FEATURE_MASK_SW_ASR | | ||
152 | NVGPU_PMU_MS_FEATURE_MASK_RPPG | | ||
153 | NVGPU_PMU_MS_FEATURE_MASK_FB_TRAINING; | ||
154 | |||
155 | gp106_dbg_pmu("cmd post MS PMU_PG_CMD_ID_PG_PARAM"); | ||
156 | nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, | ||
157 | pmu_handle_param_msg, pmu, &seq, ~0); | ||
158 | } | ||
159 | |||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | void gp106_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, | ||
164 | struct pmu_pg_stats_data *pg_stat_data) | ||
165 | { | ||
166 | struct nvgpu_pmu *pmu = &g->pmu; | ||
167 | struct pmu_pg_stats_v2 stats; | ||
168 | |||
169 | nvgpu_flcn_copy_from_dmem(pmu->flcn, | ||
170 | pmu->stat_dmem_offset[pg_engine_id], | ||
171 | (u8 *)&stats, sizeof(struct pmu_pg_stats_v2), 0); | ||
172 | |||
173 | pg_stat_data->ingating_time = stats.total_sleep_time_us; | ||
174 | pg_stat_data->ungating_time = stats.total_non_sleep_time_us; | ||
175 | pg_stat_data->gating_cnt = stats.entry_count; | ||
176 | pg_stat_data->avg_entry_latency_us = stats.entry_latency_avg_us; | ||
177 | pg_stat_data->avg_exit_latency_us = stats.exit_latency_avg_us; | ||
178 | } | ||
179 | |||
180 | bool gp106_pmu_is_lpwr_feature_supported(struct gk20a *g, u32 feature_id) | ||
181 | { | ||
182 | bool is_feature_supported = false; | ||
183 | |||
184 | switch (feature_id) { | ||
185 | case PMU_PG_LPWR_FEATURE_RPPG: | ||
186 | is_feature_supported = nvgpu_lpwr_is_rppg_supported(g, | ||
187 | nvgpu_clk_arb_get_current_pstate(g)); | ||
188 | break; | ||
189 | case PMU_PG_LPWR_FEATURE_MSCG: | ||
190 | is_feature_supported = nvgpu_lpwr_is_mscg_supported(g, | ||
191 | nvgpu_clk_arb_get_current_pstate(g)); | ||
192 | break; | ||
193 | default: | ||
194 | is_feature_supported = false; | ||
195 | } | ||
196 | |||
197 | return is_feature_supported; | ||
198 | } | ||
199 | |||
200 | bool gp106_is_lazy_bootstrap(u32 falcon_id) | ||
201 | { | ||
202 | bool enable_status = false; | ||
203 | |||
204 | switch (falcon_id) { | ||
205 | case LSF_FALCON_ID_FECS: | ||
206 | enable_status = true; | ||
207 | break; | ||
208 | case LSF_FALCON_ID_GPCCS: | ||
209 | enable_status = true; | ||
210 | break; | ||
211 | default: | ||
212 | break; | ||
213 | } | ||
214 | |||
215 | return enable_status; | ||
216 | } | ||
217 | |||
218 | bool gp106_is_priv_load(u32 falcon_id) | ||
219 | { | ||
220 | bool enable_status = false; | ||
221 | |||
222 | switch (falcon_id) { | ||
223 | case LSF_FALCON_ID_FECS: | ||
224 | enable_status = true; | ||
225 | break; | ||
226 | case LSF_FALCON_ID_GPCCS: | ||
227 | enable_status = true; | ||
228 | break; | ||
229 | default: | ||
230 | break; | ||
231 | } | ||
232 | |||
233 | return enable_status; | ||
234 | } | ||
235 | |||
236 | static void gp106_pmu_load_multiple_falcons(struct gk20a *g, u32 falconidmask, | ||
237 | u32 flags) | ||
238 | { | ||
239 | struct nvgpu_pmu *pmu = &g->pmu; | ||
240 | struct pmu_cmd cmd; | ||
241 | u32 seq; | ||
242 | |||
243 | gk20a_dbg_fn(""); | ||
244 | |||
245 | gp106_dbg_pmu("wprinit status = %x\n", g->pmu_lsf_pmu_wpr_init_done); | ||
246 | if (g->pmu_lsf_pmu_wpr_init_done) { | ||
247 | /* send message to load FECS falcon */ | ||
248 | memset(&cmd, 0, sizeof(struct pmu_cmd)); | ||
249 | cmd.hdr.unit_id = PMU_UNIT_ACR; | ||
250 | cmd.hdr.size = PMU_CMD_HDR_SIZE + | ||
251 | sizeof(struct pmu_acr_cmd_bootstrap_multiple_falcons); | ||
252 | cmd.cmd.acr.boot_falcons.cmd_type = | ||
253 | PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS; | ||
254 | cmd.cmd.acr.boot_falcons.flags = flags; | ||
255 | cmd.cmd.acr.boot_falcons.falconidmask = | ||
256 | falconidmask; | ||
257 | cmd.cmd.acr.boot_falcons.usevamask = 0; | ||
258 | cmd.cmd.acr.boot_falcons.wprvirtualbase.lo = 0; | ||
259 | cmd.cmd.acr.boot_falcons.wprvirtualbase.hi = 0; | ||
260 | |||
261 | gp106_dbg_pmu("PMU_ACR_CMD_ID_BOOTSTRAP_MULTIPLE_FALCONS:%x\n", | ||
262 | falconidmask); | ||
263 | nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, | ||
264 | pmu_handle_fecs_boot_acr_msg, pmu, &seq, ~0); | ||
265 | } | ||
266 | |||
267 | gk20a_dbg_fn("done"); | ||
268 | } | ||
269 | |||
/*
 * Bootstrap the LS falcon(s) named in falconidmask through the PMU.
 *
 * Validates the mask, waits (if needed) for the PMU to finish LSF/WPR
 * init, posts the bootstrap command via gp106_pmu_load_multiple_falcons()
 * and then waits until the PMU reports every requested falcon as loaded.
 *
 * Returns 0 on success, -EINVAL for an empty or unsupported mask,
 * -ETIMEDOUT when the PMU never becomes ready or never acknowledges
 * the full mask.
 */
int gp106_load_falcon_ucode(struct gk20a *g, u32 falconidmask)
{
	u32 flags = PMU_ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;

	/* The PMU can bootstrap only the FECS and GPCCS LS falcons
	 * (original comment said "GM20B" — stale copy/paste from the
	 * gm20b version of this function) */
	if (falconidmask == 0)
		return -EINVAL;
	if (falconidmask & ~((1 << LSF_FALCON_ID_FECS) |
				(1 << LSF_FALCON_ID_GPCCS)))
		return -EINVAL;
	/* cleared so the completion handler can accumulate loaded ids */
	g->pmu_lsf_loaded_falcon_id = 0;
	/* check whether pmu is ready to bootstrap lsf if not wait for it */
	if (!g->pmu_lsf_pmu_wpr_init_done) {
		pmu_wait_message_cond(&g->pmu,
				gk20a_get_gr_idle_timeout(g),
				&g->pmu_lsf_pmu_wpr_init_done, 1);
		/* check again if it still not ready indicate an error */
		if (!g->pmu_lsf_pmu_wpr_init_done) {
			nvgpu_err(g, "PMU not ready to load LSF");
			return -ETIMEDOUT;
		}
	}
	/* load falcon(s) */
	gp106_pmu_load_multiple_falcons(g, falconidmask, flags);
	/* wait until the handler has accumulated exactly the requested mask */
	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&g->pmu_lsf_loaded_falcon_id, falconidmask);
	if (g->pmu_lsf_loaded_falcon_id != falconidmask)
		return -ETIMEDOUT;
	return 0;
}