Diffstat (limited to 'include/lpwr/lpwr.c')
-rw-r--r--  include/lpwr/lpwr.c  448
1 file changed, 0 insertions, 448 deletions
diff --git a/include/lpwr/lpwr.c b/include/lpwr/lpwr.c
deleted file mode 100644
index c8cfb84..0000000
--- a/include/lpwr/lpwr.c
+++ /dev/null
@@ -1,448 +0,0 @@
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/bios.h>
#include <nvgpu/pmu.h>
#include <nvgpu/clk_arb.h>
#include <nvgpu/gk20a.h>

#include "gp106/bios_gp106.h"
#include "pstate/pstate.h"
#include "pmu_perf/pmu_perf.h"
#include "lpwr.h"

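/*
 * Parse the LPWR Index Table from the VBIOS: cache the base sampling
 * period and, for each entry, the per-feature table indices (PCIe, GR,
 * MS, DI, GC6) in g->perf_pmu.lpwr.
 */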
static int get_lpwr_idx_table(struct gk20a *g)
{
        u32 *lpwr_idx_table_ptr;
        u8 *entry_addr;
        u32 idx;
        struct nvgpu_lpwr_bios_idx_data *pidx_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.idx;
        struct nvgpu_bios_lpwr_idx_table_1x_header header = { 0 };
        struct nvgpu_bios_lpwr_idx_table_1x_entry entry = { 0 };

        lpwr_idx_table_ptr = (u32 *)nvgpu_bios_get_perf_table_ptrs(g,
                        g->bios.perf_token, LOWPOWER_TABLE);
        if (lpwr_idx_table_ptr == NULL) {
                return -EINVAL;
        }

        memcpy(&header, lpwr_idx_table_ptr,
                sizeof(struct nvgpu_bios_lpwr_idx_table_1x_header));

        if (header.entry_count >= LPWR_VBIOS_IDX_ENTRY_COUNT_MAX) {
                return -EINVAL;
        }

        pidx_data->base_sampling_period = (u16)header.base_sampling_period;

        /* Parse the LPWR Index Table entries. */
        for (idx = 0; idx < header.entry_count; idx++) {
                entry_addr = (u8 *)lpwr_idx_table_ptr + header.header_size +
                        (idx * header.entry_size);

                memcpy(&entry, entry_addr,
                        sizeof(struct nvgpu_bios_lpwr_idx_table_1x_entry));

                pidx_data->entry[idx].pcie_idx = entry.pcie_idx;
                pidx_data->entry[idx].gr_idx = entry.gr_idx;
                pidx_data->entry[idx].ms_idx = entry.ms_idx;
                pidx_data->entry[idx].di_idx = entry.di_idx;
                pidx_data->entry[idx].gc6_idx = entry.gc6_idx;
        }

        return 0;
}

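/*
 * Parse the LPWR GR Table from the VBIOS. Entries with the feature mask
 * bit set are marked GR-enabled; the RPPG bit is then cleared from the
 * feature mask when the VBIOS does not advertise GR RPPG.
 */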
static int get_lpwr_gr_table(struct gk20a *g)
{
        u32 *lpwr_gr_table_ptr;
        u8 *entry_addr;
        u32 idx;
        struct nvgpu_lpwr_bios_gr_data *pgr_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.gr;
        struct nvgpu_bios_lpwr_gr_table_1x_header header = { 0 };
        struct nvgpu_bios_lpwr_gr_table_1x_entry entry = { 0 };

        lpwr_gr_table_ptr = (u32 *)nvgpu_bios_get_perf_table_ptrs(g,
                        g->bios.perf_token, LOWPOWER_GR_TABLE);
        if (lpwr_gr_table_ptr == NULL) {
                return -EINVAL;
        }

        memcpy(&header, lpwr_gr_table_ptr,
                sizeof(struct nvgpu_bios_lpwr_gr_table_1x_header));

        /* Parse the LPWR GR Table entries. */
        for (idx = 0; idx < header.entry_count; idx++) {
                entry_addr = (u8 *)lpwr_gr_table_ptr + header.header_size +
                        (idx * header.entry_size);

                memcpy(&entry, entry_addr,
                        sizeof(struct nvgpu_bios_lpwr_gr_table_1x_entry));

                if (BIOS_GET_FIELD(entry.feautre_mask,
                                NV_VBIOS_LPWR_MS_FEATURE_MASK_MS)) {
                        pgr_data->entry[idx].gr_enabled = true;

                        pgr_data->entry[idx].feature_mask =
                                NVGPU_PMU_GR_FEATURE_MASK_ALL;

                        if (!BIOS_GET_FIELD(entry.feautre_mask,
                                NV_VBIOS_LPWR_GR_FEATURE_MASK_GR_RPPG)) {
                                pgr_data->entry[idx].feature_mask &=
                                        ~NVGPU_PMU_GR_FEATURE_MASK_RPPG;
                        }
                }
        }

        return 0;
}

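/*
 * Parse the LPWR MS Table from the VBIOS: the default entry index, the
 * MS idle threshold, and the per-entry MSCG feature mask (clock gating,
 * SW-ASR, RPPG) plus dynamic current coefficients for logic and SRAM.
 */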
static int get_lpwr_ms_table(struct gk20a *g)
{
        u32 *lpwr_ms_table_ptr;
        u8 *entry_addr;
        u32 idx;
        struct nvgpu_lpwr_bios_ms_data *pms_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.ms;
        struct nvgpu_bios_lpwr_ms_table_1x_header header = { 0 };
        struct nvgpu_bios_lpwr_ms_table_1x_entry entry = { 0 };

        lpwr_ms_table_ptr = (u32 *)nvgpu_bios_get_perf_table_ptrs(g,
                        g->bios.perf_token, LOWPOWER_MS_TABLE);
        if (lpwr_ms_table_ptr == NULL) {
                return -EINVAL;
        }

        memcpy(&header, lpwr_ms_table_ptr,
                sizeof(struct nvgpu_bios_lpwr_ms_table_1x_header));

        if (header.entry_count >= LPWR_VBIOS_MS_ENTRY_COUNT_MAX) {
                return -EINVAL;
        }

        pms_data->default_entry_idx = (u8)header.default_entry_idx;

        /* The VBIOS stores the idle threshold in units of 10 us. */
        pms_data->idle_threshold_us = (u32)(header.idle_threshold_us * 10);

        /* Parse the LPWR MS Table entries. */
        for (idx = 0; idx < header.entry_count; idx++) {
                entry_addr = (u8 *)lpwr_ms_table_ptr + header.header_size +
                        (idx * header.entry_size);

                memcpy(&entry, entry_addr,
                        sizeof(struct nvgpu_bios_lpwr_ms_table_1x_entry));

                if (BIOS_GET_FIELD(entry.feautre_mask,
                                NV_VBIOS_LPWR_MS_FEATURE_MASK_MS)) {
                        pms_data->entry[idx].ms_enabled = true;

                        pms_data->entry[idx].feature_mask =
                                NVGPU_PMU_MS_FEATURE_MASK_ALL;

                        if (!BIOS_GET_FIELD(entry.feautre_mask,
                                NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_CLOCK_GATING)) {
                                pms_data->entry[idx].feature_mask &=
                                        ~NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING;
                        }

                        if (!BIOS_GET_FIELD(entry.feautre_mask,
                                NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_SWASR)) {
                                pms_data->entry[idx].feature_mask &=
                                        ~NVGPU_PMU_MS_FEATURE_MASK_SW_ASR;
                        }

                        if (!BIOS_GET_FIELD(entry.feautre_mask,
                                NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_RPPG)) {
                                pms_data->entry[idx].feature_mask &=
                                        ~NVGPU_PMU_MS_FEATURE_MASK_RPPG;
                        }
                }

                pms_data->entry[idx].dynamic_current_logic =
                        entry.dynamic_current_logic;

                pms_data->entry[idx].dynamic_current_sram =
                        entry.dynamic_current_sram;
        }

        return 0;
}

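/*
 * Parse all three LPWR VBIOS tables (GR, MS and Index) during
 * power-gating setup. Returns the first parser error, if any.
 */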
u32 nvgpu_lpwr_pg_setup(struct gk20a *g)
{
        u32 err = 0;

        nvgpu_log_fn(g, " ");

        err = get_lpwr_gr_table(g);
        if (err) {
                return err;
        }

        err = get_lpwr_ms_table(g);
        if (err) {
                return err;
        }

        err = get_lpwr_idx_table(g);

        return err;
}

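/*
 * PMU message callback for LPWR PARAM commands: on success, set the
 * caller-provided ack flag so pmu_wait_message_cond() stops polling.
 */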
static void nvgpu_pmu_handle_param_lpwr_msg(struct gk20a *g,
                struct pmu_msg *msg, void *param,
                u32 handle, u32 status)
{
        u32 *ack_status = param;

        nvgpu_log_fn(g, " ");

        if (status != 0) {
                nvgpu_err(g, "LPWR PARAM cmd aborted");
                return;
        }

        *ack_status = 1;

        nvgpu_pmu_dbg(g, "lpwr-param is acknowledged from PMU %x",
                msg->msg.pg.msg_type);
}

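/*
 * Notify the PMU of an MCLK change. SW-ASR is always requested; GDDR5
 * write training is added when the target pstate's MCLK exceeds the
 * Maxwell no-write-training limit. The payload is cached so a command
 * is only posted when it differs from the last one, and the call then
 * blocks until the PMU acknowledges it.
 */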
int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate)
{
        struct pmu_cmd cmd;
        u32 seq;
        int status = 0;
        u32 payload = NV_PMU_PG_PARAM_MCLK_CHANGE_MS_SWASR_ENABLED;
        struct clk_set_info *pstate_info;
        u32 ack_status = 0;

        nvgpu_log_fn(g, " ");

        pstate_info = pstate_get_clk_set_info(g, pstate,
                        clkwhich_mclk);
        if (!pstate_info) {
                return -EINVAL;
        }

        if (pstate_info->max_mhz >
                        MAX_SWASR_MCLK_FREQ_WITHOUT_WR_TRAINING_MAXWELL_MHZ) {
                payload |=
                        NV_PMU_PG_PARAM_MCLK_CHANGE_GDDR5_WR_TRAINING_ENABLED;
        }

        if (payload != g->perf_pmu.lpwr.mclk_change_cache) {
                g->perf_pmu.lpwr.mclk_change_cache = payload;

                /* Zero-initialize the command before filling it in. */
                memset(&cmd, 0, sizeof(struct pmu_cmd));
                cmd.hdr.unit_id = PMU_UNIT_PG;
                cmd.hdr.size = PMU_CMD_HDR_SIZE +
                        sizeof(struct pmu_pg_cmd_mclk_change);
                cmd.cmd.pg.mclk_change.cmd_type =
                        PMU_PG_CMD_ID_PG_PARAM;
                cmd.cmd.pg.mclk_change.cmd_id =
                        PMU_PG_PARAM_CMD_MCLK_CHANGE;
                cmd.cmd.pg.mclk_change.data = payload;

                nvgpu_pmu_dbg(g, "cmd post MS PMU_PG_PARAM_CMD_MCLK_CHANGE");
                status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
                        PMU_COMMAND_QUEUE_HPQ,
                        nvgpu_pmu_handle_param_lpwr_msg, &ack_status, &seq, ~0);

                pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
                        &ack_status, 1);
                if (ack_status == 0) {
                        status = -EINVAL;
                        nvgpu_err(g, "MCLK-CHANGE ACK failed");
                }
        }

        return status;
}

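/*
 * Send the POST_INIT PG parameter command to the PMU on the low
 * priority queue and wait for its acknowledgement.
 */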
u32 nvgpu_lpwr_post_init(struct gk20a *g)
{
        struct pmu_cmd cmd;
        u32 seq;
        u32 status = 0;
        u32 ack_status = 0;

        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
        cmd.hdr.size = PMU_CMD_HDR_SIZE +
                sizeof(struct pmu_pg_cmd_post_init_param);

        cmd.cmd.pg.post_init.cmd_type =
                PMU_PG_CMD_ID_PG_PARAM;
        cmd.cmd.pg.post_init.cmd_id =
                PMU_PG_PARAM_CMD_POST_INIT;

        nvgpu_pmu_dbg(g, "cmd post post-init PMU_PG_PARAM_CMD_POST_INIT");
        status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
                PMU_COMMAND_QUEUE_LPQ,
                nvgpu_pmu_handle_param_lpwr_msg, &ack_status, &seq, ~0);

        pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
                &ack_status, 1);
        if (ack_status == 0) {
                status = -EINVAL;
                nvgpu_err(g, "post-init ack failed");
        }

        return status;
}

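/*
 * Return 1 when the MS entry referenced by the pstate's LPWR index is
 * MSCG-enabled, 0 otherwise (including for an unknown pstate).
 */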
u32 nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num)
{
        struct nvgpu_lpwr_bios_ms_data *pms_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.ms;
        struct nvgpu_lpwr_bios_idx_data *pidx_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.idx;
        struct pstate *pstate = pstate_find(g, pstate_num);
        u32 ms_idx;

        nvgpu_log_fn(g, " ");

        if (!pstate) {
                return 0;
        }

        ms_idx = pidx_data->entry[pstate->lpwr_entry_idx].ms_idx;
        if (pms_data->entry[ms_idx].ms_enabled) {
                return 1;
        } else {
                return 0;
        }
}

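/*
 * Return 1 when the GR entry referenced by the pstate's LPWR index is
 * GR-enabled (RPPG capable), 0 otherwise.
 */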
u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num)
{
        struct nvgpu_lpwr_bios_gr_data *pgr_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.gr;
        struct nvgpu_lpwr_bios_idx_data *pidx_data =
                &g->perf_pmu.lpwr.lwpr_bios_data.idx;
        struct pstate *pstate = pstate_find(g, pstate_num);
        u32 idx;

        nvgpu_log_fn(g, " ");

        if (!pstate) {
                return 0;
        }

        idx = pidx_data->entry[pstate->lpwr_entry_idx].gr_idx;
        if (pgr_data->entry[idx].gr_enabled) {
                return 1;
        } else {
                return 0;
        }
}

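/*
 * Enable power gating for the current pstate: mark MSCG enabled when
 * it is supported and allowed, then enable ELPG when RPPG is
 * supported. Runs under pg_mutex and, optionally, the clock arbiter
 * pstate-change lock.
 */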
int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
{
        struct nvgpu_pmu *pmu = &g->pmu;
        int status = 0;
        u32 is_mscg_supported = 0;
        u32 is_rppg_supported = 0;
        u32 present_pstate = 0;

        nvgpu_log_fn(g, " ");

        if (pstate_lock) {
                nvgpu_clk_arb_pstate_change_lock(g, true);
        }
        nvgpu_mutex_acquire(&pmu->pg_mutex);

        present_pstate = nvgpu_clk_arb_get_current_pstate(g);

        is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g,
                        present_pstate);
        if (is_mscg_supported && g->mscg_enabled) {
                if (!pmu->mscg_stat) {
                        pmu->mscg_stat = PMU_MSCG_ENABLED;
                }
        }

        is_rppg_supported = nvgpu_lpwr_is_rppg_supported(g,
                        present_pstate);
        if (is_rppg_supported) {
                if (g->support_pmu && g->can_elpg) {
                        status = nvgpu_pmu_enable_elpg(g);
                }
        }

        nvgpu_mutex_release(&pmu->pg_mutex);
        if (pstate_lock) {
                nvgpu_clk_arb_pstate_change_lock(g, false);
        }

        return status;
}

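/*
 * Disable power gating in the reverse order of nvgpu_lpwr_enable_pg():
 * ELPG is disabled first, then the MSCG status is cleared. Runs under
 * the same locks as the enable path.
 */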
int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
{
        struct nvgpu_pmu *pmu = &g->pmu;
        int status = 0;
        u32 is_mscg_supported = 0;
        u32 is_rppg_supported = 0;
        u32 present_pstate = 0;

        nvgpu_log_fn(g, " ");

        if (pstate_lock) {
                nvgpu_clk_arb_pstate_change_lock(g, true);
        }
        nvgpu_mutex_acquire(&pmu->pg_mutex);

        present_pstate = nvgpu_clk_arb_get_current_pstate(g);

        is_rppg_supported = nvgpu_lpwr_is_rppg_supported(g,
                        present_pstate);
        if (is_rppg_supported) {
                if (g->support_pmu && g->elpg_enabled) {
                        status = nvgpu_pmu_disable_elpg(g);
                        if (status) {
                                goto exit_unlock;
                        }
                }
        }

        is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g,
                        present_pstate);
        if (is_mscg_supported && g->mscg_enabled) {
                if (pmu->mscg_stat) {
                        pmu->mscg_stat = PMU_MSCG_DISABLED;
                }
        }

exit_unlock:
        nvgpu_mutex_release(&pmu->pg_mutex);
        if (pstate_lock) {
                nvgpu_clk_arb_pstate_change_lock(g, false);
        }

        nvgpu_log_fn(g, "done");
        return status;
}
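
Taken together, nvgpu_lpwr_disable_pg() and nvgpu_lpwr_enable_pg() form the bracket a caller uses around work that must not race with power gating. A minimal sketch under that assumption; do_work_with_pg_disabled() and the critical-work placeholder are hypothetical and not part of this file:

/* Hypothetical caller: bracket critical work with the PG disable/enable
 * pair. Passing pstate_lock = true also blocks pstate changes for the
 * duration; the disable path releases its locks itself on error. */
static int do_work_with_pg_disabled(struct gk20a *g)
{
        int err;

        err = nvgpu_lpwr_disable_pg(g, true);
        if (err != 0) {
                return err;
        }

        /* ... critical work that must not race with MSCG/ELPG ... */

        return nvgpu_lpwr_enable_pg(g, true);
}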