path: root/include/lpwr
author    Joshua Bakita <bakitajoshua@gmail.com>  2023-06-28 18:24:25 -0400
committer Joshua Bakita <bakitajoshua@gmail.com>  2023-06-28 18:24:25 -0400
commit    01e6fac4d61fdd7fff5433942ec93fc2ea1e4df1 (patch)
tree      4ef34501728a087be24f4ba0af90f91486bf780b /include/lpwr
parent    306a03d18b305e4e573be3b2931978fa10679eb9 (diff)
Include nvgpu headers
These are needed to build on NVIDIA's Jetson boards for the time being. Only a couple structs are required, so it should be fairly easy to remove this dependency at some point in the future.
Diffstat (limited to 'include/lpwr')
-rw-r--r--  include/lpwr/lpwr.c  448
-rw-r--r--  include/lpwr/lpwr.h  101
-rw-r--r--  include/lpwr/rppg.c  160
-rw-r--r--  include/lpwr/rppg.h   26
4 files changed, 735 insertions, 0 deletions
diff --git a/include/lpwr/lpwr.c b/include/lpwr/lpwr.c
new file mode 100644
index 0000000..c8cfb84
--- /dev/null
+++ b/include/lpwr/lpwr.c
@@ -0,0 +1,448 @@
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/bios.h>
#include <nvgpu/pmu.h>
#include <nvgpu/clk_arb.h>
#include <nvgpu/gk20a.h>

#include "gp106/bios_gp106.h"
#include "pstate/pstate.h"
#include "pmu_perf/pmu_perf.h"
#include "lpwr.h"

static int get_lpwr_idx_table(struct gk20a *g)
{
	u32 *lpwr_idx_table_ptr;
	u8 *entry_addr;
	u32 idx;
	struct nvgpu_lpwr_bios_idx_data *pidx_data =
		&g->perf_pmu.lpwr.lwpr_bios_data.idx;
	struct nvgpu_bios_lpwr_idx_table_1x_header header = { 0 };
	struct nvgpu_bios_lpwr_idx_table_1x_entry entry = { 0 };

	lpwr_idx_table_ptr = (u32 *)nvgpu_bios_get_perf_table_ptrs(g,
		g->bios.perf_token, LOWPOWER_TABLE);
	if (lpwr_idx_table_ptr == NULL) {
		return -EINVAL;
	}

	memcpy(&header, lpwr_idx_table_ptr,
		sizeof(struct nvgpu_bios_lpwr_idx_table_1x_header));

	if (header.entry_count >= LPWR_VBIOS_IDX_ENTRY_COUNT_MAX) {
		return -EINVAL;
	}

	pidx_data->base_sampling_period = (u16)header.base_sampling_period;

	/* Parse the LPWR Index Table entries. */
	for (idx = 0; idx < header.entry_count; idx++) {
		entry_addr = (u8 *)lpwr_idx_table_ptr + header.header_size +
			(idx * header.entry_size);

		memcpy(&entry, entry_addr,
			sizeof(struct nvgpu_bios_lpwr_idx_table_1x_entry));

		pidx_data->entry[idx].pcie_idx = entry.pcie_idx;
		pidx_data->entry[idx].gr_idx = entry.gr_idx;
		pidx_data->entry[idx].ms_idx = entry.ms_idx;
		pidx_data->entry[idx].di_idx = entry.di_idx;
		pidx_data->entry[idx].gc6_idx = entry.gc6_idx;

	}

	return 0;
}

static int get_lpwr_gr_table(struct gk20a *g)
{
	u32 *lpwr_gr_table_ptr;
	u8 *entry_addr;
	u32 idx;
	struct nvgpu_lpwr_bios_gr_data *pgr_data =
		&g->perf_pmu.lpwr.lwpr_bios_data.gr;
	struct nvgpu_bios_lpwr_gr_table_1x_header header = { 0 };
	struct nvgpu_bios_lpwr_gr_table_1x_entry entry = { 0 };

	lpwr_gr_table_ptr = (u32 *)nvgpu_bios_get_perf_table_ptrs(g,
		g->bios.perf_token, LOWPOWER_GR_TABLE);
	if (lpwr_gr_table_ptr == NULL) {
		return -EINVAL;
	}

	memcpy(&header, lpwr_gr_table_ptr,
		sizeof(struct nvgpu_bios_lpwr_gr_table_1x_header));

	/* Parse the LPWR GR Table entries. */
	for (idx = 0; idx < header.entry_count; idx++) {
		entry_addr = (u8 *)lpwr_gr_table_ptr + header.header_size +
			(idx * header.entry_size);

		memcpy(&entry, entry_addr,
			sizeof(struct nvgpu_bios_lpwr_gr_table_1x_entry));

		if (BIOS_GET_FIELD(entry.feautre_mask,
			NV_VBIOS_LPWR_MS_FEATURE_MASK_MS)) {
			pgr_data->entry[idx].gr_enabled = true;

			pgr_data->entry[idx].feature_mask =
				NVGPU_PMU_GR_FEATURE_MASK_ALL;

			if (!BIOS_GET_FIELD(entry.feautre_mask,
				NV_VBIOS_LPWR_GR_FEATURE_MASK_GR_RPPG)) {
				pgr_data->entry[idx].feature_mask &=
					~NVGPU_PMU_GR_FEATURE_MASK_RPPG;
			}
		}

	}

	return 0;
}

static int get_lpwr_ms_table(struct gk20a *g)
{
	u32 *lpwr_ms_table_ptr;
	u8 *entry_addr;
	u32 idx;
	struct nvgpu_lpwr_bios_ms_data *pms_data =
		&g->perf_pmu.lpwr.lwpr_bios_data.ms;
	struct nvgpu_bios_lpwr_ms_table_1x_header header = { 0 };
	struct nvgpu_bios_lpwr_ms_table_1x_entry entry = { 0 };

	lpwr_ms_table_ptr = (u32 *)nvgpu_bios_get_perf_table_ptrs(g,
		g->bios.perf_token, LOWPOWER_MS_TABLE);
	if (lpwr_ms_table_ptr == NULL) {
		return -EINVAL;
	}

	memcpy(&header, lpwr_ms_table_ptr,
		sizeof(struct nvgpu_bios_lpwr_ms_table_1x_header));

	if (header.entry_count >= LPWR_VBIOS_MS_ENTRY_COUNT_MAX) {
		return -EINVAL;
	}

	pms_data->default_entry_idx = (u8)header.default_entry_idx;

	pms_data->idle_threshold_us = (u32)(header.idle_threshold_us * 10);

	/* Parse the LPWR MS Table entries. */
	for (idx = 0; idx < header.entry_count; idx++) {
		entry_addr = (u8 *)lpwr_ms_table_ptr + header.header_size +
			(idx * header.entry_size);

		memcpy(&entry, entry_addr,
			sizeof(struct nvgpu_bios_lpwr_ms_table_1x_entry));

		if (BIOS_GET_FIELD(entry.feautre_mask,
			NV_VBIOS_LPWR_MS_FEATURE_MASK_MS)) {
			pms_data->entry[idx].ms_enabled = true;

			pms_data->entry[idx].feature_mask =
				NVGPU_PMU_MS_FEATURE_MASK_ALL;

			if (!BIOS_GET_FIELD(entry.feautre_mask,
				NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_CLOCK_GATING)) {
				pms_data->entry[idx].feature_mask &=
					~NVGPU_PMU_MS_FEATURE_MASK_CLOCK_GATING;
			}

			if (!BIOS_GET_FIELD(entry.feautre_mask,
				NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_SWASR)) {
				pms_data->entry[idx].feature_mask &=
					~NVGPU_PMU_MS_FEATURE_MASK_SW_ASR;
			}

			if (!BIOS_GET_FIELD(entry.feautre_mask,
				NV_VBIOS_LPWR_MS_FEATURE_MASK_MS_RPPG)) {
				pms_data->entry[idx].feature_mask &=
					~NVGPU_PMU_MS_FEATURE_MASK_RPPG;
			}
		}

		pms_data->entry[idx].dynamic_current_logic =
			entry.dynamic_current_logic;

		pms_data->entry[idx].dynamic_current_sram =
			entry.dynamic_current_sram;
	}

	return 0;
}

u32 nvgpu_lpwr_pg_setup(struct gk20a *g)
{
	u32 err = 0;

	nvgpu_log_fn(g, " ");

	err = get_lpwr_gr_table(g);
	if (err) {
		return err;
	}

	err = get_lpwr_ms_table(g);
	if (err) {
		return err;
	}

	err = get_lpwr_idx_table(g);

	return err;
}

static void nvgpu_pmu_handle_param_lpwr_msg(struct gk20a *g,
		struct pmu_msg *msg, void *param,
		u32 handle, u32 status)
{
	u32 *ack_status = param;

	nvgpu_log_fn(g, " ");

	if (status != 0) {
		nvgpu_err(g, "LWPR PARAM cmd aborted");
		return;
	}

	*ack_status = 1;

	nvgpu_pmu_dbg(g, "lpwr-param is acknowledged from PMU %x",
		msg->msg.pg.msg_type);
}

int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate)
{
	struct pmu_cmd cmd;
	u32 seq, status = 0;
	u32 payload = NV_PMU_PG_PARAM_MCLK_CHANGE_MS_SWASR_ENABLED;
	struct clk_set_info *pstate_info;
	u32 ack_status = 0;

	nvgpu_log_fn(g, " ");

	pstate_info = pstate_get_clk_set_info(g, pstate,
			clkwhich_mclk);
	if (!pstate_info) {
		return -EINVAL;
	}

	if (pstate_info->max_mhz >
			MAX_SWASR_MCLK_FREQ_WITHOUT_WR_TRAINING_MAXWELL_MHZ) {
		payload |=
			NV_PMU_PG_PARAM_MCLK_CHANGE_GDDR5_WR_TRAINING_ENABLED;
	}

	if (payload != g->perf_pmu.lpwr.mclk_change_cache) {
		g->perf_pmu.lpwr.mclk_change_cache = payload;

		cmd.hdr.unit_id = PMU_UNIT_PG;
		cmd.hdr.size = PMU_CMD_HDR_SIZE +
			sizeof(struct pmu_pg_cmd_mclk_change);
		cmd.cmd.pg.mclk_change.cmd_type =
			PMU_PG_CMD_ID_PG_PARAM;
		cmd.cmd.pg.mclk_change.cmd_id =
			PMU_PG_PARAM_CMD_MCLK_CHANGE;
		cmd.cmd.pg.mclk_change.data = payload;

		nvgpu_pmu_dbg(g, "cmd post MS PMU_PG_PARAM_CMD_MCLK_CHANGE");
		status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
			PMU_COMMAND_QUEUE_HPQ,
			nvgpu_pmu_handle_param_lpwr_msg, &ack_status, &seq, ~0);

		pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
			&ack_status, 1);
		if (ack_status == 0) {
			status = -EINVAL;
			nvgpu_err(g, "MCLK-CHANGE ACK failed");
		}
	}

	return status;
}

u32 nvgpu_lpwr_post_init(struct gk20a *g)
{
	struct pmu_cmd cmd;
	u32 seq;
	u32 status = 0;
	u32 ack_status = 0;

	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE +
		sizeof(struct pmu_pg_cmd_post_init_param);

	cmd.cmd.pg.post_init.cmd_type =
		PMU_PG_CMD_ID_PG_PARAM;
	cmd.cmd.pg.post_init.cmd_id =
		PMU_PG_PARAM_CMD_POST_INIT;

	nvgpu_pmu_dbg(g, "cmd post post-init PMU_PG_PARAM_CMD_POST_INIT");
	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
		PMU_COMMAND_QUEUE_LPQ,
		nvgpu_pmu_handle_param_lpwr_msg, &ack_status, &seq, ~0);

	pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
		&ack_status, 1);
	if (ack_status == 0) {
		status = -EINVAL;
		nvgpu_err(g, "post-init ack failed");
	}

	return status;
}

u32 nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num)
{
	struct nvgpu_lpwr_bios_ms_data *pms_data =
		&g->perf_pmu.lpwr.lwpr_bios_data.ms;
	struct nvgpu_lpwr_bios_idx_data *pidx_data =
		&g->perf_pmu.lpwr.lwpr_bios_data.idx;
	struct pstate *pstate = pstate_find(g, pstate_num);
	u32 ms_idx;

	nvgpu_log_fn(g, " ");

	if (!pstate) {
		return 0;
	}

	ms_idx = pidx_data->entry[pstate->lpwr_entry_idx].ms_idx;
	if (pms_data->entry[ms_idx].ms_enabled) {
		return 1;
	} else {
		return 0;
	}
}

u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num)
{
	struct nvgpu_lpwr_bios_gr_data *pgr_data =
		&g->perf_pmu.lpwr.lwpr_bios_data.gr;
	struct nvgpu_lpwr_bios_idx_data *pidx_data =
		&g->perf_pmu.lpwr.lwpr_bios_data.idx;
	struct pstate *pstate = pstate_find(g, pstate_num);
	u32 idx;

	nvgpu_log_fn(g, " ");

	if (!pstate) {
		return 0;
	}

	idx = pidx_data->entry[pstate->lpwr_entry_idx].gr_idx;
	if (pgr_data->entry[idx].gr_enabled) {
		return 1;
	} else {
		return 0;
	}
}


int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	u32 status = 0;
	u32 is_mscg_supported = 0;
	u32 is_rppg_supported = 0;
	u32 present_pstate = 0;

	nvgpu_log_fn(g, " ");

	if (pstate_lock) {
		nvgpu_clk_arb_pstate_change_lock(g, true);
	}
	nvgpu_mutex_acquire(&pmu->pg_mutex);

	present_pstate = nvgpu_clk_arb_get_current_pstate(g);

	is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g,
			present_pstate);
	if (is_mscg_supported && g->mscg_enabled) {
		if (!pmu->mscg_stat) {
			pmu->mscg_stat = PMU_MSCG_ENABLED;
		}
	}

	is_rppg_supported = nvgpu_lpwr_is_rppg_supported(g,
			present_pstate);
	if (is_rppg_supported) {
		if (g->support_pmu && g->can_elpg) {
			status = nvgpu_pmu_enable_elpg(g);
		}
	}

	nvgpu_mutex_release(&pmu->pg_mutex);
	if (pstate_lock) {
		nvgpu_clk_arb_pstate_change_lock(g, false);
	}

	return status;
}

int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	int status = 0;
	u32 is_mscg_supported = 0;
	u32 is_rppg_supported = 0;
	u32 present_pstate = 0;

	nvgpu_log_fn(g, " ");

	if (pstate_lock) {
		nvgpu_clk_arb_pstate_change_lock(g, true);
	}
	nvgpu_mutex_acquire(&pmu->pg_mutex);

	present_pstate = nvgpu_clk_arb_get_current_pstate(g);

	is_rppg_supported = nvgpu_lpwr_is_rppg_supported(g,
			present_pstate);
	if (is_rppg_supported) {
		if (g->support_pmu && g->elpg_enabled) {
			status = nvgpu_pmu_disable_elpg(g);
			if (status) {
				goto exit_unlock;
			}
		}
	}

	is_mscg_supported = nvgpu_lpwr_is_mscg_supported(g,
			present_pstate);
	if (is_mscg_supported && g->mscg_enabled) {
		if (pmu->mscg_stat) {
			pmu->mscg_stat = PMU_MSCG_DISABLED;
		}
	}

exit_unlock:
	nvgpu_mutex_release(&pmu->pg_mutex);
	if (pstate_lock) {
		nvgpu_clk_arb_pstate_change_lock(g, false);
	}

	nvgpu_log_fn(g, "done");
	return status;
}
diff --git a/include/lpwr/lpwr.h b/include/lpwr/lpwr.h
new file mode 100644
index 0000000..c38ba62
--- /dev/null
+++ b/include/lpwr/lpwr.h
@@ -0,0 +1,101 @@
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef NVGPU_LPWR_H
#define NVGPU_LPWR_H

#define MAX_SWASR_MCLK_FREQ_WITHOUT_WR_TRAINING_MAXWELL_MHZ 540

#define NV_PMU_PG_PARAM_MCLK_CHANGE_MS_SWASR_ENABLED BIT(0x1)
#define NV_PMU_PG_PARAM_MCLK_CHANGE_GDDR5_WR_TRAINING_ENABLED BIT(0x3)

#define LPWR_ENTRY_COUNT_MAX 0x06

#define LPWR_VBIOS_IDX_ENTRY_COUNT_MAX (LPWR_ENTRY_COUNT_MAX)

#define LPWR_VBIOS_IDX_ENTRY_RSVD \
	(LPWR_VBIOS_IDX_ENTRY_COUNT_MAX - 1)

#define LPWR_VBIOS_BASE_SAMPLING_PERIOD_DEFAULT (500)

struct nvgpu_lpwr_bios_idx_entry {
	u8 pcie_idx;
	u8 gr_idx;
	u8 ms_idx;
	u8 di_idx;
	u8 gc6_idx;
};

struct nvgpu_lpwr_bios_idx_data {
	u16 base_sampling_period;
	struct nvgpu_lpwr_bios_idx_entry entry[LPWR_VBIOS_IDX_ENTRY_COUNT_MAX];
};

#define LPWR_VBIOS_MS_ENTRY_COUNT_MAX (LPWR_ENTRY_COUNT_MAX)

struct nvgpu_lpwr_bios_ms_entry {
	bool ms_enabled;
	u32 feature_mask;
	u32 asr_efficiency_thresholdl;
	u16 dynamic_current_logic;
	u16 dynamic_current_sram;
};

struct nvgpu_lpwr_bios_ms_data {
	u8 default_entry_idx;
	u32 idle_threshold_us;
	struct nvgpu_lpwr_bios_ms_entry entry[LPWR_VBIOS_MS_ENTRY_COUNT_MAX];
};

#define LPWR_VBIOS_GR_ENTRY_COUNT_MAX (LPWR_ENTRY_COUNT_MAX)

struct nvgpu_lpwr_bios_gr_entry {
	bool gr_enabled;
	u32 feature_mask;
};

struct nvgpu_lpwr_bios_gr_data {
	u8 default_entry_idx;
	u32 idle_threshold_us;
	u8 adaptive_gr_multiplier;
	struct nvgpu_lpwr_bios_gr_entry entry[LPWR_VBIOS_GR_ENTRY_COUNT_MAX];
};

struct nvgpu_lpwr_bios_data {
	struct nvgpu_lpwr_bios_idx_data idx;
	struct nvgpu_lpwr_bios_ms_data ms;
	struct nvgpu_lpwr_bios_gr_data gr;
};

struct obj_lwpr {
	struct nvgpu_lpwr_bios_data lwpr_bios_data;
	u32 mclk_change_cache;
};

u32 nvgpu_lpwr_pg_setup(struct gk20a *g);
int nvgpu_lwpr_mclk_change(struct gk20a *g, u32 pstate);
int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock);
int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock);
u32 nvgpu_lpwr_is_mscg_supported(struct gk20a *g, u32 pstate_num);
u32 nvgpu_lpwr_is_rppg_supported(struct gk20a *g, u32 pstate_num);
u32 nvgpu_lpwr_post_init(struct gk20a *g);

#endif /* NVGPU_LPWR_H */
diff --git a/include/lpwr/rppg.c b/include/lpwr/rppg.c
new file mode 100644
index 0000000..13e8126
--- /dev/null
+++ b/include/lpwr/rppg.c
@@ -0,0 +1,160 @@
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/pmu.h>
#include <nvgpu/gk20a.h>

#include "gp106/bios_gp106.h"
#include "pstate/pstate.h"
#include "lpwr/rppg.h"

static void pmu_handle_rppg_init_msg(struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 handle, u32 status)
{
	u32 *success = param;

	if (status == 0) {
		switch (msg->msg.pg.rppg_msg.cmn.msg_id) {
		case NV_PMU_RPPG_MSG_ID_INIT_CTRL_ACK:
			*success = 1;
			nvgpu_pmu_dbg(g, "RPPG is acknowledged from PMU %x",
				msg->msg.pg.msg_type);
			break;
		}
	}

	nvgpu_pmu_dbg(g, "RPPG is acknowledged from PMU %x",
		msg->msg.pg.msg_type);
}

static u32 rppg_send_cmd(struct gk20a *g, struct nv_pmu_rppg_cmd *prppg_cmd)
{
	struct pmu_cmd cmd;
	u32 seq;
	u32 status = 0;
	u32 success = 0;

	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE +
		sizeof(struct nv_pmu_rppg_cmd);

	cmd.cmd.pg.rppg_cmd.cmn.cmd_type = PMU_PMU_PG_CMD_ID_RPPG;
	cmd.cmd.pg.rppg_cmd.cmn.cmd_id = prppg_cmd->cmn.cmd_id;

	switch (prppg_cmd->cmn.cmd_id) {
	case NV_PMU_RPPG_CMD_ID_INIT:
		break;
	case NV_PMU_RPPG_CMD_ID_INIT_CTRL:
		cmd.cmd.pg.rppg_cmd.init_ctrl.ctrl_id =
			prppg_cmd->init_ctrl.ctrl_id;
		cmd.cmd.pg.rppg_cmd.init_ctrl.domain_id =
			prppg_cmd->init_ctrl.domain_id;
		break;
	case NV_PMU_RPPG_CMD_ID_STATS_RESET:
		cmd.cmd.pg.rppg_cmd.stats_reset.ctrl_id =
			prppg_cmd->stats_reset.ctrl_id;
		break;
	default:
		nvgpu_err(g, "Invalid RPPG command %d",
			prppg_cmd->cmn.cmd_id);
		return -1;
	}

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
			pmu_handle_rppg_init_msg, &success, &seq, ~0);
	if (status) {
		nvgpu_err(g, "Unable to submit parameter command %d",
			prppg_cmd->cmn.cmd_id);
		goto exit;
	}

	if (prppg_cmd->cmn.cmd_id == NV_PMU_RPPG_CMD_ID_INIT_CTRL) {
		pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
			&success, 1);
		if (success == 0) {
			status = -EINVAL;
			nvgpu_err(g, "Ack for the parameter command %x",
				prppg_cmd->cmn.cmd_id);
		}
	}

exit:
	return status;
}

static u32 rppg_init(struct gk20a *g)
{
	struct nv_pmu_rppg_cmd rppg_cmd;

	rppg_cmd.init.cmd_id = NV_PMU_RPPG_CMD_ID_INIT;

	return rppg_send_cmd(g, &rppg_cmd);
}

static u32 rppg_ctrl_init(struct gk20a *g, u8 ctrl_id)
{
	struct nv_pmu_rppg_cmd rppg_cmd;

	rppg_cmd.init_ctrl.cmd_id = NV_PMU_RPPG_CMD_ID_INIT_CTRL;
	rppg_cmd.init_ctrl.ctrl_id = ctrl_id;

	switch (ctrl_id) {
	case NV_PMU_RPPG_CTRL_ID_GR:
	case NV_PMU_RPPG_CTRL_ID_MS:
		rppg_cmd.init_ctrl.domain_id = NV_PMU_RPPG_DOMAIN_ID_GFX;
		break;
	}

	return rppg_send_cmd(g, &rppg_cmd);
}

u32 init_rppg(struct gk20a *g)
{
	u32 status;

	status = rppg_init(g);
	if (status != 0) {
		nvgpu_err(g,
			"Failed to initialize RPPG in PMU: 0x%08x", status);
		return status;
	}


	status = rppg_ctrl_init(g, NV_PMU_RPPG_CTRL_ID_GR);
	if (status != 0) {
		nvgpu_err(g,
			"Failed to initialize RPPG_CTRL: GR in PMU: 0x%08x",
			status);
		return status;
	}

	status = rppg_ctrl_init(g, NV_PMU_RPPG_CTRL_ID_MS);
	if (status != 0) {
		nvgpu_err(g,
			"Failed to initialize RPPG_CTRL: MS in PMU: 0x%08x",
			status);
		return status;
	}

	return status;
}
diff --git a/include/lpwr/rppg.h b/include/lpwr/rppg.h
new file mode 100644
index 0000000..d66600a
--- /dev/null
+++ b/include/lpwr/rppg.h
@@ -0,0 +1,26 @@
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef NVGPU_LPWR_RPPG_H
#define NVGPU_LPWR_RPPG_H

u32 init_rppg(struct gk20a *g);
#endif /* NVGPU_LPWR_RPPG_H */