Diffstat (limited to 'include/pmgr/pmgrpmu.c')
 -rw-r--r--  include/pmgr/pmgrpmu.c | 546
 1 file changed, 546 insertions, 0 deletions
diff --git a/include/pmgr/pmgrpmu.c b/include/pmgr/pmgrpmu.c
new file mode 100644
index 0000000..b6947f2
--- /dev/null
+++ b/include/pmgr/pmgrpmu.c
@@ -0,0 +1,546 @@
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/kmem.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/pmu.h>
#include <nvgpu/gk20a.h>

#include "gp106/bios_gp106.h"

#include "boardobj/boardobjgrp.h"
#include "boardobj/boardobjgrp_e32.h"

#include "pwrdev.h"
#include "pmgrpmu.h"

struct pmgr_pmucmdhandler_params {
	u32 success;
};

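/*
 * Completion callback for PMGR commands posted to the PMU. Checks that the
 * reply is one of the expected message types and that the falcon reported
 * success, then sets handlerparams->success so the blocking caller waiting
 * in pmu_wait_message_cond() can proceed.
 */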
static void pmgr_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 handle, u32 status)
{
	struct pmgr_pmucmdhandler_params *phandlerparams =
		(struct pmgr_pmucmdhandler_params *)param;

	if ((msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_SET_OBJECT) &&
	    (msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_QUERY) &&
	    (msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_LOAD)) {
		nvgpu_err(g, "unknown msg %x", msg->msg.pmgr.msg_type);
		return;
	}

	if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_SET_OBJECT) {
		if ((msg->msg.pmgr.set_object.b_success != 1) ||
		    (msg->msg.pmgr.set_object.flcnstatus != 0U)) {
			nvgpu_err(g, "pmgr msg failed %x %x %x %x",
				msg->msg.pmgr.set_object.msg_type,
				msg->msg.pmgr.set_object.b_success,
				msg->msg.pmgr.set_object.flcnstatus,
				msg->msg.pmgr.set_object.object_type);
			return;
		}
	} else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_QUERY) {
		if ((msg->msg.pmgr.query.b_success != 1) ||
		    (msg->msg.pmgr.query.flcnstatus != 0U)) {
			nvgpu_err(g, "pmgr msg failed %x %x %x %x",
				msg->msg.pmgr.query.msg_type,
				msg->msg.pmgr.query.b_success,
				msg->msg.pmgr.query.flcnstatus,
				msg->msg.pmgr.query.cmd_type);
			return;
		}
	} else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_LOAD) {
		if ((msg->msg.pmgr.load.b_success != 1) ||
		    (msg->msg.pmgr.load.flcnstatus != 0U)) {
			nvgpu_err(g, "pmgr msg failed %x %x %x",
				msg->msg.pmgr.load.msg_type,
				msg->msg.pmgr.load.b_success,
				msg->msg.pmgr.load.flcnstatus);
			return;
		}
	}

	phandlerparams->success = 1;
}

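/*
 * Sends an object table to the PMU via a PMGR SET_OBJECT command and blocks
 * until the command handler signals completion or the GR idle timeout
 * expires.
 */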
static u32 pmgr_pmu_set_object(struct gk20a *g,
		u8 type,
		u16 dmem_size,
		u16 fb_size,
		void *pobj)
{
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	struct nv_pmu_pmgr_cmd_set_object *pcmd;
	u32 status;
	u32 seqdesc;
	struct pmgr_pmucmdhandler_params handlerparams;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&cmd, 0, sizeof(struct pmu_cmd));
	memset(&handlerparams, 0, sizeof(struct pmgr_pmucmdhandler_params));

	cmd.hdr.unit_id = PMU_UNIT_PMGR;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_set_object) +
			(u32)sizeof(struct pmu_hdr);

	pcmd = &cmd.cmd.pmgr.set_object;
	pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_SET_OBJECT;
	pcmd->object_type = type;

	payload.in.buf = pobj;
	payload.in.size = dmem_size;
	payload.in.fb_size = fb_size;
	payload.in.offset = NV_PMU_PMGR_SET_OBJECT_ALLOC_OFFSET;

	/* Setup the handler params to communicate back results. */
	handlerparams.success = 0;

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			pmgr_pmucmdhandler,
			(void *)&handlerparams,
			&seqdesc, ~0);
	if (status) {
		nvgpu_err(g,
			"unable to post pmgr cmd for unit %x cmd id %x obj type %x",
			cmd.hdr.unit_id, pcmd->cmd_type, pcmd->object_type);
		goto exit;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handlerparams.success, 1);

	if (handlerparams.success == 0U) {
		nvgpu_err(g, "could not process cmd");
		status = -ETIMEDOUT;
		goto exit;
	}

exit:
	return status;
}

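/*
 * Builds a single-entry I2C device descriptor table for the board's INA3221
 * power sensor (the DCB index, I2C address, and port are taken from fields
 * on the gk20a device) and sends it to the PMU. The type and flags values
 * are fixed constants for the INA3221 entry.
 */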
static u32 pmgr_send_i2c_device_topology_to_pmu(struct gk20a *g)
{
	struct nv_pmu_pmgr_i2c_device_desc_table i2c_desc_table;
	u32 idx = g->ina3221_dcb_index;
	u32 status = 0;

	/* INA3221 I2C device info */
	i2c_desc_table.dev_mask = (1UL << idx);

	/* INA3221 */
	i2c_desc_table.devices[idx].super.type = 0x4E;

	i2c_desc_table.devices[idx].dcb_index = idx;
	i2c_desc_table.devices[idx].i2c_address = g->ina3221_i2c_address;
	i2c_desc_table.devices[idx].i2c_flags = 0xC2F;
	i2c_desc_table.devices[idx].i2c_port = g->ina3221_i2c_port;

	/* Pass the table down to the PMU as an object */
	status = pmgr_pmu_set_object(
			g,
			NV_PMU_PMGR_OBJECT_I2C_DEVICE_DESC_TABLE,
			(u16)sizeof(struct nv_pmu_pmgr_i2c_device_desc_table),
			PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED,
			&i2c_desc_table);

	if (status) {
		nvgpu_err(g, "pmgr_pmu_set_object failed %x",
			status);
	}

	return status;
}

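/*
 * Packages the power-device board objects into the PWR_DEVICE_DESC_TABLE
 * layout the PMU expects and sends it down. BA device info is flagged as
 * neither initialized nor used.
 */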
static int pmgr_send_pwr_device_topology_to_pmu(struct gk20a *g)
{
	struct nv_pmu_pmgr_pwr_device_desc_table *pwr_desc_table;
	struct nv_pmu_pmgr_pwr_device_desc_table_header *ppwr_desc_header;
	int status = 0;

	/* Set the BA-device-independent HW information */
	pwr_desc_table = nvgpu_kzalloc(g, sizeof(*pwr_desc_table));
	if (!pwr_desc_table) {
		return -ENOMEM;
	}

	ppwr_desc_header = &(pwr_desc_table->hdr.data);
	ppwr_desc_header->ba_info.b_initialized_and_used = false;

	/* populate the table */
	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&ppwr_desc_header->super,
			g->pmgr_pmu.pmgr_deviceobjs.super.super.objmask);

	status = boardobjgrp_pmudatainit_legacy(g,
			&g->pmgr_pmu.pmgr_deviceobjs.super.super,
			(struct nv_pmu_boardobjgrp_super *)pwr_desc_table);

	if (status) {
		nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x",
			status);
		goto exit;
	}

	/* Pass the table down to the PMU as an object */
	status = pmgr_pmu_set_object(
			g,
			NV_PMU_PMGR_OBJECT_PWR_DEVICE_DESC_TABLE,
			(u16)sizeof(union nv_pmu_pmgr_pwr_device_dmem_size),
			(u16)sizeof(struct nv_pmu_pmgr_pwr_device_desc_table),
			pwr_desc_table);

	if (status) {
		nvgpu_err(g, "pmgr_pmu_set_object failed %x",
			status);
	}

exit:
	nvgpu_kfree(g, pwr_desc_table);
	return status;
}

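/*
 * Packages the power-channel and channel-relationship board objects into a
 * PWR_MONITOR pack and sends it to the PMU, with channel polling disabled
 * (NO_POLLING). The DMEM size passed down is the worst case across the
 * monitor structures.
 */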
static int pmgr_send_pwr_monitor_to_pmu(struct gk20a *g)
{
	struct nv_pmu_pmgr_pwr_monitor_pack *pwr_monitor_pack = NULL;
	struct nv_pmu_pmgr_pwr_channel_header *pwr_channel_hdr;
	struct nv_pmu_pmgr_pwr_chrelationship_header *pwr_chrelationship_header;
	u32 max_dmem_size;
	int status = 0;

	pwr_monitor_pack = nvgpu_kzalloc(g, sizeof(*pwr_monitor_pack));
	if (!pwr_monitor_pack) {
		return -ENOMEM;
	}

	/* Copy all the global settings from the RM copy */
	pwr_channel_hdr = &(pwr_monitor_pack->channels.hdr.data);
	*pwr_monitor_pack = g->pmgr_pmu.pmgr_monitorobjs.pmu_data;

	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_channel_hdr->super,
			g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super.objmask);

	/* Copy in each channel */
	status = boardobjgrp_pmudatainit_legacy(g,
			&g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super,
			(struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack->channels));

	if (status) {
		nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x",
			status);
		goto exit;
	}

	/* Copy in each channel relationship */
	pwr_chrelationship_header = &(pwr_monitor_pack->ch_rels.hdr.data);

	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_chrelationship_header->super,
			g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super.objmask);

	pwr_channel_hdr->physical_channel_mask = g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask;
	pwr_channel_hdr->type = NV_PMU_PMGR_PWR_MONITOR_TYPE_NO_POLLING;

	status = boardobjgrp_pmudatainit_legacy(g,
			&g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super,
			(struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack->ch_rels));

	if (status) {
		nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x",
			status);
		goto exit;
	}

	/* Calculate the max DMEM buffer size */
	max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_monitor_dmem_size);

	/* Pass the table down to the PMU as an object */
	status = pmgr_pmu_set_object(
			g,
			NV_PMU_PMGR_OBJECT_PWR_MONITOR,
			(u16)max_dmem_size,
			(u16)sizeof(struct nv_pmu_pmgr_pwr_monitor_pack),
			pwr_monitor_pack);

	if (status) {
		nvgpu_err(g, "pmgr_pmu_set_object failed %x",
			status);
	}

exit:
	nvgpu_kfree(g, pwr_monitor_pack);
	return status;
}

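/*
 * Packages the power-policy board objects (policies, policy relationships,
 * and violations) into a PWR_POLICY pack. Each policy in the object mask
 * serializes itself through its pmudatainit hook before the pack is sent.
 */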
static int pmgr_send_pwr_policy_to_pmu(struct gk20a *g)
{
	struct nv_pmu_pmgr_pwr_policy_pack *ppwrpack = NULL;
	struct pwr_policy *ppolicy = NULL;
	int status = 0;
	u8 indx;
	u32 max_dmem_size;

	ppwrpack = nvgpu_kzalloc(g, sizeof(struct nv_pmu_pmgr_pwr_policy_pack));
	if (!ppwrpack) {
		nvgpu_err(g, "pwr policy alloc failed");
		status = -ENOMEM;
		goto exit;
	}

	ppwrpack->policies.hdr.data.version = g->pmgr_pmu.pmgr_policyobjs.version;
	ppwrpack->policies.hdr.data.b_enabled = g->pmgr_pmu.pmgr_policyobjs.b_enabled;

	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
			&ppwrpack->policies.hdr.data.super,
			g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super.objmask);

	memset(&ppwrpack->policies.hdr.data.reserved_pmu_policy_mask,
			0,
			sizeof(ppwrpack->policies.hdr.data.reserved_pmu_policy_mask));

	ppwrpack->policies.hdr.data.base_sample_period =
			g->pmgr_pmu.pmgr_policyobjs.base_sample_period;
	ppwrpack->policies.hdr.data.min_client_sample_period =
			g->pmgr_pmu.pmgr_policyobjs.min_client_sample_period;
	ppwrpack->policies.hdr.data.low_sampling_mult =
			g->pmgr_pmu.pmgr_policyobjs.low_sampling_mult;

	memcpy(&ppwrpack->policies.hdr.data.global_ceiling,
			&g->pmgr_pmu.pmgr_policyobjs.global_ceiling,
			sizeof(struct nv_pmu_perf_domain_group_limits));

	memcpy(&ppwrpack->policies.hdr.data.semantic_policy_tbl,
			&g->pmgr_pmu.pmgr_policyobjs.policy_idxs,
			sizeof(g->pmgr_pmu.pmgr_policyobjs.policy_idxs));

	BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, indx,
			ppwrpack->policies.hdr.data.super.obj_mask.super.data[0]) {
		ppolicy = PMGR_GET_PWR_POLICY(g, indx);

		status = ((struct boardobj *)ppolicy)->pmudatainit(g, (struct boardobj *)ppolicy,
				(struct nv_pmu_boardobj *)&(ppwrpack->policies.policies[indx].data));
		if (status) {
			nvgpu_err(g, "pmudatainit failed %x indx %x",
				status, indx);
			goto exit;
		}
	}
	BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END;

	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
			&ppwrpack->policy_rels.hdr.data.super,
			g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super.objmask);

	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
			&ppwrpack->violations.hdr.data.super,
			g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super.objmask);

	max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_policy_dmem_size);

	/* Pass the table down to the PMU as an object */
	status = pmgr_pmu_set_object(
			g,
			NV_PMU_PMGR_OBJECT_PWR_POLICY,
			(u16)max_dmem_size,
			(u16)sizeof(struct nv_pmu_pmgr_pwr_policy_pack),
			ppwrpack);

	if (status) {
		nvgpu_err(g, "pmgr_pmu_set_object failed %x",
			status);
	}

exit:
	if (ppwrpack) {
		nvgpu_kfree(g, ppwrpack);
	}

	return status;
}

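/*
 * Issues a blocking PWR_DEVICES_QUERY command for the requested device
 * mask; on success the PMU has filled *ppayload with the query results.
 */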
u32 pmgr_pmu_pwr_devices_query_blocking(
		struct gk20a *g,
		u32 pwr_dev_mask,
		struct nv_pmu_pmgr_pwr_devices_query_payload *ppayload)
{
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	struct nv_pmu_pmgr_cmd_pwr_devices_query *pcmd;
	u32 status;
	u32 seqdesc;
	struct pmgr_pmucmdhandler_params handlerparams;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&cmd, 0, sizeof(struct pmu_cmd));
	memset(&handlerparams, 0, sizeof(struct pmgr_pmucmdhandler_params));

	cmd.hdr.unit_id = PMU_UNIT_PMGR;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_pwr_devices_query) +
			(u32)sizeof(struct pmu_hdr);

	pcmd = &cmd.cmd.pmgr.pwr_dev_query;
	pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_PWR_DEVICES_QUERY;
	pcmd->dev_mask = pwr_dev_mask;

	payload.out.buf = ppayload;
	payload.out.size = sizeof(struct nv_pmu_pmgr_pwr_devices_query_payload);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.out.offset = NV_PMU_PMGR_PWR_DEVICES_QUERY_ALLOC_OFFSET;

	/* Setup the handler params to communicate back results. */
	handlerparams.success = 0;

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			pmgr_pmucmdhandler,
			(void *)&handlerparams,
			&seqdesc, ~0);
	if (status) {
		nvgpu_err(g,
			"unable to post pmgr query cmd for unit %x cmd id %x dev mask %x",
			cmd.hdr.unit_id, pcmd->cmd_type, pcmd->dev_mask);
		goto exit;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handlerparams.success, 1);

	if (handlerparams.success == 0U) {
		nvgpu_err(g, "could not process cmd");
		status = -ETIMEDOUT;
		goto exit;
	}

exit:
	return status;
}

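/*
 * Sends the PMGR LOAD command and blocks until the PMU acknowledges it.
 * Issued last, after all topology, monitor, and policy tables have been
 * sent down.
 */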
static u32 pmgr_pmu_load_blocking(struct gk20a *g)
{
	struct pmu_cmd cmd = { {0} };
	struct nv_pmu_pmgr_cmd_load *pcmd;
	u32 status;
	u32 seqdesc;
	struct pmgr_pmucmdhandler_params handlerparams = {0};

	cmd.hdr.unit_id = PMU_UNIT_PMGR;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_load) +
			(u32)sizeof(struct pmu_hdr);

	pcmd = &cmd.cmd.pmgr.load;
	pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_LOAD;

	/* Setup the handler params to communicate back results. */
	handlerparams.success = 0;

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
			PMU_COMMAND_QUEUE_LPQ,
			pmgr_pmucmdhandler,
			(void *)&handlerparams,
			&seqdesc, ~0);
	if (status) {
		nvgpu_err(g,
			"unable to post pmgr load cmd for unit %x cmd id %x",
			cmd.hdr.unit_id, pcmd->cmd_type);
		goto exit;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handlerparams.success, 1);

	if (handlerparams.success == 0U) {
		nvgpu_err(g, "could not process cmd");
		status = -ETIMEDOUT;
		goto exit;
	}

exit:
	return status;
}

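/*
 * Top-level entry point: sends the I2C device topology, then any non-empty
 * power-device, power-monitor, and power-policy tables, and finishes with a
 * blocking LOAD command.
 */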
int pmgr_send_pmgr_tables_to_pmu(struct gk20a *g)
{
	int status = 0;

	status = pmgr_send_i2c_device_topology_to_pmu(g);

	if (status) {
		nvgpu_err(g,
			"pmgr_send_i2c_device_topology_to_pmu failed %x",
			status);
		goto exit;
	}

	if (!BOARDOBJGRP_IS_EMPTY(&g->pmgr_pmu.pmgr_deviceobjs.super.super)) {
		status = pmgr_send_pwr_device_topology_to_pmu(g);
		if (status) {
			nvgpu_err(g,
				"pmgr_send_pwr_device_topology_to_pmu failed %x",
				status);
			goto exit;
		}
	}

	if (!(BOARDOBJGRP_IS_EMPTY(
			&g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super)) ||
	    !(BOARDOBJGRP_IS_EMPTY(
			&g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super))) {
		status = pmgr_send_pwr_monitor_to_pmu(g);
		if (status) {
			nvgpu_err(g,
				"pmgr_send_pwr_monitor_to_pmu failed %x", status);
			goto exit;
		}
	}

	if (!(BOARDOBJGRP_IS_EMPTY(
			&g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super)) ||
	    !(BOARDOBJGRP_IS_EMPTY(
			&g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super)) ||
	    !(BOARDOBJGRP_IS_EMPTY(
			&g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super))) {
		status = pmgr_send_pwr_policy_to_pmu(g);
		if (status) {
			nvgpu_err(g,
				"pmgr_send_pwr_policy_to_pmu failed %x", status);
			goto exit;
		}
	}

	status = pmgr_pmu_load_blocking(g);
	if (status) {
		nvgpu_err(g,
			"pmgr_pmu_load_blocking failed %x", status);
		goto exit;
	}

exit:
	return status;
}