Diffstat (limited to 'drivers/gpu/nvgpu/pmgr/pmgrpmu.c')
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pmgrpmu.c  533
1 file changed, 533 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/pmgr/pmgrpmu.c b/drivers/gpu/nvgpu/pmgr/pmgrpmu.c
new file mode 100644
index 00000000..6913c280
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgrpmu.c
@@ -0,0 +1,533 @@
/*
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/kmem.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/pmu.h>

#include "gk20a/gk20a.h"
#include "gp106/bios_gp106.h"
#include "common/linux/os_linux.h"
#include "common/linux/platform_gk20a.h"

#include "boardobj/boardobjgrp.h"
#include "boardobj/boardobjgrp_e32.h"

#include "pwrdev.h"
#include "pmgrpmu.h"

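/*
 * Completion flag shared between a command submission below and the
 * PMU message handler; pmu_wait_message_cond() polls 'success' until
 * the handler sets it (or the wait times out).
 */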
struct pmgr_pmucmdhandler_params {
	u32 success;
};

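/*
 * Callback invoked when the PMU answers a PMGR command. It rejects
 * unknown message types, logs any reply that reports failure (either
 * b_success clear or a non-zero falcon status), and flags success
 * only for a clean reply.
 */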
static void pmgr_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 handle, u32 status)
{
	struct pmgr_pmucmdhandler_params *phandlerparams =
		(struct pmgr_pmucmdhandler_params *)param;

	if ((msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_SET_OBJECT) &&
		(msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_QUERY) &&
		(msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_LOAD)) {
		nvgpu_err(g, "unknown msg %x", msg->msg.pmgr.msg_type);
		return;
	}

	if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_SET_OBJECT) {
		if ((msg->msg.pmgr.set_object.b_success != 1) ||
			(msg->msg.pmgr.set_object.flcnstatus != 0)) {
			nvgpu_err(g, "pmgr msg failed %x %x %x %x",
				msg->msg.pmgr.set_object.msg_type,
				msg->msg.pmgr.set_object.b_success,
				msg->msg.pmgr.set_object.flcnstatus,
				msg->msg.pmgr.set_object.object_type);
			return;
		}
	} else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_QUERY) {
		if ((msg->msg.pmgr.query.b_success != 1) ||
			(msg->msg.pmgr.query.flcnstatus != 0)) {
			nvgpu_err(g, "pmgr msg failed %x %x %x %x",
				msg->msg.pmgr.query.msg_type,
				msg->msg.pmgr.query.b_success,
				msg->msg.pmgr.query.flcnstatus,
				msg->msg.pmgr.query.cmd_type);
			return;
		}
	} else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_LOAD) {
		if ((msg->msg.pmgr.load.b_success != 1) ||
			(msg->msg.pmgr.load.flcnstatus != 0)) {
			nvgpu_err(g, "pmgr msg failed %x %x %x",
				msg->msg.pmgr.load.msg_type,
				msg->msg.pmgr.load.b_success,
				msg->msg.pmgr.load.flcnstatus);
			return;
		}
	}

	phandlerparams->success = 1;
}

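/*
 * Send a SET_OBJECT command carrying 'pobj' to the PMU's PMGR unit
 * over the low-priority queue and block until the handler above
 * confirms completion. Returns -ETIMEDOUT if no successful reply
 * arrives within the GR idle timeout.
 */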
static u32 pmgr_pmu_set_object(struct gk20a *g,
		u8 type,
		u16 dmem_size,
		u16 fb_size,
		void *pobj)
{
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	struct nv_pmu_pmgr_cmd_set_object *pcmd;
	u32 status;
	u32 seqdesc;
	struct pmgr_pmucmdhandler_params handlerparams;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&cmd, 0, sizeof(struct pmu_cmd));
	memset(&handlerparams, 0, sizeof(struct pmgr_pmucmdhandler_params));

	cmd.hdr.unit_id = PMU_UNIT_PMGR;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_set_object) +
			(u32)sizeof(struct pmu_hdr);

	pcmd = &cmd.cmd.pmgr.set_object;
	pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_SET_OBJECT;
	pcmd->object_type = type;

	payload.in.buf = pobj;
	payload.in.size = dmem_size;
	payload.in.fb_size = fb_size;
	payload.in.offset = NV_PMU_PMGR_SET_OBJECT_ALLOC_OFFSET;

	/* Set up the handler params to communicate back results. */
	handlerparams.success = 0;

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			pmgr_pmucmdhandler,
			(void *)&handlerparams,
			&seqdesc, ~0);
	if (status) {
		nvgpu_err(g,
			"unable to post pmgr cmd for unit %x cmd id %x obj type %x",
			cmd.hdr.unit_id, pcmd->cmd_type, pcmd->object_type);
		goto exit;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handlerparams.success, 1);

	if (handlerparams.success == 0) {
		nvgpu_err(g, "could not process cmd");
		status = -ETIMEDOUT;
		goto exit;
	}

exit:
	return status;
}

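/*
 * Describe the board's INA3221 power sensor to the PMU: build a
 * one-entry I2C device descriptor table from the platform data
 * (DCB index, I2C address and port) and hand it down as a
 * SET_OBJECT payload.
 */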
static u32 pmgr_send_i2c_device_topology_to_pmu(struct gk20a *g)
{
	struct nv_pmu_pmgr_i2c_device_desc_table i2c_desc_table;
	struct gk20a_platform *platform = gk20a_get_platform(dev_from_gk20a(g));
	u32 idx = platform->ina3221_dcb_index;
	u32 status = 0;

	/* INA3221 I2C device info */
	i2c_desc_table.dev_mask = (1UL << idx);

	/* INA3221 */
	i2c_desc_table.devices[idx].super.type = 0x4E;

	i2c_desc_table.devices[idx].dcb_index = idx;
	i2c_desc_table.devices[idx].i2c_address = platform->ina3221_i2c_address;
	i2c_desc_table.devices[idx].i2c_flags = 0xC2F;
	i2c_desc_table.devices[idx].i2c_port = platform->ina3221_i2c_port;

	/* Pass the table down to the PMU as an object */
	status = pmgr_pmu_set_object(
			g,
			NV_PMU_PMGR_OBJECT_I2C_DEVICE_DESC_TABLE,
			(u16)sizeof(struct nv_pmu_pmgr_i2c_device_desc_table),
			PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED,
			&i2c_desc_table);

	if (status)
		nvgpu_err(g, "pmgr_pmu_set_object failed %x",
			status);

	return status;
}

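/*
 * Mirror the SW power-device board-object group into its PMU
 * representation (ba_info, object mask, then each device entry via
 * the legacy pmudatainit path) and send it down as the PWR_DEVICE
 * descriptor table.
 */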
static u32 pmgr_send_pwr_device_topology_to_pmu(struct gk20a *g)
{
	struct nv_pmu_pmgr_pwr_device_desc_table pwr_desc_table;
	struct nv_pmu_pmgr_pwr_device_desc_table_header *ppwr_desc_header;
	u32 status = 0;

	/* Set the BA-device-independent HW information */
	ppwr_desc_header = &(pwr_desc_table.hdr.data);
	ppwr_desc_header->ba_info.b_initialized_and_used = false;

	/* Populate the table */
	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&ppwr_desc_header->super,
			g->pmgr_pmu.pmgr_deviceobjs.super.super.objmask);

	status = boardobjgrp_pmudatainit_legacy(g,
			&g->pmgr_pmu.pmgr_deviceobjs.super.super,
			(struct nv_pmu_boardobjgrp_super *)&pwr_desc_table);

	if (status) {
		nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x",
			status);
		goto exit;
	}

	/* Pass the table down to the PMU as an object */
	status = pmgr_pmu_set_object(
			g,
			NV_PMU_PMGR_OBJECT_PWR_DEVICE_DESC_TABLE,
			(u16)sizeof(union nv_pmu_pmgr_pwr_device_dmem_size),
			(u16)sizeof(struct nv_pmu_pmgr_pwr_device_desc_table),
			&pwr_desc_table);

	if (status)
		nvgpu_err(g, "pmgr_pmu_set_object failed %x",
			status);

exit:
	return status;
}

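/*
 * Package the power-monitor state for the PMU: copy the global
 * settings, each power channel, and each channel relationship into
 * a single pwr_monitor_pack (with polling disabled via
 * NV_PMU_PMGR_PWR_MONITOR_TYPE_NO_POLLING) and send it as the
 * PWR_MONITOR object.
 */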
static u32 pmgr_send_pwr_monitor_to_pmu(struct gk20a *g)
{
	struct nv_pmu_pmgr_pwr_monitor_pack pwr_monitor_pack;
	struct nv_pmu_pmgr_pwr_channel_header *pwr_channel_hdr;
	struct nv_pmu_pmgr_pwr_chrelationship_header *pwr_chrelationship_header;
	u32 max_dmem_size;
	u32 status = 0;

	/* Copy all the global settings from the RM copy */
	pwr_monitor_pack = g->pmgr_pmu.pmgr_monitorobjs.pmu_data;
	pwr_channel_hdr = &(pwr_monitor_pack.channels.hdr.data);

	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_channel_hdr->super,
			g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super.objmask);

	/* Copy in each channel */
	status = boardobjgrp_pmudatainit_legacy(g,
			&g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super,
			(struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack.channels));

	if (status) {
		nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x",
			status);
		goto exit;
	}

	/* Copy in each channel relationship */
	pwr_chrelationship_header = &(pwr_monitor_pack.ch_rels.hdr.data);

	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_chrelationship_header->super,
			g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super.objmask);

	pwr_channel_hdr->physical_channel_mask =
		g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask;
	pwr_channel_hdr->type = NV_PMU_PMGR_PWR_MONITOR_TYPE_NO_POLLING;

	status = boardobjgrp_pmudatainit_legacy(g,
			&g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super,
			(struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack.ch_rels));

	if (status) {
		nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x",
			status);
		goto exit;
	}

	/* Calculate the max DMEM buffer size */
	max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_monitor_dmem_size);

	/* Pass the table down to the PMU as an object */
	status = pmgr_pmu_set_object(
			g,
			NV_PMU_PMGR_OBJECT_PWR_MONITOR,
			(u16)max_dmem_size,
			(u16)sizeof(struct nv_pmu_pmgr_pwr_monitor_pack),
			&pwr_monitor_pack);

	if (status)
		nvgpu_err(g, "pmgr_pmu_set_object failed %x",
			status);

exit:
	return status;
}

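/*
 * Build the power-policy pack in a heap allocation, fill in the
 * header settings (sample periods, global ceiling, semantic policy
 * table), serialize every policy in the object mask plus the
 * policy-relationship and violation groups, and send the pack as
 * the PWR_POLICY object.
 */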
static u32 pmgr_send_pwr_policy_to_pmu(struct gk20a *g)
{
	struct nv_pmu_pmgr_pwr_policy_pack *ppwrpack = NULL;
	struct pwr_policy *ppolicy = NULL;
	u32 status = 0;
	u8 indx;
	u32 max_dmem_size;

	ppwrpack = nvgpu_kzalloc(g, sizeof(struct nv_pmu_pmgr_pwr_policy_pack));
	if (!ppwrpack) {
		nvgpu_err(g, "pwr policy alloc failed %x",
			status);
		status = -ENOMEM;
		goto exit;
	}

	ppwrpack->policies.hdr.data.version =
		g->pmgr_pmu.pmgr_policyobjs.version;
	ppwrpack->policies.hdr.data.b_enabled =
		g->pmgr_pmu.pmgr_policyobjs.b_enabled;

	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
			&ppwrpack->policies.hdr.data.super,
			g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super.objmask);

	memset(&ppwrpack->policies.hdr.data.reserved_pmu_policy_mask,
			0,
			sizeof(ppwrpack->policies.hdr.data.reserved_pmu_policy_mask));

	ppwrpack->policies.hdr.data.base_sample_period =
		g->pmgr_pmu.pmgr_policyobjs.base_sample_period;
	ppwrpack->policies.hdr.data.min_client_sample_period =
		g->pmgr_pmu.pmgr_policyobjs.min_client_sample_period;
	ppwrpack->policies.hdr.data.low_sampling_mult =
		g->pmgr_pmu.pmgr_policyobjs.low_sampling_mult;

	memcpy(&ppwrpack->policies.hdr.data.global_ceiling,
		&g->pmgr_pmu.pmgr_policyobjs.global_ceiling,
		sizeof(struct nv_pmu_perf_domain_group_limits));

	memcpy(&ppwrpack->policies.hdr.data.semantic_policy_tbl,
		&g->pmgr_pmu.pmgr_policyobjs.policy_idxs,
		sizeof(g->pmgr_pmu.pmgr_policyobjs.policy_idxs));

	BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, indx,
			ppwrpack->policies.hdr.data.super.obj_mask.super.data[0]) {
		ppolicy = PMGR_GET_PWR_POLICY(g, indx);

		status = ((struct boardobj *)ppolicy)->pmudatainit(g,
			(struct boardobj *)ppolicy,
			(struct nv_pmu_boardobj *)&(ppwrpack->policies.policies[indx].data));
		if (status) {
			nvgpu_err(g, "pmudatainit failed %x indx %x",
				status, indx);
			status = -ENOMEM;
			goto exit;
		}
	}
	BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END;

	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
			&ppwrpack->policy_rels.hdr.data.super,
			g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super.objmask);

	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
			&ppwrpack->violations.hdr.data.super,
			g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super.objmask);

	max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_policy_dmem_size);

	/* Pass the table down to the PMU as an object */
	status = pmgr_pmu_set_object(
			g,
			NV_PMU_PMGR_OBJECT_PWR_POLICY,
			(u16)max_dmem_size,
			(u16)sizeof(struct nv_pmu_pmgr_pwr_policy_pack),
			ppwrpack);

	if (status)
		nvgpu_err(g, "pmgr_pmu_set_object failed %x",
			status);

exit:
	if (ppwrpack)
		nvgpu_kfree(g, ppwrpack);

	return status;
}

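/*
 * Issue a blocking PWR_DEVICES_QUERY for the devices in
 * 'pwr_dev_mask'; the PMU writes the per-device readings back into
 * 'ppayload'. Returns 0 on success or -ETIMEDOUT if the PMU does
 * not answer within the GR idle timeout.
 */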
u32 pmgr_pmu_pwr_devices_query_blocking(
		struct gk20a *g,
		u32 pwr_dev_mask,
		struct nv_pmu_pmgr_pwr_devices_query_payload *ppayload)
{
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	struct nv_pmu_pmgr_cmd_pwr_devices_query *pcmd;
	u32 status;
	u32 seqdesc;
	struct pmgr_pmucmdhandler_params handlerparams;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&cmd, 0, sizeof(struct pmu_cmd));
	memset(&handlerparams, 0, sizeof(struct pmgr_pmucmdhandler_params));

	cmd.hdr.unit_id = PMU_UNIT_PMGR;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_pwr_devices_query) +
			(u32)sizeof(struct pmu_hdr);

	pcmd = &cmd.cmd.pmgr.pwr_dev_query;
	pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_PWR_DEVICES_QUERY;
	pcmd->dev_mask = pwr_dev_mask;

	payload.out.buf = ppayload;
	payload.out.size = sizeof(struct nv_pmu_pmgr_pwr_devices_query_payload);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.out.offset = NV_PMU_PMGR_PWR_DEVICES_QUERY_ALLOC_OFFSET;

	/* Set up the handler params to communicate back results. */
	handlerparams.success = 0;

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			pmgr_pmucmdhandler,
			(void *)&handlerparams,
			&seqdesc, ~0);
	if (status) {
		nvgpu_err(g,
			"unable to post pmgr query cmd for unit %x cmd id %x dev mask %x",
			cmd.hdr.unit_id, pcmd->cmd_type, pcmd->dev_mask);
		goto exit;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handlerparams.success, 1);

	if (handlerparams.success == 0) {
		nvgpu_err(g, "could not process cmd");
		status = -ETIMEDOUT;
		goto exit;
	}

exit:
	return status;
}

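/*
 * Send the final LOAD command that tells the PMGR unit to act on the
 * previously supplied tables; blocks until the PMU acknowledges it.
 */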
static u32 pmgr_pmu_load_blocking(struct gk20a *g)
{
	struct pmu_cmd cmd = { {0} };
	struct nv_pmu_pmgr_cmd_load *pcmd;
	u32 status;
	u32 seqdesc;
	struct pmgr_pmucmdhandler_params handlerparams = {0};

	cmd.hdr.unit_id = PMU_UNIT_PMGR;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_load) +
			(u32)sizeof(struct pmu_hdr);

	pcmd = &cmd.cmd.pmgr.load;
	pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_LOAD;

	/* Set up the handler params to communicate back results. */
	handlerparams.success = 0;

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
			PMU_COMMAND_QUEUE_LPQ,
			pmgr_pmucmdhandler,
			(void *)&handlerparams,
			&seqdesc, ~0);
	if (status) {
		nvgpu_err(g,
			"unable to post pmgr load cmd for unit %x cmd id %x",
			cmd.hdr.unit_id, pcmd->cmd_type);
		goto exit;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handlerparams.success, 1);

	if (handlerparams.success == 0) {
		nvgpu_err(g, "could not process cmd");
		status = -ETIMEDOUT;
		goto exit;
	}

exit:
	return status;
}

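/*
 * Push the complete PMGR configuration to the PMU in order: the I2C
 * device topology first, then (for each non-empty board-object
 * group) the power-device topology, power monitor, and power
 * policies, finishing with the blocking LOAD command.
 */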
u32 pmgr_send_pmgr_tables_to_pmu(struct gk20a *g)
{
	u32 status = 0;

	status = pmgr_send_i2c_device_topology_to_pmu(g);
	if (status) {
		nvgpu_err(g,
			"pmgr_send_i2c_device_topology_to_pmu failed %x",
			status);
		goto exit;
	}

	if (!BOARDOBJGRP_IS_EMPTY(&g->pmgr_pmu.pmgr_deviceobjs.super.super)) {
		status = pmgr_send_pwr_device_topology_to_pmu(g);
		if (status) {
			nvgpu_err(g,
				"pmgr_send_pwr_device_topology_to_pmu failed %x",
				status);
			goto exit;
		}
	}

	if (!(BOARDOBJGRP_IS_EMPTY(
			&g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super)) ||
		!(BOARDOBJGRP_IS_EMPTY(
			&g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super))) {
		status = pmgr_send_pwr_monitor_to_pmu(g);
		if (status) {
			nvgpu_err(g,
				"pmgr_send_pwr_monitor_to_pmu failed %x", status);
			goto exit;
		}
	}

	if (!(BOARDOBJGRP_IS_EMPTY(
			&g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super)) ||
		!(BOARDOBJGRP_IS_EMPTY(
			&g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super)) ||
		!(BOARDOBJGRP_IS_EMPTY(
			&g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super))) {
		status = pmgr_send_pwr_policy_to_pmu(g);
		if (status) {
			nvgpu_err(g,
				"pmgr_send_pwr_policy_to_pmu failed %x", status);
			goto exit;
		}
	}

	status = pmgr_pmu_load_blocking(g);
	if (status) {
		nvgpu_err(g,
			"pmgr_pmu_load_blocking failed %x", status);
		goto exit;
	}

exit:
	return status;
}