From 01e6fac4d61fdd7fff5433942ec93fc2ea1e4df1 Mon Sep 17 00:00:00 2001 From: Joshua Bakita Date: Wed, 28 Jun 2023 18:24:25 -0400 Subject: Include nvgpu headers These are needed to build on NVIDIA's Jetson boards for the time being. Only a couple structs are required, so it should be fairly easy to remove this dependency at some point in the future. --- include/pmgr/pmgr.c | 111 +++++++ include/pmgr/pmgr.h | 43 +++ include/pmgr/pmgrpmu.c | 546 ++++++++++++++++++++++++++++++++ include/pmgr/pmgrpmu.h | 39 +++ include/pmgr/pwrdev.c | 319 +++++++++++++++++++ include/pmgr/pwrdev.h | 60 ++++ include/pmgr/pwrmonitor.c | 376 ++++++++++++++++++++++ include/pmgr/pwrmonitor.h | 69 ++++ include/pmgr/pwrpolicy.c | 782 ++++++++++++++++++++++++++++++++++++++++++++++ include/pmgr/pwrpolicy.h | 136 ++++++++ 10 files changed, 2481 insertions(+) create mode 100644 include/pmgr/pmgr.c create mode 100644 include/pmgr/pmgr.h create mode 100644 include/pmgr/pmgrpmu.c create mode 100644 include/pmgr/pmgrpmu.h create mode 100644 include/pmgr/pwrdev.c create mode 100644 include/pmgr/pwrdev.h create mode 100644 include/pmgr/pwrmonitor.c create mode 100644 include/pmgr/pwrmonitor.h create mode 100644 include/pmgr/pwrpolicy.c create mode 100644 include/pmgr/pwrpolicy.h (limited to 'include/pmgr') diff --git a/include/pmgr/pmgr.c b/include/pmgr/pmgr.c new file mode 100644 index 0000000..f5be01b --- /dev/null +++ b/include/pmgr/pmgr.c @@ -0,0 +1,111 @@ +/* + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
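+ *
+ * The functions below are thin blocking wrappers around the PMU
+ * power-device query path in pmgrpmu.c. A rough usage sketch
+ * (illustrative addition, not part of the original NVIDIA sources;
+ * it assumes g points at a fully initialized GPU whose pmgr tables
+ * have already been sent to the PMU):
+ *
+ *   u32 mw, ma, uv;
+ *   if (pmgr_pwr_devices_get_power(g, &mw) == 0 &&
+ *       pmgr_pwr_devices_get_current(g, &ma) == 0 &&
+ *       pmgr_pwr_devices_get_voltage(g, &uv) == 0)
+ *       nvgpu_info(g, "rail 0: %u mW, %u mA, %u uV", mw, ma, uv);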
+ */
+
+#include <nvgpu/gk20a.h>	/* assumed: bracketed header name missing in this copy */
+
+#include "pwrdev.h"
+#include "pmgrpmu.h"
+
+int pmgr_pwr_devices_get_power(struct gk20a *g, u32 *val)
+{
+	struct nv_pmu_pmgr_pwr_devices_query_payload payload;
+	int status;
+
+	status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload);
+	if (status) {
+		nvgpu_err(g, "pmgr_pwr_devices_get_power failed %x",
+			status);
+	}
+
+	*val = payload.devices[0].powerm_w;
+
+	return status;
+}
+
+int pmgr_pwr_devices_get_current(struct gk20a *g, u32 *val)
+{
+	struct nv_pmu_pmgr_pwr_devices_query_payload payload;
+	int status;
+
+	status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload);
+	if (status) {
+		nvgpu_err(g, "pmgr_pwr_devices_get_current failed %x",
+			status);
+	}
+
+	*val = payload.devices[0].currentm_a;
+
+	return status;
+}
+
+int pmgr_pwr_devices_get_voltage(struct gk20a *g, u32 *val)
+{
+	struct nv_pmu_pmgr_pwr_devices_query_payload payload;
+	int status;
+
+	status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload);
+	if (status) {
+		nvgpu_err(g, "pmgr_pwr_devices_get_voltage failed %x",
+			status);
+	}
+
+	*val = payload.devices[0].voltageu_v;
+
+	return status;
+}
+
+u32 pmgr_domain_sw_setup(struct gk20a *g)
+{
+	u32 status;
+
+	status = pmgr_device_sw_setup(g);
+	if (status) {
+		nvgpu_err(g,
+			"error creating boardobjgrp for pmgr devices, status - 0x%x",
+			status);
+		goto exit;
+	}
+
+	status = pmgr_monitor_sw_setup(g);
+	if (status) {
+		nvgpu_err(g,
+			"error creating boardobjgrp for pmgr monitor, status - 0x%x",
+			status);
+		goto exit;
+	}
+
+	status = pmgr_policy_sw_setup(g);
+	if (status) {
+		nvgpu_err(g,
+			"error creating boardobjgrp for pmgr policy, status - 0x%x",
+			status);
+		goto exit;
+	}
+
+exit:
+	return status;
+}
+
+int pmgr_domain_pmu_setup(struct gk20a *g)
+{
+	return pmgr_send_pmgr_tables_to_pmu(g);
+}
diff --git a/include/pmgr/pmgr.h b/include/pmgr/pmgr.h
new file mode 100644
index 0000000..9b142de
--- /dev/null
+++ b/include/pmgr/pmgr.h
@@ -0,0 +1,43 @@
+/*
+ * general power device structures & definitions
+ *
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
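+ *
+ * Setup is two-phase: pmgr_domain_sw_setup() parses the VBIOS power
+ * tables into the boardobj groups held in struct pmgr_pmupstate, and
+ * pmgr_domain_pmu_setup() then ships those tables to the PMU, so it can
+ * only run once the PMU has booted. A hedged sketch of the expected
+ * call order during init (illustrative, not from the original sources):
+ *
+ *   if (pmgr_domain_sw_setup(g) == 0 &&
+ *       pmgr_domain_pmu_setup(g) == 0)
+ *       nvgpu_info(g, "pmgr telemetry and policies are live");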
+ */ +#ifndef NVGPU_PMGR_H +#define NVGPU_PMGR_H + +#include "pwrdev.h" +#include "pwrmonitor.h" +#include "pwrpolicy.h" + +struct pmgr_pmupstate { + struct pwr_devices pmgr_deviceobjs; + struct pmgr_pwr_monitor pmgr_monitorobjs; + struct pmgr_pwr_policy pmgr_policyobjs; +}; + +u32 pmgr_domain_sw_setup(struct gk20a *g); +int pmgr_domain_pmu_setup(struct gk20a *g); +int pmgr_pwr_devices_get_current(struct gk20a *g, u32 *val); +int pmgr_pwr_devices_get_voltage(struct gk20a *g, u32 *val); +int pmgr_pwr_devices_get_power(struct gk20a *g, u32 *val); + +#endif /* NVGPU_PMGR_H */ diff --git a/include/pmgr/pmgrpmu.c b/include/pmgr/pmgrpmu.c new file mode 100644 index 0000000..b6947f2 --- /dev/null +++ b/include/pmgr/pmgrpmu.c @@ -0,0 +1,546 @@ +/* + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
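+ *
+ * Every command in this file follows the same blocking handshake:
+ * nvgpu_pmu_cmd_post() queues the command with pmgr_pmucmdhandler() as
+ * the completion callback, then pmu_wait_message_cond() polls until the
+ * handler flips handlerparams.success to 1, or the gr idle timeout
+ * expires and the failure is surfaced as -ETIMEDOUT. In outline (a
+ * condensation of the code below, not additional API):
+ *
+ *   handlerparams.success = 0;
+ *   nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, PMU_COMMAND_QUEUE_LPQ,
+ *                      pmgr_pmucmdhandler, &handlerparams, &seqdesc, ~0);
+ *   pmu_wait_message_cond(&g->pmu, gk20a_get_gr_idle_timeout(g),
+ *                         &handlerparams.success, 1);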
+ */
+
+/* assumed includes: the bracketed header names are missing in this copy */
+#include <nvgpu/bios.h>
+#include <nvgpu/kmem.h>
+#include <nvgpu/pmu.h>
+#include <nvgpu/gk20a.h>
+
+#include "gp106/bios_gp106.h"
+
+#include "boardobj/boardobjgrp.h"
+#include "boardobj/boardobjgrp_e32.h"
+
+#include "pwrdev.h"
+#include "pmgrpmu.h"
+
+struct pmgr_pmucmdhandler_params {
+	u32 success;
+};
+
+static void pmgr_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
+		void *param, u32 handle, u32 status)
+{
+	struct pmgr_pmucmdhandler_params *phandlerparams =
+		(struct pmgr_pmucmdhandler_params *)param;
+
+	if ((msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_SET_OBJECT) &&
+		(msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_QUERY) &&
+		(msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_LOAD)) {
+		nvgpu_err(g, "unknown msg %x", msg->msg.pmgr.msg_type);
+		return;
+	}
+
+	if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_SET_OBJECT) {
+		if ((msg->msg.pmgr.set_object.b_success != 1) ||
+			(msg->msg.pmgr.set_object.flcnstatus != 0U)) {
+			nvgpu_err(g, "pmgr msg failed %x %x %x %x",
+				msg->msg.pmgr.set_object.msg_type,
+				msg->msg.pmgr.set_object.b_success,
+				msg->msg.pmgr.set_object.flcnstatus,
+				msg->msg.pmgr.set_object.object_type);
+			return;
+		}
+	} else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_QUERY) {
+		if ((msg->msg.pmgr.query.b_success != 1) ||
+			(msg->msg.pmgr.query.flcnstatus != 0U)) {
+			nvgpu_err(g, "pmgr msg failed %x %x %x %x",
+				msg->msg.pmgr.query.msg_type,
+				msg->msg.pmgr.query.b_success,
+				msg->msg.pmgr.query.flcnstatus,
+				msg->msg.pmgr.query.cmd_type);
+			return;
+		}
+	} else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_LOAD) {
+		if ((msg->msg.pmgr.load.b_success != 1) ||
+			(msg->msg.pmgr.load.flcnstatus != 0U)) {
+			nvgpu_err(g, "pmgr msg failed %x %x %x",
+				msg->msg.pmgr.load.msg_type,
+				msg->msg.pmgr.load.b_success,
+				msg->msg.pmgr.load.flcnstatus);
+			return;
+		}
+	}
+
+	phandlerparams->success = 1;
+}
+
+static u32 pmgr_pmu_set_object(struct gk20a *g,
+		u8 type,
+		u16 dmem_size,
+		u16 fb_size,
+		void *pobj)
+{
+	struct pmu_cmd cmd;
+	struct pmu_payload payload;
+	struct nv_pmu_pmgr_cmd_set_object *pcmd;
+	u32 status;
+	u32 seqdesc;
+	struct pmgr_pmucmdhandler_params handlerparams;
+
+	memset(&payload, 0, sizeof(struct pmu_payload));
+	memset(&cmd, 0, sizeof(struct pmu_cmd));
+	memset(&handlerparams, 0, sizeof(struct pmgr_pmucmdhandler_params));
+
+	cmd.hdr.unit_id = PMU_UNIT_PMGR;
+	cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_set_object) +
+			(u32)sizeof(struct pmu_hdr);
+
+	pcmd = &cmd.cmd.pmgr.set_object;
+	pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_SET_OBJECT;
+	pcmd->object_type = type;
+
+	payload.in.buf = pobj;
+	payload.in.size = dmem_size;
+	payload.in.fb_size = fb_size;
+	payload.in.offset = NV_PMU_PMGR_SET_OBJECT_ALLOC_OFFSET;
+
+	/* Setup the handler params to communicate back results.*/
+	handlerparams.success = 0;
+
+	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
+			PMU_COMMAND_QUEUE_LPQ,
+			pmgr_pmucmdhandler,
+			(void *)&handlerparams,
+			&seqdesc, ~0);
+	if (status) {
+		nvgpu_err(g,
+			"unable to post pmgr cmd for unit %x cmd id %x obj type %x",
+			cmd.hdr.unit_id, pcmd->cmd_type, pcmd->object_type);
+		goto exit;
+	}
+
+	pmu_wait_message_cond(&g->pmu,
+			gk20a_get_gr_idle_timeout(g),
+			&handlerparams.success, 1);
+
+	if (handlerparams.success == 0U) {
+		nvgpu_err(g, "could not process cmd");
+		status = -ETIMEDOUT;
+		goto exit;
+	}
+
+exit:
+	return status;
+}
+
+static u32 pmgr_send_i2c_device_topology_to_pmu(struct gk20a *g)
+{
+	struct nv_pmu_pmgr_i2c_device_desc_table i2c_desc_table;
+	u32 idx = g->ina3221_dcb_index;
+	u32 status = 0;
+
+	/* INA3221 I2C device info */
+	i2c_desc_table.dev_mask =
(1UL << idx); + + /* INA3221 */ + i2c_desc_table.devices[idx].super.type = 0x4E; + + i2c_desc_table.devices[idx].dcb_index = idx; + i2c_desc_table.devices[idx].i2c_address = g->ina3221_i2c_address; + i2c_desc_table.devices[idx].i2c_flags = 0xC2F; + i2c_desc_table.devices[idx].i2c_port = g->ina3221_i2c_port; + + /* Pass the table down the PMU as an object */ + status = pmgr_pmu_set_object( + g, + NV_PMU_PMGR_OBJECT_I2C_DEVICE_DESC_TABLE, + (u16)sizeof(struct nv_pmu_pmgr_i2c_device_desc_table), + PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED, + &i2c_desc_table); + + if (status) { + nvgpu_err(g, "pmgr_pmu_set_object failed %x", + status); + } + + return status; +} + +static int pmgr_send_pwr_device_topology_to_pmu(struct gk20a *g) +{ + struct nv_pmu_pmgr_pwr_device_desc_table *pwr_desc_table; + struct nv_pmu_pmgr_pwr_device_desc_table_header *ppwr_desc_header; + int status = 0; + + /* Set the BA-device-independent HW information */ + pwr_desc_table = nvgpu_kzalloc(g, sizeof(*pwr_desc_table)); + if (!pwr_desc_table) { + return -ENOMEM; + } + + ppwr_desc_header = &(pwr_desc_table->hdr.data); + ppwr_desc_header->ba_info.b_initialized_and_used = false; + + /* populate the table */ + boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&ppwr_desc_header->super, + g->pmgr_pmu.pmgr_deviceobjs.super.super.objmask); + + status = boardobjgrp_pmudatainit_legacy(g, + &g->pmgr_pmu.pmgr_deviceobjs.super.super, + (struct nv_pmu_boardobjgrp_super *)pwr_desc_table); + + if (status) { + nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x", + status); + goto exit; + } + + /* Pass the table down the PMU as an object */ + status = pmgr_pmu_set_object( + g, + NV_PMU_PMGR_OBJECT_PWR_DEVICE_DESC_TABLE, + (u16)sizeof( + union nv_pmu_pmgr_pwr_device_dmem_size), + (u16)sizeof(struct nv_pmu_pmgr_pwr_device_desc_table), + pwr_desc_table); + + if (status) { + nvgpu_err(g, "pmgr_pmu_set_object failed %x", + status); + } + +exit: + nvgpu_kfree(g, pwr_desc_table); + return status; +} + +static int pmgr_send_pwr_mointer_to_pmu(struct gk20a *g) +{ + struct nv_pmu_pmgr_pwr_monitor_pack *pwr_monitor_pack = NULL; + struct nv_pmu_pmgr_pwr_channel_header *pwr_channel_hdr; + struct nv_pmu_pmgr_pwr_chrelationship_header *pwr_chrelationship_header; + u32 max_dmem_size; + int status = 0; + + pwr_monitor_pack = nvgpu_kzalloc(g, sizeof(*pwr_monitor_pack)); + if (!pwr_monitor_pack) { + return -ENOMEM; + } + + /* Copy all the global settings from the RM copy */ + pwr_channel_hdr = &(pwr_monitor_pack->channels.hdr.data); + *pwr_monitor_pack = g->pmgr_pmu.pmgr_monitorobjs.pmu_data; + + boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_channel_hdr->super, + g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super.objmask); + + /* Copy in each channel */ + status = boardobjgrp_pmudatainit_legacy(g, + &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super, + (struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack->channels)); + + if (status) { + nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x", + status); + goto exit; + } + + /* Copy in each channel relationship */ + pwr_chrelationship_header = &(pwr_monitor_pack->ch_rels.hdr.data); + + boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_chrelationship_header->super, + g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super.objmask); + + pwr_channel_hdr->physical_channel_mask = g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask; + pwr_channel_hdr->type = NV_PMU_PMGR_PWR_MONITOR_TYPE_NO_POLLING; + + status = boardobjgrp_pmudatainit_legacy(g, + &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super, + (struct 
nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack->ch_rels)); + + if (status) { + nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x", + status); + goto exit; + } + + /* Calculate the max Dmem buffer size */ + max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_monitor_dmem_size); + + /* Pass the table down the PMU as an object */ + status = pmgr_pmu_set_object( + g, + NV_PMU_PMGR_OBJECT_PWR_MONITOR, + (u16)max_dmem_size, + (u16)sizeof(struct nv_pmu_pmgr_pwr_monitor_pack), + pwr_monitor_pack); + + if (status) { + nvgpu_err(g, "pmgr_pmu_set_object failed %x", + status); + } + +exit: + nvgpu_kfree(g, pwr_monitor_pack); + return status; +} + +static int pmgr_send_pwr_policy_to_pmu(struct gk20a *g) +{ + struct nv_pmu_pmgr_pwr_policy_pack *ppwrpack = NULL; + struct pwr_policy *ppolicy = NULL; + int status = 0; + u8 indx; + u32 max_dmem_size; + + ppwrpack = nvgpu_kzalloc(g, sizeof(struct nv_pmu_pmgr_pwr_policy_pack)); + if (!ppwrpack) { + nvgpu_err(g, "pwr policy alloc failed %x", + status); + status = -ENOMEM; + goto exit; + } + + ppwrpack->policies.hdr.data.version = g->pmgr_pmu.pmgr_policyobjs.version; + ppwrpack->policies.hdr.data.b_enabled = g->pmgr_pmu.pmgr_policyobjs.b_enabled; + + boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *) + &ppwrpack->policies.hdr.data.super, + g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super.objmask); + + memset(&ppwrpack->policies.hdr.data.reserved_pmu_policy_mask, + 0, + sizeof(ppwrpack->policies.hdr.data.reserved_pmu_policy_mask)); + + ppwrpack->policies.hdr.data.base_sample_period = + g->pmgr_pmu.pmgr_policyobjs.base_sample_period; + ppwrpack->policies.hdr.data.min_client_sample_period = + g->pmgr_pmu.pmgr_policyobjs.min_client_sample_period; + ppwrpack->policies.hdr.data.low_sampling_mult = + g->pmgr_pmu.pmgr_policyobjs.low_sampling_mult; + + memcpy(&ppwrpack->policies.hdr.data.global_ceiling, + &g->pmgr_pmu.pmgr_policyobjs.global_ceiling, + sizeof(struct nv_pmu_perf_domain_group_limits)); + + memcpy(&ppwrpack->policies.hdr.data.semantic_policy_tbl, + &g->pmgr_pmu.pmgr_policyobjs.policy_idxs, + sizeof(g->pmgr_pmu.pmgr_policyobjs.policy_idxs)); + + BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, indx, + ppwrpack->policies.hdr.data.super.obj_mask.super.data[0]) { + ppolicy = PMGR_GET_PWR_POLICY(g, indx); + + status = ((struct boardobj *)ppolicy)->pmudatainit(g, (struct boardobj *)ppolicy, + (struct nv_pmu_boardobj *)&(ppwrpack->policies.policies[indx].data)); + if (status) { + nvgpu_err(g, "pmudatainit failed %x indx %x", + status, indx); + status = -ENOMEM; + goto exit; + } + } + BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END; + + boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *) + &ppwrpack->policy_rels.hdr.data.super, + g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super.objmask); + + boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *) + &ppwrpack->violations.hdr.data.super, + g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super.objmask); + + max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_policy_dmem_size); + + /* Pass the table down the PMU as an object */ + status = pmgr_pmu_set_object( + g, + NV_PMU_PMGR_OBJECT_PWR_POLICY, + (u16)max_dmem_size, + (u16)sizeof(struct nv_pmu_pmgr_pwr_policy_pack), + ppwrpack); + + if (status) { + nvgpu_err(g, "pmgr_pmu_set_object failed %x", + status); + } + +exit: + if (ppwrpack) { + nvgpu_kfree(g, ppwrpack); + } + + return status; +} + +u32 pmgr_pmu_pwr_devices_query_blocking( + struct gk20a *g, + u32 pwr_dev_mask, + struct nv_pmu_pmgr_pwr_devices_query_payload *ppayload) +{ + struct pmu_cmd cmd; + struct pmu_payload payload; + struct 
nv_pmu_pmgr_cmd_pwr_devices_query *pcmd; + u32 status; + u32 seqdesc; + struct pmgr_pmucmdhandler_params handlerparams; + + memset(&payload, 0, sizeof(struct pmu_payload)); + memset(&cmd, 0, sizeof(struct pmu_cmd)); + memset(&handlerparams, 0, sizeof(struct pmgr_pmucmdhandler_params)); + + cmd.hdr.unit_id = PMU_UNIT_PMGR; + cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_pwr_devices_query) + + (u32)sizeof(struct pmu_hdr); + + pcmd = &cmd.cmd.pmgr.pwr_dev_query; + pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_PWR_DEVICES_QUERY; + pcmd->dev_mask = pwr_dev_mask; + + payload.out.buf = ppayload; + payload.out.size = sizeof(struct nv_pmu_pmgr_pwr_devices_query_payload); + payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; + payload.out.offset = NV_PMU_PMGR_PWR_DEVICES_QUERY_ALLOC_OFFSET; + + /* Setup the handler params to communicate back results.*/ + handlerparams.success = 0; + + status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, + PMU_COMMAND_QUEUE_LPQ, + pmgr_pmucmdhandler, + (void *)&handlerparams, + &seqdesc, ~0); + if (status) { + nvgpu_err(g, + "unable to post pmgr query cmd for unit %x cmd id %x dev mask %x", + cmd.hdr.unit_id, pcmd->cmd_type, pcmd->dev_mask); + goto exit; + } + + pmu_wait_message_cond(&g->pmu, + gk20a_get_gr_idle_timeout(g), + &handlerparams.success, 1); + + if (handlerparams.success == 0U) { + nvgpu_err(g, "could not process cmd"); + status = -ETIMEDOUT; + goto exit; + } + +exit: + return status; +} + +static u32 pmgr_pmu_load_blocking(struct gk20a *g) +{ + struct pmu_cmd cmd = { {0} }; + struct nv_pmu_pmgr_cmd_load *pcmd; + u32 status; + u32 seqdesc; + struct pmgr_pmucmdhandler_params handlerparams = {0}; + + cmd.hdr.unit_id = PMU_UNIT_PMGR; + cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_load) + + (u32)sizeof(struct pmu_hdr); + + pcmd = &cmd.cmd.pmgr.load; + pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_LOAD; + + /* Setup the handler params to communicate back results.*/ + handlerparams.success = 0; + + status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, + PMU_COMMAND_QUEUE_LPQ, + pmgr_pmucmdhandler, + (void *)&handlerparams, + &seqdesc, ~0); + if (status) { + nvgpu_err(g, + "unable to post pmgr load cmd for unit %x cmd id %x", + cmd.hdr.unit_id, pcmd->cmd_type); + goto exit; + } + + pmu_wait_message_cond(&g->pmu, + gk20a_get_gr_idle_timeout(g), + &handlerparams.success, 1); + + if (handlerparams.success == 0U) { + nvgpu_err(g, "could not process cmd"); + status = -ETIMEDOUT; + goto exit; + } + +exit: + return status; +} + +int pmgr_send_pmgr_tables_to_pmu(struct gk20a *g) +{ + int status = 0; + + status = pmgr_send_i2c_device_topology_to_pmu(g); + + if (status) { + nvgpu_err(g, + "pmgr_send_i2c_device_topology_to_pmu failed %x", + status); + goto exit; + } + + if (!BOARDOBJGRP_IS_EMPTY(&g->pmgr_pmu.pmgr_deviceobjs.super.super)) { + status = pmgr_send_pwr_device_topology_to_pmu(g); + if (status) { + nvgpu_err(g, + "pmgr_send_pwr_device_topology_to_pmu failed %x", + status); + goto exit; + } + } + + if (!(BOARDOBJGRP_IS_EMPTY( + &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super)) || + !(BOARDOBJGRP_IS_EMPTY( + &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super))) { + status = pmgr_send_pwr_mointer_to_pmu(g); + if (status) { + nvgpu_err(g, + "pmgr_send_pwr_mointer_to_pmu failed %x", status); + goto exit; + } + } + + if (!(BOARDOBJGRP_IS_EMPTY( + &g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super)) || + !(BOARDOBJGRP_IS_EMPTY( + &g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super)) || + !(BOARDOBJGRP_IS_EMPTY( + &g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super))) { + status 
= pmgr_send_pwr_policy_to_pmu(g);
+		if (status) {
+			nvgpu_err(g,
+				"pmgr_send_pwr_policy_to_pmu failed %x", status);
+			goto exit;
+		}
+	}
+
+	status = pmgr_pmu_load_blocking(g);
+	if (status) {
+		nvgpu_err(g,
+			"pmgr_pmu_load_blocking failed %x", status);
+		goto exit;
+	}
+
+exit:
+	return status;
+}
diff --git a/include/pmgr/pmgrpmu.h b/include/pmgr/pmgrpmu.h
new file mode 100644
index 0000000..f4ffaef
--- /dev/null
+++ b/include/pmgr/pmgrpmu.h
@@ -0,0 +1,39 @@
+/*
+ * general power device control structures & definitions
+ *
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef NVGPU_PMGRPMU_H
+#define NVGPU_PMGRPMU_H
+
+#include <nvgpu/gk20a.h>	/* assumed: bracketed header name missing in this copy */
+
+#include "pwrdev.h"
+#include "pwrmonitor.h"
+
+int pmgr_send_pmgr_tables_to_pmu(struct gk20a *g);
+
+u32 pmgr_pmu_pwr_devices_query_blocking(
+	struct gk20a *g,
+	u32 pwr_dev_mask,
+	struct nv_pmu_pmgr_pwr_devices_query_payload *ppayload);
+
+#endif /* NVGPU_PMGRPMU_H */
diff --git a/include/pmgr/pwrdev.c b/include/pmgr/pwrdev.c
new file mode 100644
index 0000000..c1bf084
--- /dev/null
+++ b/include/pmgr/pwrdev.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
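+ *
+ * This file turns VBIOS Power Sensors Table (version 2.x) entries into
+ * pwr_device_ina3221 board objects. Each 0x15-byte entry is decoded
+ * with BIOS_GET_FIELD(), which conceptually masks and shifts a named
+ * bitfield out of a packed word, roughly (a sketch of the idea only;
+ * see nvgpu's bios headers for the real macro):
+ *
+ *   idx = (u8)((class_param0 & I2C_INDEX_MASK) >> I2C_INDEX_SHIFT);
+ *
+ * Entries whose class is not the I2C/INA3221 class are skipped rather
+ * than treated as errors.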
+ */ + +#include +#include + +#include "pwrdev.h" +#include "boardobj/boardobjgrp.h" +#include "boardobj/boardobjgrp_e32.h" +#include "gp106/bios_gp106.h" + +static int _pwr_device_pmudata_instget(struct gk20a *g, + struct nv_pmu_boardobjgrp *pmuboardobjgrp, + struct nv_pmu_boardobj **ppboardobjpmudata, + u8 idx) +{ + struct nv_pmu_pmgr_pwr_device_desc_table *ppmgrdevice = + (struct nv_pmu_pmgr_pwr_device_desc_table *)pmuboardobjgrp; + + nvgpu_log_info(g, " "); + + /*check whether pmuboardobjgrp has a valid boardobj in index*/ + if (((u32)BIT(idx) & + ppmgrdevice->hdr.data.super.obj_mask.super.data[0]) == 0U) { + return -EINVAL; + } + + *ppboardobjpmudata = (struct nv_pmu_boardobj *) + &ppmgrdevice->devices[idx].data.board_obj; + + nvgpu_log_info(g, " Done"); + + return 0; +} + +static int _pwr_domains_pmudatainit_ina3221(struct gk20a *g, + struct boardobj *board_obj_ptr, + struct nv_pmu_boardobj *ppmudata) +{ + struct nv_pmu_pmgr_pwr_device_desc_ina3221 *ina3221_desc; + struct pwr_device_ina3221 *ina3221; + int status = 0; + u32 indx; + + status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); + if (status) { + nvgpu_err(g, + "error updating pmu boardobjgrp for pwr domain 0x%x", + status); + goto done; + } + + ina3221 = (struct pwr_device_ina3221 *)board_obj_ptr; + ina3221_desc = (struct nv_pmu_pmgr_pwr_device_desc_ina3221 *) ppmudata; + + ina3221_desc->super.power_corr_factor = ina3221->super.power_corr_factor; + ina3221_desc->i2c_dev_idx = ina3221->super.i2c_dev_idx; + ina3221_desc->configuration = ina3221->configuration; + ina3221_desc->mask_enable = ina3221->mask_enable; + /* configure NV_PMU_THERM_EVENT_EXT_OVERT */ + ina3221_desc->event_mask = (1 << 0); + ina3221_desc->curr_correct_m = ina3221->curr_correct_m; + ina3221_desc->curr_correct_b = ina3221->curr_correct_b; + + for (indx = 0; indx < NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM; indx++) { + ina3221_desc->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx]; + } + +done: + return status; +} + +static struct boardobj *construct_pwr_device(struct gk20a *g, + void *pargs, u16 pargs_size, u8 type) +{ + struct boardobj *board_obj_ptr = NULL; + int status; + u32 indx; + struct pwr_device_ina3221 *pwrdev; + struct pwr_device_ina3221 *ina3221 = (struct pwr_device_ina3221*)pargs; + + status = boardobj_construct_super(g, &board_obj_ptr, + pargs_size, pargs); + if (status) { + return NULL; + } + + pwrdev = (struct pwr_device_ina3221*)board_obj_ptr; + + /* Set Super class interfaces */ + board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_ina3221; + pwrdev->super.power_rail = ina3221->super.power_rail; + pwrdev->super.i2c_dev_idx = ina3221->super.i2c_dev_idx; + pwrdev->super.power_corr_factor = (1 << 12); + pwrdev->super.bIs_inforom_config = false; + + /* Set INA3221-specific information */ + pwrdev->configuration = ina3221->configuration; + pwrdev->mask_enable = ina3221->mask_enable; + pwrdev->gpio_function = ina3221->gpio_function; + pwrdev->curr_correct_m = ina3221->curr_correct_m; + pwrdev->curr_correct_b = ina3221->curr_correct_b; + + for (indx = 0; indx < NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM; indx++) { + pwrdev->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx]; + } + + nvgpu_log_info(g, " Done"); + + return board_obj_ptr; +} + +static int devinit_get_pwr_device_table(struct gk20a *g, + struct pwr_devices *ppwrdeviceobjs) +{ + int status = 0; + u8 *pwr_device_table_ptr = NULL; + u8 *curr_pwr_device_table_ptr = NULL; + struct boardobj *boardobj; + struct pwr_sensors_2x_header pwr_sensor_table_header = { 0 }; + struct 
pwr_sensors_2x_entry pwr_sensor_table_entry = { 0 }; + u32 index; + u32 obj_index = 0; + u16 pwr_device_size; + union { + struct boardobj boardobj; + struct pwr_device pwrdev; + struct pwr_device_ina3221 ina3221; + } pwr_device_data; + + nvgpu_log_info(g, " "); + + pwr_device_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, + g->bios.perf_token, POWER_SENSORS_TABLE); + if (pwr_device_table_ptr == NULL) { + status = -EINVAL; + goto done; + } + + memcpy(&pwr_sensor_table_header, pwr_device_table_ptr, + VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08); + + if (pwr_sensor_table_header.version != + VBIOS_POWER_SENSORS_VERSION_2X) { + status = -EINVAL; + goto done; + } + + if (pwr_sensor_table_header.header_size < + VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08) { + status = -EINVAL; + goto done; + } + + if (pwr_sensor_table_header.table_entry_size != + VBIOS_POWER_SENSORS_2X_ENTRY_SIZE_15) { + status = -EINVAL; + goto done; + } + + curr_pwr_device_table_ptr = (pwr_device_table_ptr + + VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08); + + for (index = 0; index < pwr_sensor_table_header.num_table_entries; index++) { + bool use_fxp8_8 = false; + u8 i2c_dev_idx; + u8 device_type; + + curr_pwr_device_table_ptr += (pwr_sensor_table_header.table_entry_size * index); + + pwr_sensor_table_entry.flags0 = *curr_pwr_device_table_ptr; + + memcpy(&pwr_sensor_table_entry.class_param0, + (curr_pwr_device_table_ptr + 1), + (VBIOS_POWER_SENSORS_2X_ENTRY_SIZE_15 - 1U)); + + device_type = (u8)BIOS_GET_FIELD( + pwr_sensor_table_entry.flags0, + NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS); + + if (device_type == NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS_I2C) { + i2c_dev_idx = (u8)BIOS_GET_FIELD( + pwr_sensor_table_entry.class_param0, + NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_INDEX); + use_fxp8_8 = (u8)BIOS_GET_FIELD( + pwr_sensor_table_entry.class_param0, + NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_USE_FXP8_8); + + pwr_device_data.ina3221.super.i2c_dev_idx = i2c_dev_idx; + pwr_device_data.ina3221.r_shuntm_ohm[0].use_fxp8_8 = use_fxp8_8; + pwr_device_data.ina3221.r_shuntm_ohm[1].use_fxp8_8 = use_fxp8_8; + pwr_device_data.ina3221.r_shuntm_ohm[2].use_fxp8_8 = use_fxp8_8; + pwr_device_data.ina3221.r_shuntm_ohm[0].rshunt_value = + (u16)BIOS_GET_FIELD( + pwr_sensor_table_entry.sensor_param0, + NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT0_MOHM); + + pwr_device_data.ina3221.r_shuntm_ohm[1].rshunt_value = + (u16)BIOS_GET_FIELD( + pwr_sensor_table_entry.sensor_param0, + NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT1_MOHM); + + pwr_device_data.ina3221.r_shuntm_ohm[2].rshunt_value = + (u16)BIOS_GET_FIELD( + pwr_sensor_table_entry.sensor_param1, + NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_RSHUNT2_MOHM); + pwr_device_data.ina3221.configuration = + (u16)BIOS_GET_FIELD( + pwr_sensor_table_entry.sensor_param1, + NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_CONFIGURATION); + + pwr_device_data.ina3221.mask_enable = + (u16)BIOS_GET_FIELD( + pwr_sensor_table_entry.sensor_param2, + NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_MASKENABLE); + + pwr_device_data.ina3221.gpio_function = + (u8)BIOS_GET_FIELD( + pwr_sensor_table_entry.sensor_param2, + NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_GPIOFUNCTION); + + pwr_device_data.ina3221.curr_correct_m = + (u16)BIOS_GET_FIELD( + pwr_sensor_table_entry.sensor_param3, + NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_M); + + pwr_device_data.ina3221.curr_correct_b = + (u16)BIOS_GET_FIELD( + 
pwr_sensor_table_entry.sensor_param3, + NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_B); + + if (!pwr_device_data.ina3221.curr_correct_m) { + pwr_device_data.ina3221.curr_correct_m = (1 << 12); + } + pwr_device_size = sizeof(struct pwr_device_ina3221); + } else { + continue; + } + + pwr_device_data.boardobj.type = CTRL_PMGR_PWR_DEVICE_TYPE_INA3221; + pwr_device_data.pwrdev.power_rail = (u8)0; + + boardobj = construct_pwr_device(g, &pwr_device_data, + pwr_device_size, pwr_device_data.boardobj.type); + + if (!boardobj) { + nvgpu_err(g, + "unable to create pwr device for %d type %d", index, pwr_device_data.boardobj.type); + status = -EINVAL; + goto done; + } + + status = boardobjgrp_objinsert(&ppwrdeviceobjs->super.super, + boardobj, obj_index); + + if (status) { + nvgpu_err(g, + "unable to insert pwr device boardobj for %d", index); + status = -EINVAL; + goto done; + } + + ++obj_index; + } + +done: + nvgpu_log_info(g, " done status %x", status); + return status; +} + +int pmgr_device_sw_setup(struct gk20a *g) +{ + int status; + struct boardobjgrp *pboardobjgrp = NULL; + struct pwr_devices *ppwrdeviceobjs; + + /* Construct the Super Class and override the Interfaces */ + status = boardobjgrpconstruct_e32(g, &g->pmgr_pmu.pmgr_deviceobjs.super); + if (status) { + nvgpu_err(g, + "error creating boardobjgrp for pmgr devices, status - 0x%x", + status); + goto done; + } + + pboardobjgrp = &g->pmgr_pmu.pmgr_deviceobjs.super.super; + ppwrdeviceobjs = &(g->pmgr_pmu.pmgr_deviceobjs); + + /* Override the Interfaces */ + pboardobjgrp->pmudatainstget = _pwr_device_pmudata_instget; + + status = devinit_get_pwr_device_table(g, ppwrdeviceobjs); + if (status) { + goto done; + } + +done: + nvgpu_log_info(g, " done status %x", status); + return status; +} diff --git a/include/pmgr/pwrdev.h b/include/pmgr/pwrdev.h new file mode 100644 index 0000000..4bcf65a --- /dev/null +++ b/include/pmgr/pwrdev.h @@ -0,0 +1,60 @@ +/* + * general power device structures & definitions + * + * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
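+ *
+ * The correction factors below are fixed point with 12 fractional
+ * bits: pwrdev.c seeds power_corr_factor, and any zero curr_correct_m,
+ * with (1 << 12), i.e. 1.0. A 1.5x correction would therefore be
+ * stored as 0x1800 and applied along the lines of (illustrative
+ * arithmetic inferred from those defaults):
+ *
+ *   corrected_ma = (u32)(((u64)raw_ma * curr_correct_m) >> 12);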
+ */
+#ifndef NVGPU_PMGR_PWRDEV_H
+#define NVGPU_PMGR_PWRDEV_H
+
+#include "boardobj/boardobj.h"
+#include <nvgpu/pmuif/gpmuifpmgr.h>	/* assumed: bracketed header name missing in this copy */
+#include "ctrl/ctrlpmgr.h"
+
+#define PWRDEV_I2CDEV_DEVICE_INDEX_NONE (0xFF)
+
+#define PWR_DEVICE_PROV_NUM_DEFAULT 1
+
+struct pwr_device {
+	struct boardobj super;
+	u8 power_rail;
+	u8 i2c_dev_idx;
+	bool bIs_inforom_config;
+	u32 power_corr_factor;
+};
+
+struct pwr_devices {
+	struct boardobjgrp_e32 super;
+};
+
+struct pwr_device_ina3221 {
+	struct pwr_device super;
+	struct ctrl_pmgr_pwr_device_info_rshunt
+		r_shuntm_ohm[NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM];
+	u16 configuration;
+	u16 mask_enable;
+	u8 gpio_function;
+	u16 curr_correct_m;
+	s16 curr_correct_b;
+};
+
+int pmgr_device_sw_setup(struct gk20a *g);
+
+#endif /* NVGPU_PMGR_PWRDEV_H */
diff --git a/include/pmgr/pwrmonitor.c b/include/pmgr/pwrmonitor.c
new file mode 100644
index 0000000..710ae85
--- /dev/null
+++ b/include/pmgr/pwrmonitor.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
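+ *
+ * Counterpart of pwrdev.c for the VBIOS Power Topology Table (2.x):
+ * SENSOR-class entries become pwr_channel_sensor board objects, and
+ * pmgr_monitor_sw_setup() then ORs BIT(index) of every sensor channel
+ * into physical_channel_mask. For example (worked example, not from
+ * the original sources), a board whose only sensor channel sits at
+ * index 0 ends up with physical_channel_mask == 0x1.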
+ */ + +#include +#include + +#include "pwrdev.h" +#include "boardobj/boardobjgrp.h" +#include "boardobj/boardobjgrp_e32.h" +#include "gp106/bios_gp106.h" + +static int _pwr_channel_pmudata_instget(struct gk20a *g, + struct nv_pmu_boardobjgrp *pmuboardobjgrp, + struct nv_pmu_boardobj **ppboardobjpmudata, + u8 idx) +{ + struct nv_pmu_pmgr_pwr_channel_desc *ppmgrchannel = + (struct nv_pmu_pmgr_pwr_channel_desc *)pmuboardobjgrp; + + nvgpu_log_info(g, " "); + + /*check whether pmuboardobjgrp has a valid boardobj in index*/ + if (((u32)BIT(idx) & + ppmgrchannel->hdr.data.super.obj_mask.super.data[0]) == 0U) { + return -EINVAL; + } + + *ppboardobjpmudata = (struct nv_pmu_boardobj *) + &ppmgrchannel->channels[idx].data.board_obj; + + /* handle Global/common data here as we need index */ + ppmgrchannel->channels[idx].data.pwr_channel.ch_idx = idx; + + nvgpu_log_info(g, " Done"); + + return 0; +} + +static int _pwr_channel_rels_pmudata_instget(struct gk20a *g, + struct nv_pmu_boardobjgrp *pmuboardobjgrp, + struct nv_pmu_boardobj **ppboardobjpmudata, + u8 idx) +{ + struct nv_pmu_pmgr_pwr_chrelationship_desc *ppmgrchrels = + (struct nv_pmu_pmgr_pwr_chrelationship_desc *)pmuboardobjgrp; + + nvgpu_log_info(g, " "); + + /*check whether pmuboardobjgrp has a valid boardobj in index*/ + if (((u32)BIT(idx) & + ppmgrchrels->hdr.data.super.obj_mask.super.data[0]) == 0U) { + return -EINVAL; + } + + *ppboardobjpmudata = (struct nv_pmu_boardobj *) + &ppmgrchrels->ch_rels[idx].data.board_obj; + + nvgpu_log_info(g, " Done"); + + return 0; +} + +static u32 _pwr_channel_state_init(struct gk20a *g) +{ + u8 indx = 0; + struct pwr_channel *pchannel; + u32 objmask = + g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super.objmask; + + /* Initialize each PWR_CHANNEL's dependent channel mask */ + BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, indx, objmask) { + pchannel = PMGR_PWR_MONITOR_GET_PWR_CHANNEL(g, indx); + if (pchannel == NULL) { + nvgpu_err(g, + "PMGR_PWR_MONITOR_GET_PWR_CHANNEL-failed %d", indx); + return -EINVAL; + } + pchannel->dependent_ch_mask =0; + } + BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END + + return 0; +} + +static bool _pwr_channel_implements(struct pwr_channel *pchannel, + u8 type) +{ + return (type == BOARDOBJ_GET_TYPE(pchannel)); +} + +static int _pwr_domains_pmudatainit_sensor(struct gk20a *g, + struct boardobj *board_obj_ptr, + struct nv_pmu_boardobj *ppmudata) +{ + struct nv_pmu_pmgr_pwr_channel_sensor *pmu_sensor_data; + struct pwr_channel_sensor *sensor; + int status = 0; + + status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); + if (status) { + nvgpu_err(g, + "error updating pmu boardobjgrp for pwr sensor 0x%x", + status); + goto done; + } + + sensor = (struct pwr_channel_sensor *)board_obj_ptr; + pmu_sensor_data = (struct nv_pmu_pmgr_pwr_channel_sensor *) ppmudata; + + pmu_sensor_data->super.pwr_rail = sensor->super.pwr_rail; + pmu_sensor_data->super.volt_fixedu_v = sensor->super.volt_fixed_uv; + pmu_sensor_data->super.pwr_corr_slope = sensor->super.pwr_corr_slope; + pmu_sensor_data->super.pwr_corr_offsetm_w = sensor->super.pwr_corr_offset_mw; + pmu_sensor_data->super.curr_corr_slope = sensor->super.curr_corr_slope; + pmu_sensor_data->super.curr_corr_offsetm_a = sensor->super.curr_corr_offset_ma; + pmu_sensor_data->super.dependent_ch_mask = sensor->super.dependent_ch_mask; + pmu_sensor_data->super.ch_idx = 0; + + pmu_sensor_data->pwr_dev_idx = sensor->pwr_dev_idx; + pmu_sensor_data->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx; + +done: + return status; +} + +static struct boardobj 
*construct_pwr_topology(struct gk20a *g, + void *pargs, u16 pargs_size, u8 type) +{ + struct boardobj *board_obj_ptr = NULL; + int status; + struct pwr_channel_sensor *pwrchannel; + struct pwr_channel_sensor *sensor = (struct pwr_channel_sensor*)pargs; + + status = boardobj_construct_super(g, &board_obj_ptr, + pargs_size, pargs); + if (status) { + return NULL; + } + + pwrchannel = (struct pwr_channel_sensor*)board_obj_ptr; + + /* Set Super class interfaces */ + board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_sensor; + + pwrchannel->super.pwr_rail = sensor->super.pwr_rail; + pwrchannel->super.volt_fixed_uv = sensor->super.volt_fixed_uv; + pwrchannel->super.pwr_corr_slope = sensor->super.pwr_corr_slope; + pwrchannel->super.pwr_corr_offset_mw = sensor->super.pwr_corr_offset_mw; + pwrchannel->super.curr_corr_slope = sensor->super.curr_corr_slope; + pwrchannel->super.curr_corr_offset_ma = sensor->super.curr_corr_offset_ma; + pwrchannel->super.dependent_ch_mask = 0; + + pwrchannel->pwr_dev_idx = sensor->pwr_dev_idx; + pwrchannel->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx; + + nvgpu_log_info(g, " Done"); + + return board_obj_ptr; +} + +static int devinit_get_pwr_topology_table(struct gk20a *g, + struct pmgr_pwr_monitor *ppwrmonitorobjs) +{ + int status = 0; + u8 *pwr_topology_table_ptr = NULL; + u8 *curr_pwr_topology_table_ptr = NULL; + struct boardobj *boardobj; + struct pwr_topology_2x_header pwr_topology_table_header = { 0 }; + struct pwr_topology_2x_entry pwr_topology_table_entry = { 0 }; + u32 index; + u32 obj_index = 0; + u16 pwr_topology_size; + union { + struct boardobj boardobj; + struct pwr_channel pwrchannel; + struct pwr_channel_sensor sensor; + } pwr_topology_data; + + nvgpu_log_info(g, " "); + + pwr_topology_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, + g->bios.perf_token, POWER_TOPOLOGY_TABLE); + if (pwr_topology_table_ptr == NULL) { + status = -EINVAL; + goto done; + } + + memcpy(&pwr_topology_table_header, pwr_topology_table_ptr, + VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06); + + if (pwr_topology_table_header.version != + VBIOS_POWER_TOPOLOGY_VERSION_2X) { + status = -EINVAL; + goto done; + } + + g->pmgr_pmu.pmgr_monitorobjs.b_is_topology_tbl_ver_1x = false; + + if (pwr_topology_table_header.header_size < + VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06) { + status = -EINVAL; + goto done; + } + + if (pwr_topology_table_header.table_entry_size != + VBIOS_POWER_TOPOLOGY_2X_ENTRY_SIZE_16) { + status = -EINVAL; + goto done; + } + + curr_pwr_topology_table_ptr = (pwr_topology_table_ptr + + VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06); + + for (index = 0; index < pwr_topology_table_header.num_table_entries; + index++) { + u8 class_type; + + curr_pwr_topology_table_ptr += (pwr_topology_table_header.table_entry_size * index); + + pwr_topology_table_entry.flags0 = *curr_pwr_topology_table_ptr; + pwr_topology_table_entry.pwr_rail = *(curr_pwr_topology_table_ptr + 1); + + memcpy(&pwr_topology_table_entry.param0, + (curr_pwr_topology_table_ptr + 2), + (VBIOS_POWER_TOPOLOGY_2X_ENTRY_SIZE_16 - 2U)); + + class_type = (u8)BIOS_GET_FIELD( + pwr_topology_table_entry.flags0, + NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS); + + if (class_type == NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS_SENSOR) { + pwr_topology_data.sensor.pwr_dev_idx = (u8)BIOS_GET_FIELD( + pwr_topology_table_entry.param1, + NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_INDEX); + pwr_topology_data.sensor.pwr_dev_prov_idx = (u8)BIOS_GET_FIELD( + pwr_topology_table_entry.param1, + 
NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_PROVIDER_INDEX); + + pwr_topology_size = sizeof(struct pwr_channel_sensor); + } else { + continue; + } + + /* Initialize data for the parent class */ + pwr_topology_data.boardobj.type = CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR; + pwr_topology_data.pwrchannel.pwr_rail = (u8)pwr_topology_table_entry.pwr_rail; + pwr_topology_data.pwrchannel.volt_fixed_uv = pwr_topology_table_entry.param0; + pwr_topology_data.pwrchannel.pwr_corr_slope = (1 << 12); + pwr_topology_data.pwrchannel.pwr_corr_offset_mw = 0; + pwr_topology_data.pwrchannel.curr_corr_slope = + (u32)pwr_topology_table_entry.curr_corr_slope; + pwr_topology_data.pwrchannel.curr_corr_offset_ma = + (s32)pwr_topology_table_entry.curr_corr_offset; + + boardobj = construct_pwr_topology(g, &pwr_topology_data, + pwr_topology_size, pwr_topology_data.boardobj.type); + + if (!boardobj) { + nvgpu_err(g, + "unable to create pwr topology for %d type %d", + index, pwr_topology_data.boardobj.type); + status = -EINVAL; + goto done; + } + + status = boardobjgrp_objinsert(&ppwrmonitorobjs->pwr_channels.super, + boardobj, obj_index); + + if (status) { + nvgpu_err(g, + "unable to insert pwr topology boardobj for %d", index); + status = -EINVAL; + goto done; + } + + ++obj_index; + } + +done: + nvgpu_log_info(g, " done status %x", status); + return status; +} + +int pmgr_monitor_sw_setup(struct gk20a *g) +{ + int status; + struct boardobjgrp *pboardobjgrp = NULL; + struct pwr_channel *pchannel; + struct pmgr_pwr_monitor *ppwrmonitorobjs; + u8 indx = 0; + + /* Construct the Super Class and override the Interfaces */ + status = boardobjgrpconstruct_e32(g, + &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels); + if (status) { + nvgpu_err(g, + "error creating boardobjgrp for pmgr channel, status - 0x%x", + status); + goto done; + } + + pboardobjgrp = &(g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super); + + /* Override the Interfaces */ + pboardobjgrp->pmudatainstget = _pwr_channel_pmudata_instget; + + /* Construct the Super Class and override the Interfaces */ + status = boardobjgrpconstruct_e32(g, + &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels); + if (status) { + nvgpu_err(g, + "error creating boardobjgrp for pmgr channel relationship, status - 0x%x", + status); + goto done; + } + + pboardobjgrp = &(g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super); + + /* Override the Interfaces */ + pboardobjgrp->pmudatainstget = _pwr_channel_rels_pmudata_instget; + + /* Initialize the Total GPU Power Channel Mask to 0 */ + g->pmgr_pmu.pmgr_monitorobjs.pmu_data.channels.hdr.data.total_gpu_power_channel_mask = 0; + g->pmgr_pmu.pmgr_monitorobjs.total_gpu_channel_idx = + CTRL_PMGR_PWR_CHANNEL_INDEX_INVALID; + + /* Supported topology table version 1.0 */ + g->pmgr_pmu.pmgr_monitorobjs.b_is_topology_tbl_ver_1x = true; + + ppwrmonitorobjs = &(g->pmgr_pmu.pmgr_monitorobjs); + + status = devinit_get_pwr_topology_table(g, ppwrmonitorobjs); + if (status) { + goto done; + } + + status = _pwr_channel_state_init(g); + if (status) { + goto done; + } + + /* Initialise physicalChannelMask */ + g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask = 0; + + pboardobjgrp = &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super; + + BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct pwr_channel *, pchannel, indx) { + if (_pwr_channel_implements(pchannel, + CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR)) { + g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask |= BIT(indx); + } + } + +done: + nvgpu_log_info(g, " done status %x", status); + return status; +} diff --git a/include/pmgr/pwrmonitor.h 
b/include/pmgr/pwrmonitor.h
new file mode 100644
index 0000000..bf4c76f
--- /dev/null
+++ b/include/pmgr/pwrmonitor.h
@@ -0,0 +1,69 @@
+/*
+ * general power channel structures & definitions
+ *
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef NVGPU_PMGR_PWRMONITOR_H
+#define NVGPU_PMGR_PWRMONITOR_H
+
+#include <nvgpu/pmuif/gpmuifpmgr.h>	/* assumed: bracketed header name missing in this copy */
+#include "boardobj/boardobjgrp.h"
+#include "boardobj/boardobj.h"
+#include "ctrl/ctrlpmgr.h"
+
+struct pwr_channel {
+	struct boardobj super;
+	u8 pwr_rail;
+	u32 volt_fixed_uv;
+	u32 pwr_corr_slope;
+	s32 pwr_corr_offset_mw;
+	u32 curr_corr_slope;
+	s32 curr_corr_offset_ma;
+	u32 dependent_ch_mask;
+};
+
+struct pwr_chrelationship {
+	struct boardobj super;
+	u8 chIdx;
+};
+
+struct pwr_channel_sensor {
+	struct pwr_channel super;
+	u8 pwr_dev_idx;
+	u8 pwr_dev_prov_idx;
+};
+
+struct pmgr_pwr_monitor {
+	bool b_is_topology_tbl_ver_1x;
+	struct boardobjgrp_e32 pwr_channels;
+	struct boardobjgrp_e32 pwr_ch_rels;
+	u8 total_gpu_channel_idx;
+	u32 physical_channel_mask;
+	struct nv_pmu_pmgr_pwr_monitor_pack pmu_data;
+};
+
+#define PMGR_PWR_MONITOR_GET_PWR_CHANNEL(g, channel_idx) \
+	((struct pwr_channel *)BOARDOBJGRP_OBJ_GET_BY_IDX( \
+		&(g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super), (channel_idx)))
+
+int pmgr_monitor_sw_setup(struct gk20a *g);
+
+#endif /* NVGPU_PMGR_PWRMONITOR_H */
diff --git a/include/pmgr/pwrpolicy.c b/include/pmgr/pwrpolicy.c
new file mode 100644
index 0000000..3bf6f32
--- /dev/null
+++ b/include/pmgr/pwrpolicy.c
@@ -0,0 +1,782 @@
+/*
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include + +#include "pwrpolicy.h" +#include "boardobj/boardobjgrp.h" +#include "boardobj/boardobjgrp_e32.h" +#include "gp106/bios_gp106.h" + +#define _pwr_policy_limitarboutputget_helper(p_limit_arb) (p_limit_arb)->output +#define _pwr_policy_limitdeltaapply(limit, delta) ((u32)max(((s32)limit) + (delta), 0)) + +static u32 _pwr_policy_limitarbinputset_helper(struct gk20a *g, + struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb, + u8 client_idx, + u32 limit_value) +{ + u8 indx; + bool b_found = false; + u32 status = 0; + u32 output = limit_value; + + for (indx = 0; indx< p_limit_arb->num_inputs; indx++) { + if (p_limit_arb->inputs[indx].pwr_policy_idx == client_idx) { + p_limit_arb->inputs[indx].limit_value = limit_value; + b_found = true; + } else if (p_limit_arb->b_arb_max) { + output = max(output, p_limit_arb->inputs[indx].limit_value); + } else { + output = min(output, p_limit_arb->inputs[indx].limit_value); + } + } + + if (!b_found) { + if (p_limit_arb->num_inputs < + CTRL_PMGR_PWR_POLICY_MAX_LIMIT_INPUTS) { + p_limit_arb->inputs[ + p_limit_arb->num_inputs].pwr_policy_idx = client_idx; + p_limit_arb->inputs[ + p_limit_arb->num_inputs].limit_value = limit_value; + p_limit_arb->num_inputs++; + } else { + nvgpu_err(g, "No entries remaining for clientIdx=%d", + client_idx); + status = -EINVAL; + } + } + + if (!status) { + p_limit_arb->output = output; + } + + return status; +} + +static u32 _pwr_policy_limitid_translate(struct gk20a *g, + struct pwr_policy *ppolicy, + enum pwr_policy_limit_id limit_id, + struct ctrl_pmgr_pwr_policy_limit_arbitration **p_limit_arb, + struct ctrl_pmgr_pwr_policy_limit_arbitration **p_limit_arb_sec) +{ + u32 status = 0; + + switch (limit_id) { + case PWR_POLICY_LIMIT_ID_MIN: + *p_limit_arb = &ppolicy->limit_arb_min; + break; + + case PWR_POLICY_LIMIT_ID_RATED: + *p_limit_arb = &ppolicy->limit_arb_rated; + + if (p_limit_arb_sec != NULL) { + *p_limit_arb_sec = &ppolicy->limit_arb_curr; + } + break; + + case PWR_POLICY_LIMIT_ID_MAX: + *p_limit_arb = &ppolicy->limit_arb_max; + break; + + case PWR_POLICY_LIMIT_ID_CURR: + *p_limit_arb = &ppolicy->limit_arb_curr; + break; + + case PWR_POLICY_LIMIT_ID_BATT: + *p_limit_arb = &ppolicy->limit_arb_batt; + break; + + default: + nvgpu_err(g, "Unsupported limitId=%d", + limit_id); + status = -EINVAL; + break; + } + + return status; +} + +static u32 _pwr_policy_limitarbinputset(struct gk20a *g, + struct pwr_policy *ppolicy, + enum pwr_policy_limit_id limit_id, + u8 client_idx, + u32 limit) +{ + u32 status = 0; + struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb = NULL; + struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb_sec = NULL; + + status = _pwr_policy_limitid_translate(g, + ppolicy, + limit_id, + &p_limit_arb, + &p_limit_arb_sec); + if (status) { + goto exit; + } + + status = _pwr_policy_limitarbinputset_helper(g, p_limit_arb, client_idx, limit); + if (status) { + nvgpu_err(g, + "Error setting client limit value: status=0x%08x, limitId=0x%x, clientIdx=0x%x, limit=%d", + status, limit_id, client_idx, limit); + goto exit; + } + + if (NULL != p_limit_arb_sec) { + status = _pwr_policy_limitarbinputset_helper(g, p_limit_arb_sec, + CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM, + 
_pwr_policy_limitarboutputget_helper(p_limit_arb)); + } + +exit: + return status; +} + +static inline void _pwr_policy_limitarbconstruct( + struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb, + bool b_arb_max) +{ + p_limit_arb->num_inputs = 0; + p_limit_arb->b_arb_max = b_arb_max; +} + +static u32 _pwr_policy_limitarboutputget(struct gk20a *g, + struct pwr_policy *ppolicy, + enum pwr_policy_limit_id limit_id) +{ + u32 status = 0; + struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb = NULL; + + status = _pwr_policy_limitid_translate(g, + ppolicy, + limit_id, + &p_limit_arb, + NULL); + if (status) { + return 0; + } + + return _pwr_policy_limitarboutputget_helper(p_limit_arb); +} + +static int _pwr_domains_pmudatainit_hw_threshold(struct gk20a *g, + struct boardobj *board_obj_ptr, + struct nv_pmu_boardobj *ppmudata) +{ + struct nv_pmu_pmgr_pwr_policy_hw_threshold *pmu_hw_threshold_data; + struct pwr_policy_hw_threshold *p_hw_threshold; + struct pwr_policy *p_pwr_policy; + struct nv_pmu_pmgr_pwr_policy *pmu_pwr_policy; + int status = 0; + + status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); + if (status) { + nvgpu_err(g, + "error updating pmu boardobjgrp for pwr sensor 0x%x", + status); + status = -ENOMEM; + goto done; + } + + p_hw_threshold = (struct pwr_policy_hw_threshold *)board_obj_ptr; + pmu_hw_threshold_data = (struct nv_pmu_pmgr_pwr_policy_hw_threshold *) ppmudata; + pmu_pwr_policy = (struct nv_pmu_pmgr_pwr_policy *) ppmudata; + p_pwr_policy = (struct pwr_policy *)&(p_hw_threshold->super.super); + + pmu_pwr_policy->ch_idx = 0; + pmu_pwr_policy->limit_unit = p_pwr_policy->limit_unit; + pmu_pwr_policy->num_limit_inputs = p_pwr_policy->num_limit_inputs; + + pmu_pwr_policy->limit_min = _pwr_policy_limitdeltaapply( + _pwr_policy_limitarboutputget(g, p_pwr_policy, + PWR_POLICY_LIMIT_ID_MIN), + p_pwr_policy->limit_delta); + + pmu_pwr_policy->limit_max = _pwr_policy_limitdeltaapply( + _pwr_policy_limitarboutputget(g, p_pwr_policy, + PWR_POLICY_LIMIT_ID_MAX), + p_pwr_policy->limit_delta); + + pmu_pwr_policy->limit_curr = _pwr_policy_limitdeltaapply( + _pwr_policy_limitarboutputget(g, p_pwr_policy, + PWR_POLICY_LIMIT_ID_CURR), + p_pwr_policy->limit_delta); + + memcpy(&pmu_pwr_policy->integral, &p_pwr_policy->integral, + sizeof(struct ctrl_pmgr_pwr_policy_info_integral)); + + pmu_pwr_policy->sample_mult = p_pwr_policy->sample_mult; + pmu_pwr_policy->filter_type = p_pwr_policy->filter_type; + pmu_pwr_policy->filter_param = p_pwr_policy->filter_param; + + pmu_hw_threshold_data->threshold_idx = p_hw_threshold->threshold_idx; + pmu_hw_threshold_data->low_threshold_idx = p_hw_threshold->low_threshold_idx; + pmu_hw_threshold_data->b_use_low_threshold = p_hw_threshold->b_use_low_threshold; + pmu_hw_threshold_data->low_threshold_value = p_hw_threshold->low_threshold_value; + + if (BOARDOBJ_GET_TYPE(board_obj_ptr) == + CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD) { + struct nv_pmu_pmgr_pwr_policy_sw_threshold *pmu_sw_threshold_data; + struct pwr_policy_sw_threshold *p_sw_threshold; + + p_sw_threshold = (struct pwr_policy_sw_threshold *)board_obj_ptr; + pmu_sw_threshold_data = + (struct nv_pmu_pmgr_pwr_policy_sw_threshold *) ppmudata; + pmu_sw_threshold_data->event_id = + p_sw_threshold->event_id; + } +done: + return status; +} + +static struct boardobj *construct_pwr_policy(struct gk20a *g, + void *pargs, u16 pargs_size, u8 type) +{ + struct boardobj *board_obj_ptr = NULL; + int status; + struct pwr_policy_hw_threshold *pwrpolicyhwthreshold; + struct pwr_policy *pwrpolicy; + 
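+	/*
+	 * Four pointers, two views each of two buffers: pwrpolicyparams and
+	 * hwthreshold (below) alias the caller's staging data in pargs as
+	 * base class and subclass, while pwrpolicy and pwrpolicyhwthreshold
+	 * (above) are the same two views of the board object constructed
+	 * from it.
+	 */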
struct pwr_policy *pwrpolicyparams = (struct pwr_policy*)pargs; + struct pwr_policy_hw_threshold *hwthreshold = (struct pwr_policy_hw_threshold*)pargs; + + status = boardobj_construct_super(g, &board_obj_ptr, + pargs_size, pargs); + if (status) { + return NULL; + } + + pwrpolicyhwthreshold = (struct pwr_policy_hw_threshold*)board_obj_ptr; + pwrpolicy = (struct pwr_policy *)board_obj_ptr; + + nvgpu_log_fn(g, "min=%u rated=%u max=%u", + pwrpolicyparams->limit_min, + pwrpolicyparams->limit_rated, + pwrpolicyparams->limit_max); + + /* Set Super class interfaces */ + board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_hw_threshold; + + pwrpolicy->ch_idx = pwrpolicyparams->ch_idx; + pwrpolicy->num_limit_inputs = 0; + pwrpolicy->limit_unit = pwrpolicyparams->limit_unit; + pwrpolicy->filter_type = (enum ctrl_pmgr_pwr_policy_filter_type)(pwrpolicyparams->filter_type); + pwrpolicy->sample_mult = pwrpolicyparams->sample_mult; + switch (pwrpolicy->filter_type) + { + case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_NONE: + break; + + case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_BLOCK: + pwrpolicy->filter_param.block.block_size = + pwrpolicyparams->filter_param.block.block_size; + break; + + case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_MOVING_AVERAGE: + pwrpolicy->filter_param.moving_avg.window_size = + pwrpolicyparams->filter_param.moving_avg.window_size; + break; + + case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_IIR: + pwrpolicy->filter_param.iir.divisor = pwrpolicyparams->filter_param.iir.divisor; + break; + + default: + nvgpu_err(g, "Error: unrecognized Power Policy filter type: %d", + pwrpolicy->filter_type); + } + + _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_curr, false); + + pwrpolicy->limit_delta = 0; + + _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_min, true); + status = _pwr_policy_limitarbinputset(g, + pwrpolicy, + PWR_POLICY_LIMIT_ID_MIN, + CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM, + pwrpolicyparams->limit_min); + + _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_max, false); + status = _pwr_policy_limitarbinputset(g, + pwrpolicy, + PWR_POLICY_LIMIT_ID_MAX, + CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM, + pwrpolicyparams->limit_max); + + _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_rated, false); + status = _pwr_policy_limitarbinputset(g, + pwrpolicy, + PWR_POLICY_LIMIT_ID_RATED, + CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM, + pwrpolicyparams->limit_rated); + + _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_batt, false); + status = _pwr_policy_limitarbinputset(g, + pwrpolicy, + PWR_POLICY_LIMIT_ID_BATT, + CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM, + ((pwrpolicyparams->limit_batt != 0U) ? 
+			pwrpolicyparams->limit_batt :
+			CTRL_PMGR_PWR_POLICY_LIMIT_MAX));
+
+	memcpy(&pwrpolicy->integral, &pwrpolicyparams->integral,
+		sizeof(struct ctrl_pmgr_pwr_policy_info_integral));
+
+	pwrpolicyhwthreshold->threshold_idx = hwthreshold->threshold_idx;
+	pwrpolicyhwthreshold->b_use_low_threshold = hwthreshold->b_use_low_threshold;
+	pwrpolicyhwthreshold->low_threshold_idx = hwthreshold->low_threshold_idx;
+	pwrpolicyhwthreshold->low_threshold_value = hwthreshold->low_threshold_value;
+
+	if (type == CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD) {
+		struct pwr_policy_sw_threshold *pwrpolicyswthreshold;
+		struct pwr_policy_sw_threshold *swthreshold =
+			(struct pwr_policy_sw_threshold *)pargs;
+
+		pwrpolicyswthreshold = (struct pwr_policy_sw_threshold *)board_obj_ptr;
+		pwrpolicyswthreshold->event_id = swthreshold->event_id;
+	}
+
+	nvgpu_log_info(g, " Done");
+
+	return board_obj_ptr;
+}
+
+static int _pwr_policy_construct_WAR_SW_Threshold_policy(struct gk20a *g,
+	struct pmgr_pwr_policy *ppwrpolicyobjs,
+	union pwr_policy_data_union *ppwrpolicydata,
+	u16 pwr_policy_size,
+	u32 obj_index)
+{
+	int status = 0;
+	struct boardobj *boardobj;
+
+	/* WAR: hardcoded SW_THRESHOLD policy (see g->hardcode_sw_threshold) */
+	ppwrpolicydata->pwrpolicy.limit_unit = 0;
+	ppwrpolicydata->pwrpolicy.limit_min = 10000;
+	ppwrpolicydata->pwrpolicy.limit_rated = 100000;
+	ppwrpolicydata->pwrpolicy.limit_max = 100000;
+	ppwrpolicydata->sw_threshold.threshold_idx = 1;
+	ppwrpolicydata->pwrpolicy.filter_type =
+			CTRL_PMGR_PWR_POLICY_FILTER_TYPE_MOVING_AVERAGE;
+	ppwrpolicydata->pwrpolicy.sample_mult = 5;
+
+	/* Use a 10-sample moving-average window as the filter parameter. */
+	ppwrpolicydata->pwrpolicy.filter_param.moving_avg.window_size = 10;
+
+	ppwrpolicydata->sw_threshold.event_id = 0x01;
+
+	ppwrpolicydata->boardobj.type = CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD;
+
+	boardobj = construct_pwr_policy(g, ppwrpolicydata,
+			pwr_policy_size, ppwrpolicydata->boardobj.type);
+
+	if (!boardobj) {
+		nvgpu_err(g,
+			"unable to create pwr policy for type %d", ppwrpolicydata->boardobj.type);
+		status = -EINVAL;
+		goto done;
+	}
+
+	status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super,
+			boardobj, obj_index);
+
+	if (status) {
+		nvgpu_err(g,
+			"unable to insert pwr policy boardobj for %d", obj_index);
+		status = -EINVAL;
+		goto done;
+	}
+done:
+	return status;
+}
+
+struct pwr_policy_3x_header_unpacked {
+	u8 version;
+	u8 header_size;
+	u8 table_entry_size;
+	u8 num_table_entries;
+	u16 base_sample_period;
+	u16 min_client_sample_period;
+	u8 table_rel_entry_size;
+	u8 num_table_rel_entries;
+	u8 tgp_policy_idx;
+	u8 rtp_policy_idx;
+	u8 mxm_policy_idx;
+	u8 dnotifier_policy_idx;
+	u32 d2_limit;
+	u32 d3_limit;
+	u32 d4_limit;
+	u32 d5_limit;
+	u8 low_sampling_mult;
+	u8 pwr_tgt_policy_idx;
+	u8 pwr_tgt_floor_policy_idx;
+	u8 sm_bus_policy_idx;
+	u8 table_viol_entry_size;
+	u8 num_table_viol_entries;
+};
+
+/* Copy one field at a time with memcpy so the packed, potentially unaligned
+ * VBIOS layout can be read safely into an aligned unpacked copy. */
+#define __UNPACK_FIELD(unpacked, packed, field) \
+	__builtin_memcpy(&unpacked->field, &packed->field, \
+		sizeof(unpacked->field))
+
+static inline void devinit_unpack_pwr_policy_header(
+	struct pwr_policy_3x_header_unpacked *unpacked,
+	struct pwr_policy_3x_header_struct *packed)
+{
+	__UNPACK_FIELD(unpacked, packed, version);
+	__UNPACK_FIELD(unpacked, packed, header_size);
+	__UNPACK_FIELD(unpacked, packed, table_entry_size);
+	__UNPACK_FIELD(unpacked, packed, num_table_entries);
+	__UNPACK_FIELD(unpacked, packed, base_sample_period);
+	__UNPACK_FIELD(unpacked, packed, min_client_sample_period);
+	__UNPACK_FIELD(unpacked, packed, table_rel_entry_size);
+
__UNPACK_FIELD(unpacked, packed, num_table_rel_entries); + __UNPACK_FIELD(unpacked, packed, tgp_policy_idx); + __UNPACK_FIELD(unpacked, packed, rtp_policy_idx); + __UNPACK_FIELD(unpacked, packed, mxm_policy_idx); + __UNPACK_FIELD(unpacked, packed, dnotifier_policy_idx); + __UNPACK_FIELD(unpacked, packed, d2_limit); + __UNPACK_FIELD(unpacked, packed, d3_limit); + __UNPACK_FIELD(unpacked, packed, d4_limit); + __UNPACK_FIELD(unpacked, packed, d5_limit); + __UNPACK_FIELD(unpacked, packed, low_sampling_mult); + __UNPACK_FIELD(unpacked, packed, pwr_tgt_policy_idx); + __UNPACK_FIELD(unpacked, packed, pwr_tgt_floor_policy_idx); + __UNPACK_FIELD(unpacked, packed, sm_bus_policy_idx); + __UNPACK_FIELD(unpacked, packed, table_viol_entry_size); + __UNPACK_FIELD(unpacked, packed, num_table_viol_entries); +} + +struct pwr_policy_3x_entry_unpacked { + u8 flags0; + u8 ch_idx; + u32 limit_min; + u32 limit_rated; + u32 limit_max; + u32 param0; + u32 param1; + u32 param2; + u32 param3; + u32 limit_batt; + u8 flags1; + u8 past_length; + u8 next_length; + u16 ratio_min; + u16 ratio_max; + u8 sample_mult; + u32 filter_param; +}; + +static inline void devinit_unpack_pwr_policy_entry( + struct pwr_policy_3x_entry_unpacked *unpacked, + struct pwr_policy_3x_entry_struct *packed) +{ + __UNPACK_FIELD(unpacked, packed, flags0); + __UNPACK_FIELD(unpacked, packed, ch_idx); + __UNPACK_FIELD(unpacked, packed, limit_min); + __UNPACK_FIELD(unpacked, packed, limit_rated); + __UNPACK_FIELD(unpacked, packed, limit_max); + __UNPACK_FIELD(unpacked, packed, param0); + __UNPACK_FIELD(unpacked, packed, param1); + __UNPACK_FIELD(unpacked, packed, param2); + __UNPACK_FIELD(unpacked, packed, param3); + __UNPACK_FIELD(unpacked, packed, limit_batt); + __UNPACK_FIELD(unpacked, packed, flags1); + __UNPACK_FIELD(unpacked, packed, past_length); + __UNPACK_FIELD(unpacked, packed, next_length); + __UNPACK_FIELD(unpacked, packed, ratio_min); + __UNPACK_FIELD(unpacked, packed, ratio_max); + __UNPACK_FIELD(unpacked, packed, sample_mult); + __UNPACK_FIELD(unpacked, packed, filter_param); +} + +static int devinit_get_pwr_policy_table(struct gk20a *g, + struct pmgr_pwr_policy *ppwrpolicyobjs) +{ + int status = 0; + u8 *ptr = NULL; + struct boardobj *boardobj; + struct pwr_policy_3x_header_struct *packed_hdr; + struct pwr_policy_3x_header_unpacked hdr; + u32 index; + u32 obj_index = 0; + u16 pwr_policy_size; + bool integral_control = false; + u32 hw_threshold_policy_index = 0; + union pwr_policy_data_union pwr_policy_data; + + nvgpu_log_info(g, " "); + + ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, + g->bios.perf_token, POWER_CAPPING_TABLE); + if (ptr == NULL) { + status = -EINVAL; + goto done; + } + + packed_hdr = (struct pwr_policy_3x_header_struct *)ptr; + + if (packed_hdr->version != + VBIOS_POWER_POLICY_VERSION_3X) { + status = -EINVAL; + goto done; + } + + if (packed_hdr->header_size < + VBIOS_POWER_POLICY_3X_HEADER_SIZE_25) { + status = -EINVAL; + goto done; + } + + if (packed_hdr->table_entry_size < + VBIOS_POWER_POLICY_3X_ENTRY_SIZE_2E) { + status = -EINVAL; + goto done; + } + + /* unpack power policy table header */ + devinit_unpack_pwr_policy_header(&hdr, packed_hdr); + + ptr += (u32)hdr.header_size; + + for (index = 0; index < hdr.num_table_entries; + index++, ptr += (u32)hdr.table_entry_size) { + + struct pwr_policy_3x_entry_struct *packed_entry; + struct pwr_policy_3x_entry_unpacked entry; + + u8 class_type; + + packed_entry = (struct pwr_policy_3x_entry_struct *)ptr; + + class_type = (u8)BIOS_GET_FIELD( + packed_entry->flags0, + 
NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS);
+
+		if (class_type != NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS_HW_THRESHOLD) {
+			continue;
+		}
+
+		/* unpack power policy table entry */
+		devinit_unpack_pwr_policy_entry(&entry, packed_entry);
+
+		ppwrpolicyobjs->version =
+			CTRL_PMGR_PWR_POLICY_TABLE_VERSION_3X;
+		ppwrpolicyobjs->base_sample_period = hdr.base_sample_period;
+		ppwrpolicyobjs->min_client_sample_period =
+			hdr.min_client_sample_period;
+		ppwrpolicyobjs->low_sampling_mult = hdr.low_sampling_mult;
+
+		ppwrpolicyobjs->policy_idxs[1] = hdr.tgp_policy_idx;
+		ppwrpolicyobjs->policy_idxs[0] = hdr.rtp_policy_idx;
+		ppwrpolicyobjs->policy_idxs[2] = hdr.mxm_policy_idx;
+		ppwrpolicyobjs->policy_idxs[3] = hdr.dnotifier_policy_idx;
+		ppwrpolicyobjs->ext_limits[0].limit = hdr.d2_limit;
+		ppwrpolicyobjs->ext_limits[1].limit = hdr.d3_limit;
+		ppwrpolicyobjs->ext_limits[2].limit = hdr.d4_limit;
+		ppwrpolicyobjs->ext_limits[3].limit = hdr.d5_limit;
+		ppwrpolicyobjs->policy_idxs[4] = hdr.pwr_tgt_policy_idx;
+		ppwrpolicyobjs->policy_idxs[5] = hdr.pwr_tgt_floor_policy_idx;
+		ppwrpolicyobjs->policy_idxs[6] = hdr.sm_bus_policy_idx;
+
+		integral_control = (bool)BIOS_GET_FIELD(entry.flags1,
+			NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_INTEGRAL_CONTROL);
+
+		if (integral_control) {
+			pwr_policy_data.pwrpolicy.integral.past_sample_count =
+				entry.past_length;
+			pwr_policy_data.pwrpolicy.integral.next_sample_count =
+				entry.next_length;
+			pwr_policy_data.pwrpolicy.integral.ratio_limit_max =
+				entry.ratio_max;
+			pwr_policy_data.pwrpolicy.integral.ratio_limit_min =
+				entry.ratio_min;
+		} else {
+			memset(&(pwr_policy_data.pwrpolicy.integral), 0x0,
+				sizeof(struct ctrl_pmgr_pwr_policy_info_integral));
+		}
+		pwr_policy_data.hw_threshold.threshold_idx = (u8)
+			BIOS_GET_FIELD(entry.param0,
+			NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_THRES_IDX);
+
+		pwr_policy_data.hw_threshold.b_use_low_threshold =
+			BIOS_GET_FIELD(entry.param0,
+			NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_USE);
+
+		if (pwr_policy_data.hw_threshold.b_use_low_threshold) {
+			pwr_policy_data.hw_threshold.low_threshold_idx = (u8)
+				BIOS_GET_FIELD(entry.param0,
+				NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_IDX);
+
+			pwr_policy_data.hw_threshold.low_threshold_value = (u16)
+				BIOS_GET_FIELD(entry.param1,
+				NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM1_HW_THRESHOLD_LOW_THRESHOLD_VAL);
+		}
+
+		pwr_policy_size = sizeof(struct pwr_policy_hw_threshold);
+
+		/* Initialize data for the parent class */
+		pwr_policy_data.boardobj.type =
+			CTRL_PMGR_PWR_POLICY_TYPE_HW_THRESHOLD;
+		pwr_policy_data.pwrpolicy.ch_idx = entry.ch_idx;
+		pwr_policy_data.pwrpolicy.limit_unit = (u8)
+			BIOS_GET_FIELD(entry.flags0,
+			NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_LIMIT_UNIT);
+		pwr_policy_data.pwrpolicy.filter_type =
+			(enum ctrl_pmgr_pwr_policy_filter_type)
+			BIOS_GET_FIELD(entry.flags1,
+			NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FILTER_TYPE);
+
+		pwr_policy_data.pwrpolicy.limit_min = entry.limit_min;
+		pwr_policy_data.pwrpolicy.limit_rated = entry.limit_rated;
+		pwr_policy_data.pwrpolicy.limit_max = entry.limit_max;
+		pwr_policy_data.pwrpolicy.limit_batt = entry.limit_batt;
+
+		pwr_policy_data.pwrpolicy.sample_mult = (u8)entry.sample_mult;
+
+		/* The VBIOS filter parameter is not consumed here; clear the union. */
+		pwr_policy_data.pwrpolicy.filter_param.block.block_size = 0;
+		pwr_policy_data.pwrpolicy.filter_param.moving_avg.window_size = 0;
+		pwr_policy_data.pwrpolicy.filter_param.iir.divisor = 0;
+
+		hw_threshold_policy_index |=
+			BIT(pwr_policy_data.hw_threshold.threshold_idx);
+
+		boardobj = construct_pwr_policy(g, &pwr_policy_data,
+			pwr_policy_size, pwr_policy_data.boardobj.type);
+
+		if (!boardobj) {
+			nvgpu_err(g,
+				"unable to create pwr policy for %d type %d",
+				index, pwr_policy_data.boardobj.type);
+			status = -EINVAL;
+			goto done;
+		}
+
+		status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super,
+				boardobj, obj_index);
+
+		if (status) {
+			nvgpu_err(g,
+				"unable to insert pwr policy boardobj for %d",
+				index);
+			status = -EINVAL;
+			goto done;
+		}
+
+		++obj_index;
+	}
+
+	if (g->hardcode_sw_threshold) {
+		status = _pwr_policy_construct_WAR_SW_Threshold_policy(g,
+			ppwrpolicyobjs,
+			&pwr_policy_data,
+			sizeof(struct pwr_policy_sw_threshold),
+			obj_index);
+		if (status) {
+			nvgpu_err(g, "unable to construct WAR SW_THRESHOLD policy");
+			status = -EINVAL;
+			goto done;
+		}
+		++obj_index;
+	}
+
+done:
+	nvgpu_log_info(g, " done status %x", status);
+	return status;
+}
+
+int pmgr_policy_sw_setup(struct gk20a *g)
+{
+	int status;
+	struct boardobjgrp *pboardobjgrp = NULL;
+	struct pwr_policy *ppolicy;
+	struct pmgr_pwr_policy *ppwrpolicyobjs;
+	u8 indx = 0;
+
+	/* Construct the Super Class and override the Interfaces */
+	status = boardobjgrpconstruct_e32(g,
+		&g->pmgr_pmu.pmgr_policyobjs.pwr_policies);
+	if (status) {
+		nvgpu_err(g,
+			"error creating boardobjgrp for pmgr policy, status - 0x%x",
+			status);
+		goto done;
+	}
+
+	status = boardobjgrpconstruct_e32(g,
+		&g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels);
+	if (status) {
+		nvgpu_err(g,
+			"error creating boardobjgrp for pmgr policy rels, status - 0x%x",
+			status);
+		goto done;
+	}
+
+	status = boardobjgrpconstruct_e32(g,
+		&g->pmgr_pmu.pmgr_policyobjs.pwr_violations);
+	if (status) {
+		nvgpu_err(g,
+			"error creating boardobjgrp for pmgr violations, status - 0x%x",
+			status);
+		goto done;
+	}
+
+	memset(g->pmgr_pmu.pmgr_policyobjs.policy_idxs, CTRL_PMGR_PWR_POLICY_INDEX_INVALID,
+		sizeof(u8) * CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES);
+
+	/* Initialize external power limit policy indexes to _INVALID/0xFF */
+	for (indx = 0; indx < PWR_POLICY_EXT_POWER_STATE_ID_COUNT; indx++) {
+		g->pmgr_pmu.pmgr_policyobjs.ext_limits[indx].policy_table_idx =
+			CTRL_PMGR_PWR_POLICY_INDEX_INVALID;
+	}
+
+	/* Initialize external power state to an invalid sentinel */
+	g->pmgr_pmu.pmgr_policyobjs.ext_power_state = 0xFFFFFFFF;
+
+	ppwrpolicyobjs = &(g->pmgr_pmu.pmgr_policyobjs);
+	pboardobjgrp = &(g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super);
+
+	status = devinit_get_pwr_policy_table(g, ppwrpolicyobjs);
+	if (status) {
+		goto done;
+	}
+
+	g->pmgr_pmu.pmgr_policyobjs.b_enabled = true;
+
+	BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct pwr_policy *, ppolicy, indx) {
+		PMGR_PWR_POLICY_INCREMENT_LIMIT_INPUT_COUNT(ppolicy);
+	}
+
+	g->pmgr_pmu.pmgr_policyobjs.global_ceiling.values[0] = 0xFF;
+
+	g->pmgr_pmu.pmgr_policyobjs.client_work_item.b_pending = false;
+
+done:
+	nvgpu_log_info(g, " done status %x", status);
+	return status;
+}
diff --git a/include/pmgr/pwrpolicy.h b/include/pmgr/pwrpolicy.h
new file mode 100644
index 0000000..74f4937
--- /dev/null
+++ b/include/pmgr/pwrpolicy.h
@@ -0,0 +1,136 @@
+/*
+ * general power policy structures & definitions
+ *
+ * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef NVGPU_PMGR_PWRPOLICY_H
+#define NVGPU_PMGR_PWRPOLICY_H
+
+#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>	/* reconstructed (assumed); nv_pmu_* types */
+#include "boardobj/boardobjgrp.h"
+#include "boardobj/boardobj.h"
+#include "ctrl/ctrlpmgr.h"
+
+#define PWR_POLICY_EXT_POWER_STATE_ID_COUNT 0x4U
+
+enum pwr_policy_limit_id {
+	PWR_POLICY_LIMIT_ID_MIN = 0x00000000,
+	PWR_POLICY_LIMIT_ID_RATED,
+	PWR_POLICY_LIMIT_ID_MAX,
+	PWR_POLICY_LIMIT_ID_CURR,
+	PWR_POLICY_LIMIT_ID_BATT,
+};
+
+struct pwr_policy {
+	struct boardobj super;
+	u8 ch_idx;
+	u8 num_limit_inputs;
+	u8 limit_unit;
+	s32 limit_delta;
+	u32 limit_min;
+	u32 limit_rated;
+	u32 limit_max;
+	u32 limit_batt;
+	struct ctrl_pmgr_pwr_policy_info_integral integral;
+	struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_min;
+	struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_rated;
+	struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_max;
+	struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_batt;
+	struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_curr;
+	u8 sample_mult;
+	enum ctrl_pmgr_pwr_policy_filter_type filter_type;
+	union ctrl_pmgr_pwr_policy_filter_param filter_param;
+};
+
+struct pwr_policy_ext_limit {
+	u8 policy_table_idx;
+	u32 limit;
+};
+
+struct pwr_policy_batt_workitem {
+	u32 power_state;
+	bool b_full_deflection;
+};
+
+struct pwr_policy_client_workitem {
+	u32 limit;
+	bool b_pending;
+};
+
+struct pwr_policy_relationship {
+	struct boardobj super;
+	u8 policy_idx;
+};
+
+struct pmgr_pwr_policy {
+	u8 version;
+	bool b_enabled;
+	struct nv_pmu_perf_domain_group_limits global_ceiling;
+	u8 policy_idxs[CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES];
+	struct pwr_policy_ext_limit ext_limits[PWR_POLICY_EXT_POWER_STATE_ID_COUNT];
+	s32 ext_power_state;
+	u16 base_sample_period;
+	u16 min_client_sample_period;
+	u8 low_sampling_mult;
+	struct boardobjgrp_e32 pwr_policies;
+	struct boardobjgrp_e32 pwr_policy_rels;
+	struct boardobjgrp_e32 pwr_violations;
+	struct pwr_policy_client_workitem client_work_item;
+};
+
+struct pwr_policy_limit {
+	struct pwr_policy super;
+};
+
+struct pwr_policy_hw_threshold {
+	struct pwr_policy_limit super;
+	u8 threshold_idx;
+	u8 low_threshold_idx;
+	bool b_use_low_threshold;
+	u16 low_threshold_value;
+};
+
+struct pwr_policy_sw_threshold {
+	struct pwr_policy_limit super;
+	u8 threshold_idx;
+	u8 low_threshold_idx;
+	bool b_use_low_threshold;
+	u16 low_threshold_value;
+	u8 event_id;
+};
+
+union pwr_policy_data_union {
+	struct boardobj boardobj;
+	struct pwr_policy pwrpolicy;
+	struct pwr_policy_hw_threshold hw_threshold;
+	struct pwr_policy_sw_threshold sw_threshold;
+};
+
+#define PMGR_GET_PWR_POLICY(g, policy_idx) \
+	((struct pwr_policy *)BOARDOBJGRP_OBJ_GET_BY_IDX( \
+		&(g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super), (policy_idx)))
+
+#define PMGR_PWR_POLICY_INCREMENT_LIMIT_INPUT_COUNT(ppolicy) \
+	((ppolicy)->num_limit_inputs++)
+
+int pmgr_policy_sw_setup(struct gk20a *g);
+
+#endif /* NVGPU_PMGR_PWRPOLICY_H */
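
For reference, the limit arbiter in pwrpolicy.c is the piece most worth understanding before touching this code: each arbiter stores one limit request per client and publishes a single arbitrated output, taking the lowest request by default or the highest when b_arb_max is set. The standalone sketch below mirrors the fold performed by _pwr_policy_limitarbinputset_helper(); struct limit_arb, MAX_INPUTS, and the demo values are simplified stand-ins for illustration, not nvgpu definitions.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_INPUTS 8	/* stand-in for CTRL_PMGR_PWR_POLICY_MAX_LIMIT_INPUTS */

	struct limit_input { uint8_t client; uint32_t value; };

	struct limit_arb {
		bool arb_max;		/* true: highest request wins; false: lowest wins */
		uint8_t num_inputs;
		uint32_t output;	/* arbitrated result */
		struct limit_input inputs[MAX_INPUTS];
	};

	/* Update (or append) this client's request, then fold it against every
	 * other stored input -- the same shape as the nvgpu helper. */
	static int arb_input_set(struct limit_arb *arb, uint8_t client, uint32_t value)
	{
		uint32_t output = value;
		bool found = false;

		for (uint8_t i = 0; i < arb->num_inputs; i++) {
			if (arb->inputs[i].client == client) {
				arb->inputs[i].value = value;
				found = true;
			} else if (arb->arb_max) {
				output = output > arb->inputs[i].value ? output : arb->inputs[i].value;
			} else {
				output = output < arb->inputs[i].value ? output : arb->inputs[i].value;
			}
		}
		if (!found) {
			if (arb->num_inputs >= MAX_INPUTS)
				return -1;	/* no entries remaining for this client */
			arb->inputs[arb->num_inputs].client = client;
			arb->inputs[arb->num_inputs].value = value;
			arb->num_inputs++;
		}
		arb->output = output;
		return 0;
	}

	int main(void)
	{
		struct limit_arb arb = { .arb_max = false };	/* min-arbiter, like limit_arb_curr */

		arb_input_set(&arb, 0, 150000);	/* one client requests 150000 (e.g. mW) */
		arb_input_set(&arb, 3, 120000);	/* another requests 120000 */
		printf("arbitrated limit: %u\n", arb.output);	/* lowest request wins: 120000 */
		return 0;
	}

The same fold explains why construct_pwr_policy() seeds limit_arb_min with b_arb_max = true while limit_arb_max gets false: for a minimum limit the most restrictive request is the largest one, and for a maximum limit it is the smallest.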