Diffstat (limited to 'include/pmgr')
 -rw-r--r--  include/pmgr/pmgr.c        | 111
 -rw-r--r--  include/pmgr/pmgr.h        |  43
 -rw-r--r--  include/pmgr/pmgrpmu.c     | 546
 -rw-r--r--  include/pmgr/pmgrpmu.h     |  39
 -rw-r--r--  include/pmgr/pwrdev.c      | 319
 -rw-r--r--  include/pmgr/pwrdev.h      |  60
 -rw-r--r--  include/pmgr/pwrmonitor.c  | 376
 -rw-r--r--  include/pmgr/pwrmonitor.h  |  69
 -rw-r--r--  include/pmgr/pwrpolicy.c   | 782
 -rw-r--r--  include/pmgr/pwrpolicy.h   | 136
10 files changed, 2481 insertions, 0 deletions
diff --git a/include/pmgr/pmgr.c b/include/pmgr/pmgr.c
new file mode 100644
index 0000000..f5be01b
--- /dev/null
+++ b/include/pmgr/pmgr.c
| @@ -0,0 +1,111 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <nvgpu/gk20a.h> | ||
| 24 | |||
| 25 | #include "pwrdev.h" | ||
| 26 | #include "pmgrpmu.h" | ||
| 27 | |||
| 28 | int pmgr_pwr_devices_get_power(struct gk20a *g, u32 *val) | ||
| 29 | { | ||
| 30 | struct nv_pmu_pmgr_pwr_devices_query_payload payload; | ||
| 31 | int status; | ||
| 32 | |||
| 33 | status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload); | ||
| 34 | if (status) { | ||
| 35 | nvgpu_err(g, "pmgr_pwr_devices_get_current_power failed %x", | ||
| 36 | status); | ||
| 37 | } | ||
| 38 | |||
| 39 | *val = payload.devices[0].powerm_w; | ||
| 40 | |||
| 41 | return status; | ||
| 42 | } | ||
| 43 | |||
| 44 | int pmgr_pwr_devices_get_current(struct gk20a *g, u32 *val) | ||
| 45 | { | ||
| 46 | struct nv_pmu_pmgr_pwr_devices_query_payload payload; | ||
| 47 | int status; | ||
| 48 | |||
| 49 | status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload); | ||
| 50 | if (status) { | ||
| 51 | nvgpu_err(g, "pmgr_pwr_devices_get_current failed %x", | ||
| 52 | status); | ||
| 53 | } | ||
| 54 | |||
| 55 | *val = payload.devices[0].currentm_a; | ||
| 56 | |||
| 57 | return status; | ||
| 58 | } | ||
| 59 | |||
| 60 | int pmgr_pwr_devices_get_voltage(struct gk20a *g, u32 *val) | ||
| 61 | { | ||
| 62 | struct nv_pmu_pmgr_pwr_devices_query_payload payload; | ||
| 63 | int status; | ||
| 64 | |||
| 65 | status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload); | ||
| 66 | if (status) { | ||
| 67 | nvgpu_err(g, "pmgr_pwr_devices_get_current_voltage failed %x", | ||
| 68 | status); | ||
| 69 | } | ||
| 70 | |||
| 71 | *val = payload.devices[0].voltageu_v; | ||
| 72 | |||
| 73 | return status; | ||
| 74 | } | ||
| 75 | |||
| 76 | u32 pmgr_domain_sw_setup(struct gk20a *g) | ||
| 77 | { | ||
| 78 | u32 status; | ||
| 79 | |||
| 80 | status = pmgr_device_sw_setup(g); | ||
| 81 | if (status) { | ||
| 82 | nvgpu_err(g, | ||
| 83 | "error creating boardobjgrp for pmgr devices, status - 0x%x", | ||
| 84 | status); | ||
| 85 | goto exit; | ||
| 86 | } | ||
| 87 | |||
| 88 | status = pmgr_monitor_sw_setup(g); | ||
| 89 | if (status) { | ||
| 90 | nvgpu_err(g, | ||
| 91 | "error creating boardobjgrp for pmgr monitor, status - 0x%x", | ||
| 92 | status); | ||
| 93 | goto exit; | ||
| 94 | } | ||
| 95 | |||
| 96 | status = pmgr_policy_sw_setup(g); | ||
| 97 | if (status) { | ||
| 98 | nvgpu_err(g, | ||
| 99 | "error creating boardobjgrp for pmgr policy, status - 0x%x", | ||
| 100 | status); | ||
| 101 | goto exit; | ||
| 102 | } | ||
| 103 | |||
| 104 | exit: | ||
| 105 | return status; | ||
| 106 | } | ||
| 107 | |||
| 108 | int pmgr_domain_pmu_setup(struct gk20a *g) | ||
| 109 | { | ||
| 110 | return pmgr_send_pmgr_tables_to_pmu(g); | ||
| 111 | } | ||
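For orientation, here is a minimal usage sketch of the entry points defined in this file. The wrapper function and its call site are hypothetical (not part of this change), and the units are the ones suggested by the payload field names (powerm_w, currentm_a, voltageu_v):

static int nvgpu_pmgr_example_read_sensors(struct gk20a *g)
{
	u32 power_mw, current_ma, voltage_uv;
	int err;

	/* Build the pmgr boardobj groups from the VBIOS tables... */
	if (pmgr_domain_sw_setup(g) != 0U) {
		return -EINVAL;
	}

	/* ...then send the tables to the PMU and issue the LOAD command. */
	err = pmgr_domain_pmu_setup(g);
	if (err != 0) {
		return err;
	}

	/* Blocking queries against the first power device (mask bit 0). */
	err = pmgr_pwr_devices_get_power(g, &power_mw);
	err = (err != 0) ? err : pmgr_pwr_devices_get_current(g, &current_ma);
	err = (err != 0) ? err : pmgr_pwr_devices_get_voltage(g, &voltage_uv);
	if (err != 0) {
		return err;
	}

	nvgpu_log_info(g, "power %u mW current %u mA voltage %u uV",
		power_mw, current_ma, voltage_uv);
	return 0;
}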
diff --git a/include/pmgr/pmgr.h b/include/pmgr/pmgr.h
new file mode 100644
index 0000000..9b142de
--- /dev/null
+++ b/include/pmgr/pmgr.h
| @@ -0,0 +1,43 @@ | |||
| 1 | /* | ||
| 2 | * general power device structures & definitions | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 22 | * DEALINGS IN THE SOFTWARE. | ||
| 23 | */ | ||
| 24 | #ifndef NVGPU_PMGR_H | ||
| 25 | #define NVGPU_PMGR_H | ||
| 26 | |||
| 27 | #include "pwrdev.h" | ||
| 28 | #include "pwrmonitor.h" | ||
| 29 | #include "pwrpolicy.h" | ||
| 30 | |||
| 31 | struct pmgr_pmupstate { | ||
| 32 | struct pwr_devices pmgr_deviceobjs; | ||
| 33 | struct pmgr_pwr_monitor pmgr_monitorobjs; | ||
| 34 | struct pmgr_pwr_policy pmgr_policyobjs; | ||
| 35 | }; | ||
| 36 | |||
| 37 | u32 pmgr_domain_sw_setup(struct gk20a *g); | ||
| 38 | int pmgr_domain_pmu_setup(struct gk20a *g); | ||
| 39 | int pmgr_pwr_devices_get_current(struct gk20a *g, u32 *val); | ||
| 40 | int pmgr_pwr_devices_get_voltage(struct gk20a *g, u32 *val); | ||
| 41 | int pmgr_pwr_devices_get_power(struct gk20a *g, u32 *val); | ||
| 42 | |||
| 43 | #endif /* NVGPU_PMGR_H */ | ||
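The pmgrpmu.c and pwrdev.c code below reaches this state through g->pmgr_pmu (and the INA3221 I2C parameters through g->ina3221_*), so struct gk20a presumably embeds it roughly as sketched here. The real definition lives in <nvgpu/gk20a.h> and is not part of this patch; the exact field types are guesses:

struct gk20a {
	/* ... existing members ... */
	struct pmgr_pmupstate pmgr_pmu;	/* power-manager boardobj state */
	u32 ina3221_dcb_index;		/* DCB index of the INA3221 sensor */
	u32 ina3221_i2c_address;	/* I2C address used in the descriptor table */
	u32 ina3221_i2c_port;		/* I2C port the sensor sits on */
	/* ... */
};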
diff --git a/include/pmgr/pmgrpmu.c b/include/pmgr/pmgrpmu.c
new file mode 100644
index 0000000..b6947f2
--- /dev/null
+++ b/include/pmgr/pmgrpmu.c
| @@ -0,0 +1,546 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <nvgpu/kmem.h> | ||
| 24 | #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> | ||
| 25 | #include <nvgpu/pmu.h> | ||
| 26 | #include <nvgpu/gk20a.h> | ||
| 27 | |||
| 28 | #include "gp106/bios_gp106.h" | ||
| 29 | |||
| 30 | #include "boardobj/boardobjgrp.h" | ||
| 31 | #include "boardobj/boardobjgrp_e32.h" | ||
| 32 | |||
| 33 | #include "pwrdev.h" | ||
| 34 | #include "pmgrpmu.h" | ||
| 35 | |||
| 36 | struct pmgr_pmucmdhandler_params { | ||
| 37 | u32 success; | ||
| 38 | }; | ||
| 39 | |||
| 40 | static void pmgr_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg, | ||
| 41 | void *param, u32 handle, u32 status) | ||
| 42 | { | ||
| 43 | struct pmgr_pmucmdhandler_params *phandlerparams = | ||
| 44 | (struct pmgr_pmucmdhandler_params *)param; | ||
| 45 | |||
| 46 | if ((msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_SET_OBJECT) && | ||
| 47 | (msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_QUERY) && | ||
| 48 | (msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_LOAD)) { | ||
| 49 | nvgpu_err(g, "unknow msg %x", msg->msg.pmgr.msg_type); | ||
| 50 | return; | ||
| 51 | } | ||
| 52 | |||
| 53 | if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_SET_OBJECT) { | ||
| 54 | if ((msg->msg.pmgr.set_object.b_success != 1) || | ||
| 55 | (msg->msg.pmgr.set_object.flcnstatus != 0U)) { | ||
| 56 | nvgpu_err(g, "pmgr msg failed %x %x %x %x", | ||
| 57 | msg->msg.pmgr.set_object.msg_type, | ||
| 58 | msg->msg.pmgr.set_object.b_success, | ||
| 59 | msg->msg.pmgr.set_object.flcnstatus, | ||
| 60 | msg->msg.pmgr.set_object.object_type); | ||
| 61 | return; | ||
| 62 | } | ||
| 63 | } else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_QUERY) { | ||
| 64 | if ((msg->msg.pmgr.query.b_success != 1) || | ||
| 65 | (msg->msg.pmgr.query.flcnstatus != 0U)) { | ||
| 66 | nvgpu_err(g, "pmgr msg failed %x %x %x %x", | ||
| 67 | msg->msg.pmgr.query.msg_type, | ||
| 68 | msg->msg.pmgr.query.b_success, | ||
| 69 | msg->msg.pmgr.query.flcnstatus, | ||
| 70 | msg->msg.pmgr.query.cmd_type); | ||
| 71 | return; | ||
| 72 | } | ||
| 73 | } else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_LOAD) { | ||
| 74 | if ((msg->msg.pmgr.load.b_success != 1) || | ||
| 75 | (msg->msg.pmgr.load.flcnstatus != 0U)) { | ||
| 76 | nvgpu_err(g, "pmgr msg failed %x %x %x", | ||
| 77 | msg->msg.pmgr.load.msg_type, | ||
| 78 | msg->msg.pmgr.load.b_success, | ||
| 79 | msg->msg.pmgr.load.flcnstatus); | ||
| 80 | return; | ||
| 81 | } | ||
| 82 | } | ||
| 83 | |||
| 84 | phandlerparams->success = 1; | ||
| 85 | } | ||
| 86 | |||
| 87 | static u32 pmgr_pmu_set_object(struct gk20a *g, | ||
| 88 | u8 type, | ||
| 89 | u16 dmem_size, | ||
| 90 | u16 fb_size, | ||
| 91 | void *pobj) | ||
| 92 | { | ||
| 93 | struct pmu_cmd cmd; | ||
| 94 | struct pmu_payload payload; | ||
| 95 | struct nv_pmu_pmgr_cmd_set_object *pcmd; | ||
| 96 | u32 status; | ||
| 97 | u32 seqdesc; | ||
| 98 | struct pmgr_pmucmdhandler_params handlerparams; | ||
| 99 | |||
| 100 | memset(&payload, 0, sizeof(struct pmu_payload)); | ||
| 101 | memset(&cmd, 0, sizeof(struct pmu_cmd)); | ||
| 102 | memset(&handlerparams, 0, sizeof(struct pmgr_pmucmdhandler_params)); | ||
| 103 | |||
| 104 | cmd.hdr.unit_id = PMU_UNIT_PMGR; | ||
| 105 | cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_set_object) + | ||
| 106 | (u32)sizeof(struct pmu_hdr); | ||
| 107 | |||
| 108 | pcmd = &cmd.cmd.pmgr.set_object; | ||
| 109 | pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_SET_OBJECT; | ||
| 110 | pcmd->object_type = type; | ||
| 111 | |||
| 112 | payload.in.buf = pobj; | ||
| 113 | payload.in.size = dmem_size; | ||
| 114 | payload.in.fb_size = fb_size; | ||
| 115 | payload.in.offset = NV_PMU_PMGR_SET_OBJECT_ALLOC_OFFSET; | ||
| 116 | |||
| 117 | /* Setup the handler params to communicate back results.*/ | ||
| 118 | handlerparams.success = 0; | ||
| 119 | |||
| 120 | status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, | ||
| 121 | PMU_COMMAND_QUEUE_LPQ, | ||
| 122 | pmgr_pmucmdhandler, | ||
| 123 | (void *)&handlerparams, | ||
| 124 | &seqdesc, ~0); | ||
| 125 | if (status) { | ||
| 126 | nvgpu_err(g, | ||
| 127 | "unable to post pmgr cmd for unit %x cmd id %x obj type %x", | ||
| 128 | cmd.hdr.unit_id, pcmd->cmd_type, pcmd->object_type); | ||
| 129 | goto exit; | ||
| 130 | } | ||
| 131 | |||
| 132 | pmu_wait_message_cond(&g->pmu, | ||
| 133 | gk20a_get_gr_idle_timeout(g), | ||
| 134 | &handlerparams.success, 1); | ||
| 135 | |||
| 136 | if (handlerparams.success == 0U) { | ||
| 137 | nvgpu_err(g, "could not process cmd"); | ||
| 138 | status = -ETIMEDOUT; | ||
| 139 | goto exit; | ||
| 140 | } | ||
| 141 | |||
| 142 | exit: | ||
| 143 | return status; | ||
| 144 | } | ||
| 145 | |||
| 146 | static u32 pmgr_send_i2c_device_topology_to_pmu(struct gk20a *g) | ||
| 147 | { | ||
| 148 | struct nv_pmu_pmgr_i2c_device_desc_table i2c_desc_table; | ||
| 149 | u32 idx = g->ina3221_dcb_index; | ||
| 150 | u32 status = 0; | ||
| 151 | |||
| 152 | /* INA3221 I2C device info */ | ||
| 153 | i2c_desc_table.dev_mask = (1UL << idx); | ||
| 154 | |||
| 155 | /* INA3221 */ | ||
| 156 | i2c_desc_table.devices[idx].super.type = 0x4E; | ||
| 157 | |||
| 158 | i2c_desc_table.devices[idx].dcb_index = idx; | ||
| 159 | i2c_desc_table.devices[idx].i2c_address = g->ina3221_i2c_address; | ||
| 160 | i2c_desc_table.devices[idx].i2c_flags = 0xC2F; | ||
| 161 | i2c_desc_table.devices[idx].i2c_port = g->ina3221_i2c_port; | ||
| 162 | |||
| 163 | /* Pass the table down to the PMU as an object */ | ||
| 164 | status = pmgr_pmu_set_object( | ||
| 165 | g, | ||
| 166 | NV_PMU_PMGR_OBJECT_I2C_DEVICE_DESC_TABLE, | ||
| 167 | (u16)sizeof(struct nv_pmu_pmgr_i2c_device_desc_table), | ||
| 168 | PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED, | ||
| 169 | &i2c_desc_table); | ||
| 170 | |||
| 171 | if (status) { | ||
| 172 | nvgpu_err(g, "pmgr_pmu_set_object failed %x", | ||
| 173 | status); | ||
| 174 | } | ||
| 175 | |||
| 176 | return status; | ||
| 177 | } | ||
| 178 | |||
| 179 | static int pmgr_send_pwr_device_topology_to_pmu(struct gk20a *g) | ||
| 180 | { | ||
| 181 | struct nv_pmu_pmgr_pwr_device_desc_table *pwr_desc_table; | ||
| 182 | struct nv_pmu_pmgr_pwr_device_desc_table_header *ppwr_desc_header; | ||
| 183 | int status = 0; | ||
| 184 | |||
| 185 | /* Set the BA-device-independent HW information */ | ||
| 186 | pwr_desc_table = nvgpu_kzalloc(g, sizeof(*pwr_desc_table)); | ||
| 187 | if (!pwr_desc_table) { | ||
| 188 | return -ENOMEM; | ||
| 189 | } | ||
| 190 | |||
| 191 | ppwr_desc_header = &(pwr_desc_table->hdr.data); | ||
| 192 | ppwr_desc_header->ba_info.b_initialized_and_used = false; | ||
| 193 | |||
| 194 | /* populate the table */ | ||
| 195 | boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&ppwr_desc_header->super, | ||
| 196 | g->pmgr_pmu.pmgr_deviceobjs.super.super.objmask); | ||
| 197 | |||
| 198 | status = boardobjgrp_pmudatainit_legacy(g, | ||
| 199 | &g->pmgr_pmu.pmgr_deviceobjs.super.super, | ||
| 200 | (struct nv_pmu_boardobjgrp_super *)pwr_desc_table); | ||
| 201 | |||
| 202 | if (status) { | ||
| 203 | nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x", | ||
| 204 | status); | ||
| 205 | goto exit; | ||
| 206 | } | ||
| 207 | |||
| 208 | /* Pass the table down to the PMU as an object */ | ||
| 209 | status = pmgr_pmu_set_object( | ||
| 210 | g, | ||
| 211 | NV_PMU_PMGR_OBJECT_PWR_DEVICE_DESC_TABLE, | ||
| 212 | (u16)sizeof( | ||
| 213 | union nv_pmu_pmgr_pwr_device_dmem_size), | ||
| 214 | (u16)sizeof(struct nv_pmu_pmgr_pwr_device_desc_table), | ||
| 215 | pwr_desc_table); | ||
| 216 | |||
| 217 | if (status) { | ||
| 218 | nvgpu_err(g, "pmgr_pmu_set_object failed %x", | ||
| 219 | status); | ||
| 220 | } | ||
| 221 | |||
| 222 | exit: | ||
| 223 | nvgpu_kfree(g, pwr_desc_table); | ||
| 224 | return status; | ||
| 225 | } | ||
| 226 | |||
| 227 | static int pmgr_send_pwr_monitor_to_pmu(struct gk20a *g) | ||
| 228 | { | ||
| 229 | struct nv_pmu_pmgr_pwr_monitor_pack *pwr_monitor_pack = NULL; | ||
| 230 | struct nv_pmu_pmgr_pwr_channel_header *pwr_channel_hdr; | ||
| 231 | struct nv_pmu_pmgr_pwr_chrelationship_header *pwr_chrelationship_header; | ||
| 232 | u32 max_dmem_size; | ||
| 233 | int status = 0; | ||
| 234 | |||
| 235 | pwr_monitor_pack = nvgpu_kzalloc(g, sizeof(*pwr_monitor_pack)); | ||
| 236 | if (!pwr_monitor_pack) { | ||
| 237 | return -ENOMEM; | ||
| 238 | } | ||
| 239 | |||
| 240 | /* Copy all the global settings from the RM copy */ | ||
| 241 | pwr_channel_hdr = &(pwr_monitor_pack->channels.hdr.data); | ||
| 242 | *pwr_monitor_pack = g->pmgr_pmu.pmgr_monitorobjs.pmu_data; | ||
| 243 | |||
| 244 | boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_channel_hdr->super, | ||
| 245 | g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super.objmask); | ||
| 246 | |||
| 247 | /* Copy in each channel */ | ||
| 248 | status = boardobjgrp_pmudatainit_legacy(g, | ||
| 249 | &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super, | ||
| 250 | (struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack->channels)); | ||
| 251 | |||
| 252 | if (status) { | ||
| 253 | nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x", | ||
| 254 | status); | ||
| 255 | goto exit; | ||
| 256 | } | ||
| 257 | |||
| 258 | /* Copy in each channel relationship */ | ||
| 259 | pwr_chrelationship_header = &(pwr_monitor_pack->ch_rels.hdr.data); | ||
| 260 | |||
| 261 | boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_chrelationship_header->super, | ||
| 262 | g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super.objmask); | ||
| 263 | |||
| 264 | pwr_channel_hdr->physical_channel_mask = g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask; | ||
| 265 | pwr_channel_hdr->type = NV_PMU_PMGR_PWR_MONITOR_TYPE_NO_POLLING; | ||
| 266 | |||
| 267 | status = boardobjgrp_pmudatainit_legacy(g, | ||
| 268 | &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super, | ||
| 269 | (struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack->ch_rels)); | ||
| 270 | |||
| 271 | if (status) { | ||
| 272 | nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x", | ||
| 273 | status); | ||
| 274 | goto exit; | ||
| 275 | } | ||
| 276 | |||
| 277 | /* Calculate the max Dmem buffer size */ | ||
| 278 | max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_monitor_dmem_size); | ||
| 279 | |||
| 280 | /* Pass the table down to the PMU as an object */ | ||
| 281 | status = pmgr_pmu_set_object( | ||
| 282 | g, | ||
| 283 | NV_PMU_PMGR_OBJECT_PWR_MONITOR, | ||
| 284 | (u16)max_dmem_size, | ||
| 285 | (u16)sizeof(struct nv_pmu_pmgr_pwr_monitor_pack), | ||
| 286 | pwr_monitor_pack); | ||
| 287 | |||
| 288 | if (status) { | ||
| 289 | nvgpu_err(g, "pmgr_pmu_set_object failed %x", | ||
| 290 | status); | ||
| 291 | } | ||
| 292 | |||
| 293 | exit: | ||
| 294 | nvgpu_kfree(g, pwr_monitor_pack); | ||
| 295 | return status; | ||
| 296 | } | ||
| 297 | |||
| 298 | static int pmgr_send_pwr_policy_to_pmu(struct gk20a *g) | ||
| 299 | { | ||
| 300 | struct nv_pmu_pmgr_pwr_policy_pack *ppwrpack = NULL; | ||
| 301 | struct pwr_policy *ppolicy = NULL; | ||
| 302 | int status = 0; | ||
| 303 | u8 indx; | ||
| 304 | u32 max_dmem_size; | ||
| 305 | |||
| 306 | ppwrpack = nvgpu_kzalloc(g, sizeof(struct nv_pmu_pmgr_pwr_policy_pack)); | ||
| 307 | if (!ppwrpack) { | ||
| 308 | nvgpu_err(g, "pwr policy alloc failed %x", | ||
| 309 | status); | ||
| 310 | status = -ENOMEM; | ||
| 311 | goto exit; | ||
| 312 | } | ||
| 313 | |||
| 314 | ppwrpack->policies.hdr.data.version = g->pmgr_pmu.pmgr_policyobjs.version; | ||
| 315 | ppwrpack->policies.hdr.data.b_enabled = g->pmgr_pmu.pmgr_policyobjs.b_enabled; | ||
| 316 | |||
| 317 | boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *) | ||
| 318 | &ppwrpack->policies.hdr.data.super, | ||
| 319 | g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super.objmask); | ||
| 320 | |||
| 321 | memset(&ppwrpack->policies.hdr.data.reserved_pmu_policy_mask, | ||
| 322 | 0, | ||
| 323 | sizeof(ppwrpack->policies.hdr.data.reserved_pmu_policy_mask)); | ||
| 324 | |||
| 325 | ppwrpack->policies.hdr.data.base_sample_period = | ||
| 326 | g->pmgr_pmu.pmgr_policyobjs.base_sample_period; | ||
| 327 | ppwrpack->policies.hdr.data.min_client_sample_period = | ||
| 328 | g->pmgr_pmu.pmgr_policyobjs.min_client_sample_period; | ||
| 329 | ppwrpack->policies.hdr.data.low_sampling_mult = | ||
| 330 | g->pmgr_pmu.pmgr_policyobjs.low_sampling_mult; | ||
| 331 | |||
| 332 | memcpy(&ppwrpack->policies.hdr.data.global_ceiling, | ||
| 333 | &g->pmgr_pmu.pmgr_policyobjs.global_ceiling, | ||
| 334 | sizeof(struct nv_pmu_perf_domain_group_limits)); | ||
| 335 | |||
| 336 | memcpy(&ppwrpack->policies.hdr.data.semantic_policy_tbl, | ||
| 337 | &g->pmgr_pmu.pmgr_policyobjs.policy_idxs, | ||
| 338 | sizeof(g->pmgr_pmu.pmgr_policyobjs.policy_idxs)); | ||
| 339 | |||
| 340 | BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, indx, | ||
| 341 | ppwrpack->policies.hdr.data.super.obj_mask.super.data[0]) { | ||
| 342 | ppolicy = PMGR_GET_PWR_POLICY(g, indx); | ||
| 343 | |||
| 344 | status = ((struct boardobj *)ppolicy)->pmudatainit(g, (struct boardobj *)ppolicy, | ||
| 345 | (struct nv_pmu_boardobj *)&(ppwrpack->policies.policies[indx].data)); | ||
| 346 | if (status) { | ||
| 347 | nvgpu_err(g, "pmudatainit failed %x indx %x", | ||
| 348 | status, indx); | ||
| 349 | status = -ENOMEM; | ||
| 350 | goto exit; | ||
| 351 | } | ||
| 352 | } | ||
| 353 | BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END; | ||
| 354 | |||
| 355 | boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *) | ||
| 356 | &ppwrpack->policy_rels.hdr.data.super, | ||
| 357 | g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super.objmask); | ||
| 358 | |||
| 359 | boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *) | ||
| 360 | &ppwrpack->violations.hdr.data.super, | ||
| 361 | g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super.objmask); | ||
| 362 | |||
| 363 | max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_policy_dmem_size); | ||
| 364 | |||
| 365 | /* Pass the table down to the PMU as an object */ | ||
| 366 | status = pmgr_pmu_set_object( | ||
| 367 | g, | ||
| 368 | NV_PMU_PMGR_OBJECT_PWR_POLICY, | ||
| 369 | (u16)max_dmem_size, | ||
| 370 | (u16)sizeof(struct nv_pmu_pmgr_pwr_policy_pack), | ||
| 371 | ppwrpack); | ||
| 372 | |||
| 373 | if (status) { | ||
| 374 | nvgpu_err(g, "pmgr_pmu_set_object failed %x", | ||
| 375 | status); | ||
| 376 | } | ||
| 377 | |||
| 378 | exit: | ||
| 379 | if (ppwrpack) { | ||
| 380 | nvgpu_kfree(g, ppwrpack); | ||
| 381 | } | ||
| 382 | |||
| 383 | return status; | ||
| 384 | } | ||
| 385 | |||
| 386 | u32 pmgr_pmu_pwr_devices_query_blocking( | ||
| 387 | struct gk20a *g, | ||
| 388 | u32 pwr_dev_mask, | ||
| 389 | struct nv_pmu_pmgr_pwr_devices_query_payload *ppayload) | ||
| 390 | { | ||
| 391 | struct pmu_cmd cmd; | ||
| 392 | struct pmu_payload payload; | ||
| 393 | struct nv_pmu_pmgr_cmd_pwr_devices_query *pcmd; | ||
| 394 | u32 status; | ||
| 395 | u32 seqdesc; | ||
| 396 | struct pmgr_pmucmdhandler_params handlerparams; | ||
| 397 | |||
| 398 | memset(&payload, 0, sizeof(struct pmu_payload)); | ||
| 399 | memset(&cmd, 0, sizeof(struct pmu_cmd)); | ||
| 400 | memset(&handlerparams, 0, sizeof(struct pmgr_pmucmdhandler_params)); | ||
| 401 | |||
| 402 | cmd.hdr.unit_id = PMU_UNIT_PMGR; | ||
| 403 | cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_pwr_devices_query) + | ||
| 404 | (u32)sizeof(struct pmu_hdr); | ||
| 405 | |||
| 406 | pcmd = &cmd.cmd.pmgr.pwr_dev_query; | ||
| 407 | pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_PWR_DEVICES_QUERY; | ||
| 408 | pcmd->dev_mask = pwr_dev_mask; | ||
| 409 | |||
| 410 | payload.out.buf = ppayload; | ||
| 411 | payload.out.size = sizeof(struct nv_pmu_pmgr_pwr_devices_query_payload); | ||
| 412 | payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED; | ||
| 413 | payload.out.offset = NV_PMU_PMGR_PWR_DEVICES_QUERY_ALLOC_OFFSET; | ||
| 414 | |||
| 415 | /* Setup the handler params to communicate back results.*/ | ||
| 416 | handlerparams.success = 0; | ||
| 417 | |||
| 418 | status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload, | ||
| 419 | PMU_COMMAND_QUEUE_LPQ, | ||
| 420 | pmgr_pmucmdhandler, | ||
| 421 | (void *)&handlerparams, | ||
| 422 | &seqdesc, ~0); | ||
| 423 | if (status) { | ||
| 424 | nvgpu_err(g, | ||
| 425 | "unable to post pmgr query cmd for unit %x cmd id %x dev mask %x", | ||
| 426 | cmd.hdr.unit_id, pcmd->cmd_type, pcmd->dev_mask); | ||
| 427 | goto exit; | ||
| 428 | } | ||
| 429 | |||
| 430 | pmu_wait_message_cond(&g->pmu, | ||
| 431 | gk20a_get_gr_idle_timeout(g), | ||
| 432 | &handlerparams.success, 1); | ||
| 433 | |||
| 434 | if (handlerparams.success == 0U) { | ||
| 435 | nvgpu_err(g, "could not process cmd"); | ||
| 436 | status = -ETIMEDOUT; | ||
| 437 | goto exit; | ||
| 438 | } | ||
| 439 | |||
| 440 | exit: | ||
| 441 | return status; | ||
| 442 | } | ||
| 443 | |||
| 444 | static u32 pmgr_pmu_load_blocking(struct gk20a *g) | ||
| 445 | { | ||
| 446 | struct pmu_cmd cmd = { {0} }; | ||
| 447 | struct nv_pmu_pmgr_cmd_load *pcmd; | ||
| 448 | u32 status; | ||
| 449 | u32 seqdesc; | ||
| 450 | struct pmgr_pmucmdhandler_params handlerparams = {0}; | ||
| 451 | |||
| 452 | cmd.hdr.unit_id = PMU_UNIT_PMGR; | ||
| 453 | cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_load) + | ||
| 454 | (u32)sizeof(struct pmu_hdr); | ||
| 455 | |||
| 456 | pcmd = &cmd.cmd.pmgr.load; | ||
| 457 | pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_LOAD; | ||
| 458 | |||
| 459 | /* Setup the handler params to communicate back results.*/ | ||
| 460 | handlerparams.success = 0; | ||
| 461 | |||
| 462 | status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, | ||
| 463 | PMU_COMMAND_QUEUE_LPQ, | ||
| 464 | pmgr_pmucmdhandler, | ||
| 465 | (void *)&handlerparams, | ||
| 466 | &seqdesc, ~0); | ||
| 467 | if (status) { | ||
| 468 | nvgpu_err(g, | ||
| 469 | "unable to post pmgr load cmd for unit %x cmd id %x", | ||
| 470 | cmd.hdr.unit_id, pcmd->cmd_type); | ||
| 471 | goto exit; | ||
| 472 | } | ||
| 473 | |||
| 474 | pmu_wait_message_cond(&g->pmu, | ||
| 475 | gk20a_get_gr_idle_timeout(g), | ||
| 476 | &handlerparams.success, 1); | ||
| 477 | |||
| 478 | if (handlerparams.success == 0U) { | ||
| 479 | nvgpu_err(g, "could not process cmd"); | ||
| 480 | status = -ETIMEDOUT; | ||
| 481 | goto exit; | ||
| 482 | } | ||
| 483 | |||
| 484 | exit: | ||
| 485 | return status; | ||
| 486 | } | ||
| 487 | |||
| 488 | int pmgr_send_pmgr_tables_to_pmu(struct gk20a *g) | ||
| 489 | { | ||
| 490 | int status = 0; | ||
| 491 | |||
| 492 | status = pmgr_send_i2c_device_topology_to_pmu(g); | ||
| 493 | |||
| 494 | if (status) { | ||
| 495 | nvgpu_err(g, | ||
| 496 | "pmgr_send_i2c_device_topology_to_pmu failed %x", | ||
| 497 | status); | ||
| 498 | goto exit; | ||
| 499 | } | ||
| 500 | |||
| 501 | if (!BOARDOBJGRP_IS_EMPTY(&g->pmgr_pmu.pmgr_deviceobjs.super.super)) { | ||
| 502 | status = pmgr_send_pwr_device_topology_to_pmu(g); | ||
| 503 | if (status) { | ||
| 504 | nvgpu_err(g, | ||
| 505 | "pmgr_send_pwr_device_topology_to_pmu failed %x", | ||
| 506 | status); | ||
| 507 | goto exit; | ||
| 508 | } | ||
| 509 | } | ||
| 510 | |||
| 511 | if (!(BOARDOBJGRP_IS_EMPTY( | ||
| 512 | &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super)) || | ||
| 513 | !(BOARDOBJGRP_IS_EMPTY( | ||
| 514 | &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super))) { | ||
| 515 | status = pmgr_send_pwr_monitor_to_pmu(g); | ||
| 516 | if (status) { | ||
| 517 | nvgpu_err(g, | ||
| 518 | "pmgr_send_pwr_mointer_to_pmu failed %x", status); | ||
| 519 | goto exit; | ||
| 520 | } | ||
| 521 | } | ||
| 522 | |||
| 523 | if (!(BOARDOBJGRP_IS_EMPTY( | ||
| 524 | &g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super)) || | ||
| 525 | !(BOARDOBJGRP_IS_EMPTY( | ||
| 526 | &g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super)) || | ||
| 527 | !(BOARDOBJGRP_IS_EMPTY( | ||
| 528 | &g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super))) { | ||
| 529 | status = pmgr_send_pwr_policy_to_pmu(g); | ||
| 530 | if (status) { | ||
| 531 | nvgpu_err(g, | ||
| 532 | "pmgr_send_pwr_policy_to_pmu failed %x", status); | ||
| 533 | goto exit; | ||
| 534 | } | ||
| 535 | } | ||
| 536 | |||
| 537 | status = pmgr_pmu_load_blocking(g); | ||
| 538 | if (status) { | ||
| 539 | nvgpu_err(g, | ||
| 540 | "pmgr_send_pwr_mointer_to_pmu failed %x", status); | ||
| 541 | goto exit; | ||
| 542 | } | ||
| 543 | |||
| 544 | exit: | ||
| 545 | return status; | ||
| 546 | } | ||
diff --git a/include/pmgr/pmgrpmu.h b/include/pmgr/pmgrpmu.h
new file mode 100644
index 0000000..f4ffaef
--- /dev/null
+++ b/include/pmgr/pmgrpmu.h
| @@ -0,0 +1,39 @@ | |||
| 1 | /* | ||
| 2 | * general power device control structures & definitions | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 22 | * DEALINGS IN THE SOFTWARE. | ||
| 23 | */ | ||
| 24 | #ifndef NVGPU_PMGRPMU_H | ||
| 25 | #define NVGPU_PMGRPMU_H | ||
| 26 | |||
| 27 | #include <nvgpu/gk20a.h> | ||
| 28 | |||
| 29 | #include "pwrdev.h" | ||
| 30 | #include "pwrmonitor.h" | ||
| 31 | |||
| 32 | int pmgr_send_pmgr_tables_to_pmu(struct gk20a *g); | ||
| 33 | |||
| 34 | u32 pmgr_pmu_pwr_devices_query_blocking( | ||
| 35 | struct gk20a *g, | ||
| 36 | u32 pwr_dev_mask, | ||
| 37 | struct nv_pmu_pmgr_pwr_devices_query_payload *ppayload); | ||
| 38 | |||
| 39 | #endif /* NVGPU_PMGRPMU_H */ | ||
diff --git a/include/pmgr/pwrdev.c b/include/pmgr/pwrdev.c
new file mode 100644
index 0000000..c1bf084
--- /dev/null
+++ b/include/pmgr/pwrdev.c
| @@ -0,0 +1,319 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <nvgpu/bios.h> | ||
| 24 | #include <nvgpu/gk20a.h> | ||
| 25 | |||
| 26 | #include "pwrdev.h" | ||
| 27 | #include "boardobj/boardobjgrp.h" | ||
| 28 | #include "boardobj/boardobjgrp_e32.h" | ||
| 29 | #include "gp106/bios_gp106.h" | ||
| 30 | |||
| 31 | static int _pwr_device_pmudata_instget(struct gk20a *g, | ||
| 32 | struct nv_pmu_boardobjgrp *pmuboardobjgrp, | ||
| 33 | struct nv_pmu_boardobj **ppboardobjpmudata, | ||
| 34 | u8 idx) | ||
| 35 | { | ||
| 36 | struct nv_pmu_pmgr_pwr_device_desc_table *ppmgrdevice = | ||
| 37 | (struct nv_pmu_pmgr_pwr_device_desc_table *)pmuboardobjgrp; | ||
| 38 | |||
| 39 | nvgpu_log_info(g, " "); | ||
| 40 | |||
| 41 | /*check whether pmuboardobjgrp has a valid boardobj in index*/ | ||
| 42 | if (((u32)BIT(idx) & | ||
| 43 | ppmgrdevice->hdr.data.super.obj_mask.super.data[0]) == 0U) { | ||
| 44 | return -EINVAL; | ||
| 45 | } | ||
| 46 | |||
| 47 | *ppboardobjpmudata = (struct nv_pmu_boardobj *) | ||
| 48 | &ppmgrdevice->devices[idx].data.board_obj; | ||
| 49 | |||
| 50 | nvgpu_log_info(g, " Done"); | ||
| 51 | |||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | |||
| 55 | static int _pwr_domains_pmudatainit_ina3221(struct gk20a *g, | ||
| 56 | struct boardobj *board_obj_ptr, | ||
| 57 | struct nv_pmu_boardobj *ppmudata) | ||
| 58 | { | ||
| 59 | struct nv_pmu_pmgr_pwr_device_desc_ina3221 *ina3221_desc; | ||
| 60 | struct pwr_device_ina3221 *ina3221; | ||
| 61 | int status = 0; | ||
| 62 | u32 indx; | ||
| 63 | |||
| 64 | status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); | ||
| 65 | if (status) { | ||
| 66 | nvgpu_err(g, | ||
| 67 | "error updating pmu boardobjgrp for pwr domain 0x%x", | ||
| 68 | status); | ||
| 69 | goto done; | ||
| 70 | } | ||
| 71 | |||
| 72 | ina3221 = (struct pwr_device_ina3221 *)board_obj_ptr; | ||
| 73 | ina3221_desc = (struct nv_pmu_pmgr_pwr_device_desc_ina3221 *) ppmudata; | ||
| 74 | |||
| 75 | ina3221_desc->super.power_corr_factor = ina3221->super.power_corr_factor; | ||
| 76 | ina3221_desc->i2c_dev_idx = ina3221->super.i2c_dev_idx; | ||
| 77 | ina3221_desc->configuration = ina3221->configuration; | ||
| 78 | ina3221_desc->mask_enable = ina3221->mask_enable; | ||
| 79 | /* configure NV_PMU_THERM_EVENT_EXT_OVERT */ | ||
| 80 | ina3221_desc->event_mask = (1 << 0); | ||
| 81 | ina3221_desc->curr_correct_m = ina3221->curr_correct_m; | ||
| 82 | ina3221_desc->curr_correct_b = ina3221->curr_correct_b; | ||
| 83 | |||
| 84 | for (indx = 0; indx < NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM; indx++) { | ||
| 85 | ina3221_desc->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx]; | ||
| 86 | } | ||
| 87 | |||
| 88 | done: | ||
| 89 | return status; | ||
| 90 | } | ||
| 91 | |||
| 92 | static struct boardobj *construct_pwr_device(struct gk20a *g, | ||
| 93 | void *pargs, u16 pargs_size, u8 type) | ||
| 94 | { | ||
| 95 | struct boardobj *board_obj_ptr = NULL; | ||
| 96 | int status; | ||
| 97 | u32 indx; | ||
| 98 | struct pwr_device_ina3221 *pwrdev; | ||
| 99 | struct pwr_device_ina3221 *ina3221 = (struct pwr_device_ina3221*)pargs; | ||
| 100 | |||
| 101 | status = boardobj_construct_super(g, &board_obj_ptr, | ||
| 102 | pargs_size, pargs); | ||
| 103 | if (status) { | ||
| 104 | return NULL; | ||
| 105 | } | ||
| 106 | |||
| 107 | pwrdev = (struct pwr_device_ina3221*)board_obj_ptr; | ||
| 108 | |||
| 109 | /* Set Super class interfaces */ | ||
| 110 | board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_ina3221; | ||
| 111 | pwrdev->super.power_rail = ina3221->super.power_rail; | ||
| 112 | pwrdev->super.i2c_dev_idx = ina3221->super.i2c_dev_idx; | ||
| 113 | pwrdev->super.power_corr_factor = (1 << 12); | ||
| 114 | pwrdev->super.bIs_inforom_config = false; | ||
| 115 | |||
| 116 | /* Set INA3221-specific information */ | ||
| 117 | pwrdev->configuration = ina3221->configuration; | ||
| 118 | pwrdev->mask_enable = ina3221->mask_enable; | ||
| 119 | pwrdev->gpio_function = ina3221->gpio_function; | ||
| 120 | pwrdev->curr_correct_m = ina3221->curr_correct_m; | ||
| 121 | pwrdev->curr_correct_b = ina3221->curr_correct_b; | ||
| 122 | |||
| 123 | for (indx = 0; indx < NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM; indx++) { | ||
| 124 | pwrdev->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx]; | ||
| 125 | } | ||
| 126 | |||
| 127 | nvgpu_log_info(g, " Done"); | ||
| 128 | |||
| 129 | return board_obj_ptr; | ||
| 130 | } | ||
| 131 | |||
| 132 | static int devinit_get_pwr_device_table(struct gk20a *g, | ||
| 133 | struct pwr_devices *ppwrdeviceobjs) | ||
| 134 | { | ||
| 135 | int status = 0; | ||
| 136 | u8 *pwr_device_table_ptr = NULL; | ||
| 137 | u8 *curr_pwr_device_table_ptr = NULL; | ||
| 138 | struct boardobj *boardobj; | ||
| 139 | struct pwr_sensors_2x_header pwr_sensor_table_header = { 0 }; | ||
| 140 | struct pwr_sensors_2x_entry pwr_sensor_table_entry = { 0 }; | ||
| 141 | u32 index; | ||
| 142 | u32 obj_index = 0; | ||
| 143 | u16 pwr_device_size; | ||
| 144 | union { | ||
| 145 | struct boardobj boardobj; | ||
| 146 | struct pwr_device pwrdev; | ||
| 147 | struct pwr_device_ina3221 ina3221; | ||
| 148 | } pwr_device_data; | ||
| 149 | |||
| 150 | nvgpu_log_info(g, " "); | ||
| 151 | |||
| 152 | pwr_device_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, | ||
| 153 | g->bios.perf_token, POWER_SENSORS_TABLE); | ||
| 154 | if (pwr_device_table_ptr == NULL) { | ||
| 155 | status = -EINVAL; | ||
| 156 | goto done; | ||
| 157 | } | ||
| 158 | |||
| 159 | memcpy(&pwr_sensor_table_header, pwr_device_table_ptr, | ||
| 160 | VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08); | ||
| 161 | |||
| 162 | if (pwr_sensor_table_header.version != | ||
| 163 | VBIOS_POWER_SENSORS_VERSION_2X) { | ||
| 164 | status = -EINVAL; | ||
| 165 | goto done; | ||
| 166 | } | ||
| 167 | |||
| 168 | if (pwr_sensor_table_header.header_size < | ||
| 169 | VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08) { | ||
| 170 | status = -EINVAL; | ||
| 171 | goto done; | ||
| 172 | } | ||
| 173 | |||
| 174 | if (pwr_sensor_table_header.table_entry_size != | ||
| 175 | VBIOS_POWER_SENSORS_2X_ENTRY_SIZE_15) { | ||
| 176 | status = -EINVAL; | ||
| 177 | goto done; | ||
| 178 | } | ||
| 179 | |||
| 180 | curr_pwr_device_table_ptr = (pwr_device_table_ptr + | ||
| 181 | VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08); | ||
| 182 | |||
| 183 | for (index = 0; index < pwr_sensor_table_header.num_table_entries; index++) { | ||
| 184 | bool use_fxp8_8 = false; | ||
| 185 | u8 i2c_dev_idx; | ||
| 186 | u8 device_type; | ||
| 187 | |||
| 188 | curr_pwr_device_table_ptr = pwr_device_table_ptr + VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08 + (pwr_sensor_table_header.table_entry_size * index); | ||
| 189 | |||
| 190 | pwr_sensor_table_entry.flags0 = *curr_pwr_device_table_ptr; | ||
| 191 | |||
| 192 | memcpy(&pwr_sensor_table_entry.class_param0, | ||
| 193 | (curr_pwr_device_table_ptr + 1), | ||
| 194 | (VBIOS_POWER_SENSORS_2X_ENTRY_SIZE_15 - 1U)); | ||
| 195 | |||
| 196 | device_type = (u8)BIOS_GET_FIELD( | ||
| 197 | pwr_sensor_table_entry.flags0, | ||
| 198 | NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS); | ||
| 199 | |||
| 200 | if (device_type == NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS_I2C) { | ||
| 201 | i2c_dev_idx = (u8)BIOS_GET_FIELD( | ||
| 202 | pwr_sensor_table_entry.class_param0, | ||
| 203 | NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_INDEX); | ||
| 204 | use_fxp8_8 = (u8)BIOS_GET_FIELD( | ||
| 205 | pwr_sensor_table_entry.class_param0, | ||
| 206 | NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_USE_FXP8_8); | ||
| 207 | |||
| 208 | pwr_device_data.ina3221.super.i2c_dev_idx = i2c_dev_idx; | ||
| 209 | pwr_device_data.ina3221.r_shuntm_ohm[0].use_fxp8_8 = use_fxp8_8; | ||
| 210 | pwr_device_data.ina3221.r_shuntm_ohm[1].use_fxp8_8 = use_fxp8_8; | ||
| 211 | pwr_device_data.ina3221.r_shuntm_ohm[2].use_fxp8_8 = use_fxp8_8; | ||
| 212 | pwr_device_data.ina3221.r_shuntm_ohm[0].rshunt_value = | ||
| 213 | (u16)BIOS_GET_FIELD( | ||
| 214 | pwr_sensor_table_entry.sensor_param0, | ||
| 215 | NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT0_MOHM); | ||
| 216 | |||
| 217 | pwr_device_data.ina3221.r_shuntm_ohm[1].rshunt_value = | ||
| 218 | (u16)BIOS_GET_FIELD( | ||
| 219 | pwr_sensor_table_entry.sensor_param0, | ||
| 220 | NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT1_MOHM); | ||
| 221 | |||
| 222 | pwr_device_data.ina3221.r_shuntm_ohm[2].rshunt_value = | ||
| 223 | (u16)BIOS_GET_FIELD( | ||
| 224 | pwr_sensor_table_entry.sensor_param1, | ||
| 225 | NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_RSHUNT2_MOHM); | ||
| 226 | pwr_device_data.ina3221.configuration = | ||
| 227 | (u16)BIOS_GET_FIELD( | ||
| 228 | pwr_sensor_table_entry.sensor_param1, | ||
| 229 | NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_CONFIGURATION); | ||
| 230 | |||
| 231 | pwr_device_data.ina3221.mask_enable = | ||
| 232 | (u16)BIOS_GET_FIELD( | ||
| 233 | pwr_sensor_table_entry.sensor_param2, | ||
| 234 | NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_MASKENABLE); | ||
| 235 | |||
| 236 | pwr_device_data.ina3221.gpio_function = | ||
| 237 | (u8)BIOS_GET_FIELD( | ||
| 238 | pwr_sensor_table_entry.sensor_param2, | ||
| 239 | NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_GPIOFUNCTION); | ||
| 240 | |||
| 241 | pwr_device_data.ina3221.curr_correct_m = | ||
| 242 | (u16)BIOS_GET_FIELD( | ||
| 243 | pwr_sensor_table_entry.sensor_param3, | ||
| 244 | NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_M); | ||
| 245 | |||
| 246 | pwr_device_data.ina3221.curr_correct_b = | ||
| 247 | (u16)BIOS_GET_FIELD( | ||
| 248 | pwr_sensor_table_entry.sensor_param3, | ||
| 249 | NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_B); | ||
| 250 | |||
| 251 | if (!pwr_device_data.ina3221.curr_correct_m) { | ||
| 252 | pwr_device_data.ina3221.curr_correct_m = (1 << 12); | ||
| 253 | } | ||
| 254 | pwr_device_size = sizeof(struct pwr_device_ina3221); | ||
| 255 | } else { | ||
| 256 | continue; | ||
| 257 | } | ||
| 258 | |||
| 259 | pwr_device_data.boardobj.type = CTRL_PMGR_PWR_DEVICE_TYPE_INA3221; | ||
| 260 | pwr_device_data.pwrdev.power_rail = (u8)0; | ||
| 261 | |||
| 262 | boardobj = construct_pwr_device(g, &pwr_device_data, | ||
| 263 | pwr_device_size, pwr_device_data.boardobj.type); | ||
| 264 | |||
| 265 | if (!boardobj) { | ||
| 266 | nvgpu_err(g, | ||
| 267 | "unable to create pwr device for %d type %d", index, pwr_device_data.boardobj.type); | ||
| 268 | status = -EINVAL; | ||
| 269 | goto done; | ||
| 270 | } | ||
| 271 | |||
| 272 | status = boardobjgrp_objinsert(&ppwrdeviceobjs->super.super, | ||
| 273 | boardobj, obj_index); | ||
| 274 | |||
| 275 | if (status) { | ||
| 276 | nvgpu_err(g, | ||
| 277 | "unable to insert pwr device boardobj for %d", index); | ||
| 278 | status = -EINVAL; | ||
| 279 | goto done; | ||
| 280 | } | ||
| 281 | |||
| 282 | ++obj_index; | ||
| 283 | } | ||
| 284 | |||
| 285 | done: | ||
| 286 | nvgpu_log_info(g, " done status %x", status); | ||
| 287 | return status; | ||
| 288 | } | ||
| 289 | |||
| 290 | int pmgr_device_sw_setup(struct gk20a *g) | ||
| 291 | { | ||
| 292 | int status; | ||
| 293 | struct boardobjgrp *pboardobjgrp = NULL; | ||
| 294 | struct pwr_devices *ppwrdeviceobjs; | ||
| 295 | |||
| 296 | /* Construct the Super Class and override the Interfaces */ | ||
| 297 | status = boardobjgrpconstruct_e32(g, &g->pmgr_pmu.pmgr_deviceobjs.super); | ||
| 298 | if (status) { | ||
| 299 | nvgpu_err(g, | ||
| 300 | "error creating boardobjgrp for pmgr devices, status - 0x%x", | ||
| 301 | status); | ||
| 302 | goto done; | ||
| 303 | } | ||
| 304 | |||
| 305 | pboardobjgrp = &g->pmgr_pmu.pmgr_deviceobjs.super.super; | ||
| 306 | ppwrdeviceobjs = &(g->pmgr_pmu.pmgr_deviceobjs); | ||
| 307 | |||
| 308 | /* Override the Interfaces */ | ||
| 309 | pboardobjgrp->pmudatainstget = _pwr_device_pmudata_instget; | ||
| 310 | |||
| 311 | status = devinit_get_pwr_device_table(g, ppwrdeviceobjs); | ||
| 312 | if (status) { | ||
| 313 | goto done; | ||
| 314 | } | ||
| 315 | |||
| 316 | done: | ||
| 317 | nvgpu_log_info(g, " done status %x", status); | ||
| 318 | return status; | ||
| 319 | } | ||
diff --git a/include/pmgr/pwrdev.h b/include/pmgr/pwrdev.h
new file mode 100644
index 0000000..4bcf65a
--- /dev/null
+++ b/include/pmgr/pwrdev.h
| @@ -0,0 +1,60 @@ | |||
| 1 | /* | ||
| 2 | * general power device structures & definitions | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 22 | * DEALINGS IN THE SOFTWARE. | ||
| 23 | */ | ||
| 24 | #ifndef NVGPU_PMGR_PWRDEV_H | ||
| 25 | #define NVGPU_PMGR_PWRDEV_H | ||
| 26 | |||
| 27 | #include "boardobj/boardobj.h" | ||
| 28 | #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> | ||
| 29 | #include "ctrl/ctrlpmgr.h" | ||
| 30 | |||
| 31 | #define PWRDEV_I2CDEV_DEVICE_INDEX_NONE (0xFF) | ||
| 32 | |||
| 33 | #define PWR_DEVICE_PROV_NUM_DEFAULT 1 | ||
| 34 | |||
| 35 | struct pwr_device { | ||
| 36 | struct boardobj super; | ||
| 37 | u8 power_rail; | ||
| 38 | u8 i2c_dev_idx; | ||
| 39 | bool bIs_inforom_config; | ||
| 40 | u32 power_corr_factor; | ||
| 41 | }; | ||
| 42 | |||
| 43 | struct pwr_devices { | ||
| 44 | struct boardobjgrp_e32 super; | ||
| 45 | }; | ||
| 46 | |||
| 47 | struct pwr_device_ina3221 { | ||
| 48 | struct pwr_device super; | ||
| 49 | struct ctrl_pmgr_pwr_device_info_rshunt | ||
| 50 | r_shuntm_ohm[NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM]; | ||
| 51 | u16 configuration; | ||
| 52 | u16 mask_enable; | ||
| 53 | u8 gpio_function; | ||
| 54 | u16 curr_correct_m; | ||
| 55 | s16 curr_correct_b; | ||
| 56 | }; | ||
| 57 | |||
| 58 | int pmgr_device_sw_setup(struct gk20a *g); | ||
| 59 | |||
| 60 | #endif /* NVGPU_PMGR_PWRDEV_H */ | ||
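A note on the (1 << 12) values used in pwrdev.c above (power_corr_factor, and curr_correct_m when the VBIOS leaves it zero) and in pwrmonitor.c below (pwr_corr_slope): they read as 1.0 in a fixed-point format with 12 fractional bits, i.e. "no correction". A purely illustrative sketch of how such a slope/offset pair could be applied; the actual math runs on the PMU and is not part of this patch, and the unit of curr_correct_b is an assumption:

static u32 apply_curr_correction(u32 raw_ma, u16 correct_m, s16 correct_b)
{
	/* corrected = raw * m + b, with m carrying 12 fractional bits
	 * (0x1000 == 1.0) and b assumed to be in mA */
	u32 scaled = (u32)(((u64)raw_ma * correct_m) >> 12);

	return scaled + (u32)((s32)correct_b);
}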
diff --git a/include/pmgr/pwrmonitor.c b/include/pmgr/pwrmonitor.c
new file mode 100644
index 0000000..710ae85
--- /dev/null
+++ b/include/pmgr/pwrmonitor.c
| @@ -0,0 +1,376 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <nvgpu/bios.h> | ||
| 24 | #include <nvgpu/gk20a.h> | ||
| 25 | |||
| 26 | #include "pwrdev.h" | ||
| 27 | #include "boardobj/boardobjgrp.h" | ||
| 28 | #include "boardobj/boardobjgrp_e32.h" | ||
| 29 | #include "gp106/bios_gp106.h" | ||
| 30 | |||
| 31 | static int _pwr_channel_pmudata_instget(struct gk20a *g, | ||
| 32 | struct nv_pmu_boardobjgrp *pmuboardobjgrp, | ||
| 33 | struct nv_pmu_boardobj **ppboardobjpmudata, | ||
| 34 | u8 idx) | ||
| 35 | { | ||
| 36 | struct nv_pmu_pmgr_pwr_channel_desc *ppmgrchannel = | ||
| 37 | (struct nv_pmu_pmgr_pwr_channel_desc *)pmuboardobjgrp; | ||
| 38 | |||
| 39 | nvgpu_log_info(g, " "); | ||
| 40 | |||
| 41 | /*check whether pmuboardobjgrp has a valid boardobj in index*/ | ||
| 42 | if (((u32)BIT(idx) & | ||
| 43 | ppmgrchannel->hdr.data.super.obj_mask.super.data[0]) == 0U) { | ||
| 44 | return -EINVAL; | ||
| 45 | } | ||
| 46 | |||
| 47 | *ppboardobjpmudata = (struct nv_pmu_boardobj *) | ||
| 48 | &ppmgrchannel->channels[idx].data.board_obj; | ||
| 49 | |||
| 50 | /* handle Global/common data here as we need index */ | ||
| 51 | ppmgrchannel->channels[idx].data.pwr_channel.ch_idx = idx; | ||
| 52 | |||
| 53 | nvgpu_log_info(g, " Done"); | ||
| 54 | |||
| 55 | return 0; | ||
| 56 | } | ||
| 57 | |||
| 58 | static int _pwr_channel_rels_pmudata_instget(struct gk20a *g, | ||
| 59 | struct nv_pmu_boardobjgrp *pmuboardobjgrp, | ||
| 60 | struct nv_pmu_boardobj **ppboardobjpmudata, | ||
| 61 | u8 idx) | ||
| 62 | { | ||
| 63 | struct nv_pmu_pmgr_pwr_chrelationship_desc *ppmgrchrels = | ||
| 64 | (struct nv_pmu_pmgr_pwr_chrelationship_desc *)pmuboardobjgrp; | ||
| 65 | |||
| 66 | nvgpu_log_info(g, " "); | ||
| 67 | |||
| 68 | /*check whether pmuboardobjgrp has a valid boardobj in index*/ | ||
| 69 | if (((u32)BIT(idx) & | ||
| 70 | ppmgrchrels->hdr.data.super.obj_mask.super.data[0]) == 0U) { | ||
| 71 | return -EINVAL; | ||
| 72 | } | ||
| 73 | |||
| 74 | *ppboardobjpmudata = (struct nv_pmu_boardobj *) | ||
| 75 | &ppmgrchrels->ch_rels[idx].data.board_obj; | ||
| 76 | |||
| 77 | nvgpu_log_info(g, " Done"); | ||
| 78 | |||
| 79 | return 0; | ||
| 80 | } | ||
| 81 | |||
| 82 | static u32 _pwr_channel_state_init(struct gk20a *g) | ||
| 83 | { | ||
| 84 | u8 indx = 0; | ||
| 85 | struct pwr_channel *pchannel; | ||
| 86 | u32 objmask = | ||
| 87 | g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super.objmask; | ||
| 88 | |||
| 89 | /* Initialize each PWR_CHANNEL's dependent channel mask */ | ||
| 90 | BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, indx, objmask) { | ||
| 91 | pchannel = PMGR_PWR_MONITOR_GET_PWR_CHANNEL(g, indx); | ||
| 92 | if (pchannel == NULL) { | ||
| 93 | nvgpu_err(g, | ||
| 94 | "PMGR_PWR_MONITOR_GET_PWR_CHANNEL-failed %d", indx); | ||
| 95 | return -EINVAL; | ||
| 96 | } | ||
| 97 | pchannel->dependent_ch_mask = 0; | ||
| 98 | } | ||
| 99 | BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END | ||
| 100 | |||
| 101 | return 0; | ||
| 102 | } | ||
| 103 | |||
| 104 | static bool _pwr_channel_implements(struct pwr_channel *pchannel, | ||
| 105 | u8 type) | ||
| 106 | { | ||
| 107 | return (type == BOARDOBJ_GET_TYPE(pchannel)); | ||
| 108 | } | ||
| 109 | |||
| 110 | static int _pwr_domains_pmudatainit_sensor(struct gk20a *g, | ||
| 111 | struct boardobj *board_obj_ptr, | ||
| 112 | struct nv_pmu_boardobj *ppmudata) | ||
| 113 | { | ||
| 114 | struct nv_pmu_pmgr_pwr_channel_sensor *pmu_sensor_data; | ||
| 115 | struct pwr_channel_sensor *sensor; | ||
| 116 | int status = 0; | ||
| 117 | |||
| 118 | status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); | ||
| 119 | if (status) { | ||
| 120 | nvgpu_err(g, | ||
| 121 | "error updating pmu boardobjgrp for pwr sensor 0x%x", | ||
| 122 | status); | ||
| 123 | goto done; | ||
| 124 | } | ||
| 125 | |||
| 126 | sensor = (struct pwr_channel_sensor *)board_obj_ptr; | ||
| 127 | pmu_sensor_data = (struct nv_pmu_pmgr_pwr_channel_sensor *) ppmudata; | ||
| 128 | |||
| 129 | pmu_sensor_data->super.pwr_rail = sensor->super.pwr_rail; | ||
| 130 | pmu_sensor_data->super.volt_fixedu_v = sensor->super.volt_fixed_uv; | ||
| 131 | pmu_sensor_data->super.pwr_corr_slope = sensor->super.pwr_corr_slope; | ||
| 132 | pmu_sensor_data->super.pwr_corr_offsetm_w = sensor->super.pwr_corr_offset_mw; | ||
| 133 | pmu_sensor_data->super.curr_corr_slope = sensor->super.curr_corr_slope; | ||
| 134 | pmu_sensor_data->super.curr_corr_offsetm_a = sensor->super.curr_corr_offset_ma; | ||
| 135 | pmu_sensor_data->super.dependent_ch_mask = sensor->super.dependent_ch_mask; | ||
| 136 | pmu_sensor_data->super.ch_idx = 0; | ||
| 137 | |||
| 138 | pmu_sensor_data->pwr_dev_idx = sensor->pwr_dev_idx; | ||
| 139 | pmu_sensor_data->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx; | ||
| 140 | |||
| 141 | done: | ||
| 142 | return status; | ||
| 143 | } | ||
| 144 | |||
| 145 | static struct boardobj *construct_pwr_topology(struct gk20a *g, | ||
| 146 | void *pargs, u16 pargs_size, u8 type) | ||
| 147 | { | ||
| 148 | struct boardobj *board_obj_ptr = NULL; | ||
| 149 | int status; | ||
| 150 | struct pwr_channel_sensor *pwrchannel; | ||
| 151 | struct pwr_channel_sensor *sensor = (struct pwr_channel_sensor*)pargs; | ||
| 152 | |||
| 153 | status = boardobj_construct_super(g, &board_obj_ptr, | ||
| 154 | pargs_size, pargs); | ||
| 155 | if (status) { | ||
| 156 | return NULL; | ||
| 157 | } | ||
| 158 | |||
| 159 | pwrchannel = (struct pwr_channel_sensor*)board_obj_ptr; | ||
| 160 | |||
| 161 | /* Set Super class interfaces */ | ||
| 162 | board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_sensor; | ||
| 163 | |||
| 164 | pwrchannel->super.pwr_rail = sensor->super.pwr_rail; | ||
| 165 | pwrchannel->super.volt_fixed_uv = sensor->super.volt_fixed_uv; | ||
| 166 | pwrchannel->super.pwr_corr_slope = sensor->super.pwr_corr_slope; | ||
| 167 | pwrchannel->super.pwr_corr_offset_mw = sensor->super.pwr_corr_offset_mw; | ||
| 168 | pwrchannel->super.curr_corr_slope = sensor->super.curr_corr_slope; | ||
| 169 | pwrchannel->super.curr_corr_offset_ma = sensor->super.curr_corr_offset_ma; | ||
| 170 | pwrchannel->super.dependent_ch_mask = 0; | ||
| 171 | |||
| 172 | pwrchannel->pwr_dev_idx = sensor->pwr_dev_idx; | ||
| 173 | pwrchannel->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx; | ||
| 174 | |||
| 175 | nvgpu_log_info(g, " Done"); | ||
| 176 | |||
| 177 | return board_obj_ptr; | ||
| 178 | } | ||
| 179 | |||
| 180 | static int devinit_get_pwr_topology_table(struct gk20a *g, | ||
| 181 | struct pmgr_pwr_monitor *ppwrmonitorobjs) | ||
| 182 | { | ||
| 183 | int status = 0; | ||
| 184 | u8 *pwr_topology_table_ptr = NULL; | ||
| 185 | u8 *curr_pwr_topology_table_ptr = NULL; | ||
| 186 | struct boardobj *boardobj; | ||
| 187 | struct pwr_topology_2x_header pwr_topology_table_header = { 0 }; | ||
| 188 | struct pwr_topology_2x_entry pwr_topology_table_entry = { 0 }; | ||
| 189 | u32 index; | ||
| 190 | u32 obj_index = 0; | ||
| 191 | u16 pwr_topology_size; | ||
| 192 | union { | ||
| 193 | struct boardobj boardobj; | ||
| 194 | struct pwr_channel pwrchannel; | ||
| 195 | struct pwr_channel_sensor sensor; | ||
| 196 | } pwr_topology_data; | ||
| 197 | |||
| 198 | nvgpu_log_info(g, " "); | ||
| 199 | |||
| 200 | pwr_topology_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, | ||
| 201 | g->bios.perf_token, POWER_TOPOLOGY_TABLE); | ||
| 202 | if (pwr_topology_table_ptr == NULL) { | ||
| 203 | status = -EINVAL; | ||
| 204 | goto done; | ||
| 205 | } | ||
| 206 | |||
| 207 | memcpy(&pwr_topology_table_header, pwr_topology_table_ptr, | ||
| 208 | VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06); | ||
| 209 | |||
| 210 | if (pwr_topology_table_header.version != | ||
| 211 | VBIOS_POWER_TOPOLOGY_VERSION_2X) { | ||
| 212 | status = -EINVAL; | ||
| 213 | goto done; | ||
| 214 | } | ||
| 215 | |||
| 216 | g->pmgr_pmu.pmgr_monitorobjs.b_is_topology_tbl_ver_1x = false; | ||
| 217 | |||
| 218 | if (pwr_topology_table_header.header_size < | ||
| 219 | VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06) { | ||
| 220 | status = -EINVAL; | ||
| 221 | goto done; | ||
| 222 | } | ||
| 223 | |||
| 224 | if (pwr_topology_table_header.table_entry_size != | ||
| 225 | VBIOS_POWER_TOPOLOGY_2X_ENTRY_SIZE_16) { | ||
| 226 | status = -EINVAL; | ||
| 227 | goto done; | ||
| 228 | } | ||
| 229 | |||
| 230 | curr_pwr_topology_table_ptr = (pwr_topology_table_ptr + | ||
| 231 | VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06); | ||
| 232 | |||
| 233 | for (index = 0; index < pwr_topology_table_header.num_table_entries; | ||
| 234 | index++) { | ||
| 235 | u8 class_type; | ||
| 236 | |||
| 237 | curr_pwr_topology_table_ptr = pwr_topology_table_ptr + VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06 + (pwr_topology_table_header.table_entry_size * index); | ||
| 238 | |||
| 239 | pwr_topology_table_entry.flags0 = *curr_pwr_topology_table_ptr; | ||
| 240 | pwr_topology_table_entry.pwr_rail = *(curr_pwr_topology_table_ptr + 1); | ||
| 241 | |||
| 242 | memcpy(&pwr_topology_table_entry.param0, | ||
| 243 | (curr_pwr_topology_table_ptr + 2), | ||
| 244 | (VBIOS_POWER_TOPOLOGY_2X_ENTRY_SIZE_16 - 2U)); | ||
| 245 | |||
| 246 | class_type = (u8)BIOS_GET_FIELD( | ||
| 247 | pwr_topology_table_entry.flags0, | ||
| 248 | NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS); | ||
| 249 | |||
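| | /* Only SENSOR-class topology entries are handled; any other class is skipped. */ | ||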
| 250 | if (class_type == NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS_SENSOR) { | ||
| 251 | pwr_topology_data.sensor.pwr_dev_idx = (u8)BIOS_GET_FIELD( | ||
| 252 | pwr_topology_table_entry.param1, | ||
| 253 | NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_INDEX); | ||
| 254 | pwr_topology_data.sensor.pwr_dev_prov_idx = (u8)BIOS_GET_FIELD( | ||
| 255 | pwr_topology_table_entry.param1, | ||
| 256 | NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_PROVIDER_INDEX); | ||
| 257 | |||
| 258 | pwr_topology_size = sizeof(struct pwr_channel_sensor); | ||
| 259 | } else { | ||
| 260 | continue; | ||
| 261 | } | ||
| 262 | |||
| 263 | /* Initialize data for the parent class */ | ||
| 264 | pwr_topology_data.boardobj.type = CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR; | ||
| 265 | pwr_topology_data.pwrchannel.pwr_rail = (u8)pwr_topology_table_entry.pwr_rail; | ||
| 266 | pwr_topology_data.pwrchannel.volt_fixed_uv = pwr_topology_table_entry.param0; | ||
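| | /* Default correction factors: a slope of (1 << 12) presumably encodes 1.0 in the PMU's .12 fixed-point format, with zero power offset. */ | ||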
| 267 | pwr_topology_data.pwrchannel.pwr_corr_slope = (1 << 12); | ||
| 268 | pwr_topology_data.pwrchannel.pwr_corr_offset_mw = 0; | ||
| 269 | pwr_topology_data.pwrchannel.curr_corr_slope = | ||
| 270 | (u32)pwr_topology_table_entry.curr_corr_slope; | ||
| 271 | pwr_topology_data.pwrchannel.curr_corr_offset_ma = | ||
| 272 | (s32)pwr_topology_table_entry.curr_corr_offset; | ||
| 273 | |||
| 274 | boardobj = construct_pwr_topology(g, &pwr_topology_data, | ||
| 275 | pwr_topology_size, pwr_topology_data.boardobj.type); | ||
| 276 | |||
| 277 | if (!boardobj) { | ||
| 278 | nvgpu_err(g, | ||
| 279 | "unable to create pwr topology for %d type %d", | ||
| 280 | index, pwr_topology_data.boardobj.type); | ||
| 281 | status = -EINVAL; | ||
| 282 | goto done; | ||
| 283 | } | ||
| 284 | |||
| 285 | status = boardobjgrp_objinsert(&ppwrmonitorobjs->pwr_channels.super, | ||
| 286 | boardobj, obj_index); | ||
| 287 | |||
| 288 | if (status) { | ||
| 289 | nvgpu_err(g, | ||
| 290 | "unable to insert pwr topology boardobj for %d", index); | ||
| 291 | status = -EINVAL; | ||
| 292 | goto done; | ||
| 293 | } | ||
| 294 | |||
| 295 | ++obj_index; | ||
| 296 | } | ||
| 297 | |||
| 298 | done: | ||
| 299 | nvgpu_log_info(g, " done status %x", status); | ||
| 300 | return status; | ||
| 301 | } | ||
| 302 | |||
| 303 | int pmgr_monitor_sw_setup(struct gk20a *g) | ||
| 304 | { | ||
| 305 | int status; | ||
| 306 | struct boardobjgrp *pboardobjgrp = NULL; | ||
| 307 | struct pwr_channel *pchannel; | ||
| 308 | struct pmgr_pwr_monitor *ppwrmonitorobjs; | ||
| 309 | u8 indx = 0; | ||
| 310 | |||
| 311 | /* Construct the Super Class and override the Interfaces */ | ||
| 312 | status = boardobjgrpconstruct_e32(g, | ||
| 313 | &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels); | ||
| 314 | if (status) { | ||
| 315 | nvgpu_err(g, | ||
| 316 | "error creating boardobjgrp for pmgr channel, status - 0x%x", | ||
| 317 | status); | ||
| 318 | goto done; | ||
| 319 | } | ||
| 320 | |||
| 321 | pboardobjgrp = &(g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super); | ||
| 322 | |||
| 323 | /* Override the Interfaces */ | ||
| 324 | pboardobjgrp->pmudatainstget = _pwr_channel_pmudata_instget; | ||
| 325 | |||
| 326 | /* Construct the Super Class and override the Interfaces */ | ||
| 327 | status = boardobjgrpconstruct_e32(g, | ||
| 328 | &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels); | ||
| 329 | if (status) { | ||
| 330 | nvgpu_err(g, | ||
| 331 | "error creating boardobjgrp for pmgr channel relationship, status - 0x%x", | ||
| 332 | status); | ||
| 333 | goto done; | ||
| 334 | } | ||
| 335 | |||
| 336 | pboardobjgrp = &(g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super); | ||
| 337 | |||
| 338 | /* Override the Interfaces */ | ||
| 339 | pboardobjgrp->pmudatainstget = _pwr_channel_rels_pmudata_instget; | ||
| 340 | |||
| 341 | /* Initialize the Total GPU Power Channel Mask to 0 */ | ||
| 342 | g->pmgr_pmu.pmgr_monitorobjs.pmu_data.channels.hdr.data.total_gpu_power_channel_mask = 0; | ||
| 343 | g->pmgr_pmu.pmgr_monitorobjs.total_gpu_channel_idx = | ||
| 344 | CTRL_PMGR_PWR_CHANNEL_INDEX_INVALID; | ||
| 345 | |||
| 346 | /* Assume topology table version 1.x until the VBIOS header is parsed */ | ||
| 347 | g->pmgr_pmu.pmgr_monitorobjs.b_is_topology_tbl_ver_1x = true; | ||
| 348 | |||
| 349 | ppwrmonitorobjs = &(g->pmgr_pmu.pmgr_monitorobjs); | ||
| 350 | |||
| 351 | status = devinit_get_pwr_topology_table(g, ppwrmonitorobjs); | ||
| 352 | if (status) { | ||
| 353 | goto done; | ||
| 354 | } | ||
| 355 | |||
| 356 | status = _pwr_channel_state_init(g); | ||
| 357 | if (status) { | ||
| 358 | goto done; | ||
| 359 | } | ||
| 360 | |||
| 361 | /* Initialise physicalChannelMask */ | ||
| 362 | g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask = 0; | ||
| 363 | |||
| 364 | pboardobjgrp = &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super; | ||
| 365 | |||
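| | /* Build a bitmask of channels that are backed by a physical sensor device. */ | ||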
| 366 | BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct pwr_channel *, pchannel, indx) { | ||
| 367 | if (_pwr_channel_implements(pchannel, | ||
| 368 | CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR)) { | ||
| 369 | g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask |= BIT(indx); | ||
| 370 | } | ||
| 371 | } | ||
| 372 | |||
| 373 | done: | ||
| 374 | nvgpu_log_info(g, " done status %x", status); | ||
| 375 | return status; | ||
| 376 | } | ||
diff --git a/include/pmgr/pwrmonitor.h b/include/pmgr/pwrmonitor.h new file mode 100644 index 0000000..bf4c76f --- /dev/null +++ b/include/pmgr/pwrmonitor.h | |||
| @@ -0,0 +1,69 @@ | |||
| 1 | /* | ||
| 2 | * general power channel structures & definitions | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 22 | * DEALINGS IN THE SOFTWARE. | ||
| 23 | */ | ||
| 24 | #ifndef NVGPU_PMGR_PWRMONITOR_H | ||
| 25 | #define NVGPU_PMGR_PWRMONITOR_H | ||
| 26 | |||
| 27 | #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> | ||
| 28 | #include "boardobj/boardobjgrp.h" | ||
| 29 | #include "boardobj/boardobj.h" | ||
| 30 | #include "ctrl/ctrlpmgr.h" | ||
| 31 | |||
| 32 | struct pwr_channel { | ||
| 33 | struct boardobj super; | ||
| 34 | u8 pwr_rail; | ||
| 35 | u32 volt_fixed_uv; | ||
| 36 | u32 pwr_corr_slope; | ||
| 37 | s32 pwr_corr_offset_mw; | ||
| 38 | u32 curr_corr_slope; | ||
| 39 | s32 curr_corr_offset_ma; | ||
| 40 | u32 dependent_ch_mask; | ||
| 41 | }; | ||
| 42 | |||
| 43 | struct pwr_chrelationship { | ||
| 44 | struct boardobj super; | ||
| 45 | u8 chIdx; | ||
| 46 | }; | ||
| 47 | |||
| 48 | struct pwr_channel_sensor { | ||
| 49 | struct pwr_channel super; | ||
| 50 | u8 pwr_dev_idx; | ||
| 51 | u8 pwr_dev_prov_idx; | ||
| 52 | }; | ||
| 53 | |||
| 54 | struct pmgr_pwr_monitor { | ||
| 55 | bool b_is_topology_tbl_ver_1x; | ||
| 56 | struct boardobjgrp_e32 pwr_channels; | ||
| 57 | struct boardobjgrp_e32 pwr_ch_rels; | ||
| 58 | u8 total_gpu_channel_idx; | ||
| 59 | u32 physical_channel_mask; | ||
| 60 | struct nv_pmu_pmgr_pwr_monitor_pack pmu_data; | ||
| 61 | }; | ||
| 62 | |||
| 63 | #define PMGR_PWR_MONITOR_GET_PWR_CHANNEL(g, channel_idx) \ | ||
| 64 | ((struct pwr_channel *)BOARDOBJGRP_OBJ_GET_BY_IDX( \ | ||
| 65 | &(g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super), (channel_idx))) | ||
| 66 | |||
| 67 | int pmgr_monitor_sw_setup(struct gk20a *g); | ||
| 68 | |||
| 69 | #endif /* NVGPU_PMGR_PWRMONITOR_H */ | ||
diff --git a/include/pmgr/pwrpolicy.c b/include/pmgr/pwrpolicy.c new file mode 100644 index 0000000..3bf6f32 --- /dev/null +++ b/include/pmgr/pwrpolicy.c | |||
| @@ -0,0 +1,782 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 20 | * DEALINGS IN THE SOFTWARE. | ||
| 21 | */ | ||
| 22 | |||
| 23 | #include <nvgpu/bios.h> | ||
| 24 | #include <nvgpu/bug.h> | ||
| 25 | #include <nvgpu/gk20a.h> | ||
| 26 | |||
| 27 | #include "pwrpolicy.h" | ||
| 28 | #include "boardobj/boardobjgrp.h" | ||
| 29 | #include "boardobj/boardobjgrp_e32.h" | ||
| 30 | #include "gp106/bios_gp106.h" | ||
| 31 | |||
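| | /* Helper macros: the first returns the arbitrated output of a limit arbiter; the second applies a signed delta to a limit and clamps the result at zero. */ | ||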
| 32 | #define _pwr_policy_limitarboutputget_helper(p_limit_arb) (p_limit_arb)->output | ||
| 33 | #define _pwr_policy_limitdeltaapply(limit, delta) ((u32)max(((s32)limit) + (delta), 0)) | ||
| 34 | |||
| 35 | static u32 _pwr_policy_limitarbinputset_helper(struct gk20a *g, | ||
| 36 | struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb, | ||
| 37 | u8 client_idx, | ||
| 38 | u32 limit_value) | ||
| 39 | { | ||
| 40 | u8 indx; | ||
| 41 | bool b_found = false; | ||
| 42 | u32 status = 0; | ||
| 43 | u32 output = limit_value; | ||
| 44 | |||
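| | /* Walk the existing client inputs: update this client's entry if it already exists, otherwise fold the other inputs into the arbitrated output (max or min, depending on b_arb_max). */ | ||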
| 45 | for (indx = 0; indx < p_limit_arb->num_inputs; indx++) { | ||
| 46 | if (p_limit_arb->inputs[indx].pwr_policy_idx == client_idx) { | ||
| 47 | p_limit_arb->inputs[indx].limit_value = limit_value; | ||
| 48 | b_found = true; | ||
| 49 | } else if (p_limit_arb->b_arb_max) { | ||
| 50 | output = max(output, p_limit_arb->inputs[indx].limit_value); | ||
| 51 | } else { | ||
| 52 | output = min(output, p_limit_arb->inputs[indx].limit_value); | ||
| 53 | } | ||
| 54 | } | ||
| 55 | |||
| 56 | if (!b_found) { | ||
| 57 | if (p_limit_arb->num_inputs < | ||
| 58 | CTRL_PMGR_PWR_POLICY_MAX_LIMIT_INPUTS) { | ||
| 59 | p_limit_arb->inputs[ | ||
| 60 | p_limit_arb->num_inputs].pwr_policy_idx = client_idx; | ||
| 61 | p_limit_arb->inputs[ | ||
| 62 | p_limit_arb->num_inputs].limit_value = limit_value; | ||
| 63 | p_limit_arb->num_inputs++; | ||
| 64 | } else { | ||
| 65 | nvgpu_err(g, "No entries remaining for clientIdx=%d", | ||
| 66 | client_idx); | ||
| 67 | status = -EINVAL; | ||
| 68 | } | ||
| 69 | } | ||
| 70 | |||
| 71 | if (!status) { | ||
| 72 | p_limit_arb->output = output; | ||
| 73 | } | ||
| 74 | |||
| 75 | return status; | ||
| 76 | } | ||
| 77 | |||
| 78 | static u32 _pwr_policy_limitid_translate(struct gk20a *g, | ||
| 79 | struct pwr_policy *ppolicy, | ||
| 80 | enum pwr_policy_limit_id limit_id, | ||
| 81 | struct ctrl_pmgr_pwr_policy_limit_arbitration **p_limit_arb, | ||
| 82 | struct ctrl_pmgr_pwr_policy_limit_arbitration **p_limit_arb_sec) | ||
| 83 | { | ||
| 84 | u32 status = 0; | ||
| 85 | |||
| 86 | switch (limit_id) { | ||
| 87 | case PWR_POLICY_LIMIT_ID_MIN: | ||
| 88 | *p_limit_arb = &ppolicy->limit_arb_min; | ||
| 89 | break; | ||
| 90 | |||
| 91 | case PWR_POLICY_LIMIT_ID_RATED: | ||
| 92 | *p_limit_arb = &ppolicy->limit_arb_rated; | ||
| 93 | |||
| 94 | if (p_limit_arb_sec != NULL) { | ||
| 95 | *p_limit_arb_sec = &ppolicy->limit_arb_curr; | ||
| 96 | } | ||
| 97 | break; | ||
| 98 | |||
| 99 | case PWR_POLICY_LIMIT_ID_MAX: | ||
| 100 | *p_limit_arb = &ppolicy->limit_arb_max; | ||
| 101 | break; | ||
| 102 | |||
| 103 | case PWR_POLICY_LIMIT_ID_CURR: | ||
| 104 | *p_limit_arb = &ppolicy->limit_arb_curr; | ||
| 105 | break; | ||
| 106 | |||
| 107 | case PWR_POLICY_LIMIT_ID_BATT: | ||
| 108 | *p_limit_arb = &ppolicy->limit_arb_batt; | ||
| 109 | break; | ||
| 110 | |||
| 111 | default: | ||
| 112 | nvgpu_err(g, "Unsupported limitId=%d", | ||
| 113 | limit_id); | ||
| 114 | status = -EINVAL; | ||
| 115 | break; | ||
| 116 | } | ||
| 117 | |||
| 118 | return status; | ||
| 119 | } | ||
| 120 | |||
| 121 | static u32 _pwr_policy_limitarbinputset(struct gk20a *g, | ||
| 122 | struct pwr_policy *ppolicy, | ||
| 123 | enum pwr_policy_limit_id limit_id, | ||
| 124 | u8 client_idx, | ||
| 125 | u32 limit) | ||
| 126 | { | ||
| 127 | u32 status = 0; | ||
| 128 | struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb = NULL; | ||
| 129 | struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb_sec = NULL; | ||
| 130 | |||
| 131 | status = _pwr_policy_limitid_translate(g, | ||
| 132 | ppolicy, | ||
| 133 | limit_id, | ||
| 134 | &p_limit_arb, | ||
| 135 | &p_limit_arb_sec); | ||
| 136 | if (status) { | ||
| 137 | goto exit; | ||
| 138 | } | ||
| 139 | |||
| 140 | status = _pwr_policy_limitarbinputset_helper(g, p_limit_arb, client_idx, limit); | ||
| 141 | if (status) { | ||
| 142 | nvgpu_err(g, | ||
| 143 | "Error setting client limit value: status=0x%08x, limitId=0x%x, clientIdx=0x%x, limit=%d", | ||
| 144 | status, limit_id, client_idx, limit); | ||
| 145 | goto exit; | ||
| 146 | } | ||
| 147 | |||
| 148 | if (NULL != p_limit_arb_sec) { | ||
| 149 | status = _pwr_policy_limitarbinputset_helper(g, p_limit_arb_sec, | ||
| 150 | CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM, | ||
| 151 | _pwr_policy_limitarboutputget_helper(p_limit_arb)); | ||
| 152 | } | ||
| 153 | |||
| 154 | exit: | ||
| 155 | return status; | ||
| 156 | } | ||
| 157 | |||
| 158 | static inline void _pwr_policy_limitarbconstruct( | ||
| 159 | struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb, | ||
| 160 | bool b_arb_max) | ||
| 161 | { | ||
| 162 | p_limit_arb->num_inputs = 0; | ||
| 163 | p_limit_arb->b_arb_max = b_arb_max; | ||
| 164 | } | ||
| 165 | |||
| 166 | static u32 _pwr_policy_limitarboutputget(struct gk20a *g, | ||
| 167 | struct pwr_policy *ppolicy, | ||
| 168 | enum pwr_policy_limit_id limit_id) | ||
| 169 | { | ||
| 170 | u32 status = 0; | ||
| 171 | struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb = NULL; | ||
| 172 | |||
| 173 | status = _pwr_policy_limitid_translate(g, | ||
| 174 | ppolicy, | ||
| 175 | limit_id, | ||
| 176 | &p_limit_arb, | ||
| 177 | NULL); | ||
| 178 | if (status) { | ||
| 179 | return 0; | ||
| 180 | } | ||
| 181 | |||
| 182 | return _pwr_policy_limitarboutputget_helper(p_limit_arb); | ||
| 183 | } | ||
| 184 | |||
| 185 | static int _pwr_domains_pmudatainit_hw_threshold(struct gk20a *g, | ||
| 186 | struct boardobj *board_obj_ptr, | ||
| 187 | struct nv_pmu_boardobj *ppmudata) | ||
| 188 | { | ||
| 189 | struct nv_pmu_pmgr_pwr_policy_hw_threshold *pmu_hw_threshold_data; | ||
| 190 | struct pwr_policy_hw_threshold *p_hw_threshold; | ||
| 191 | struct pwr_policy *p_pwr_policy; | ||
| 192 | struct nv_pmu_pmgr_pwr_policy *pmu_pwr_policy; | ||
| 193 | int status = 0; | ||
| 194 | |||
| 195 | status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata); | ||
| 196 | if (status) { | ||
| 197 | nvgpu_err(g, | ||
| 198 | "error updating pmu boardobjgrp for pwr sensor 0x%x", | ||
| 199 | status); | ||
| 200 | status = -ENOMEM; | ||
| 201 | goto done; | ||
| 202 | } | ||
| 203 | |||
| 204 | p_hw_threshold = (struct pwr_policy_hw_threshold *)board_obj_ptr; | ||
| 205 | pmu_hw_threshold_data = (struct nv_pmu_pmgr_pwr_policy_hw_threshold *) ppmudata; | ||
| 206 | pmu_pwr_policy = (struct nv_pmu_pmgr_pwr_policy *) ppmudata; | ||
| 207 | p_pwr_policy = (struct pwr_policy *)&(p_hw_threshold->super.super); | ||
| 208 | |||
| 209 | pmu_pwr_policy->ch_idx = 0; | ||
| 210 | pmu_pwr_policy->limit_unit = p_pwr_policy->limit_unit; | ||
| 211 | pmu_pwr_policy->num_limit_inputs = p_pwr_policy->num_limit_inputs; | ||
| 212 | |||
| 213 | pmu_pwr_policy->limit_min = _pwr_policy_limitdeltaapply( | ||
| 214 | _pwr_policy_limitarboutputget(g, p_pwr_policy, | ||
| 215 | PWR_POLICY_LIMIT_ID_MIN), | ||
| 216 | p_pwr_policy->limit_delta); | ||
| 217 | |||
| 218 | pmu_pwr_policy->limit_max = _pwr_policy_limitdeltaapply( | ||
| 219 | _pwr_policy_limitarboutputget(g, p_pwr_policy, | ||
| 220 | PWR_POLICY_LIMIT_ID_MAX), | ||
| 221 | p_pwr_policy->limit_delta); | ||
| 222 | |||
| 223 | pmu_pwr_policy->limit_curr = _pwr_policy_limitdeltaapply( | ||
| 224 | _pwr_policy_limitarboutputget(g, p_pwr_policy, | ||
| 225 | PWR_POLICY_LIMIT_ID_CURR), | ||
| 226 | p_pwr_policy->limit_delta); | ||
| 227 | |||
| 228 | memcpy(&pmu_pwr_policy->integral, &p_pwr_policy->integral, | ||
| 229 | sizeof(struct ctrl_pmgr_pwr_policy_info_integral)); | ||
| 230 | |||
| 231 | pmu_pwr_policy->sample_mult = p_pwr_policy->sample_mult; | ||
| 232 | pmu_pwr_policy->filter_type = p_pwr_policy->filter_type; | ||
| 233 | pmu_pwr_policy->filter_param = p_pwr_policy->filter_param; | ||
| 234 | |||
| 235 | pmu_hw_threshold_data->threshold_idx = p_hw_threshold->threshold_idx; | ||
| 236 | pmu_hw_threshold_data->low_threshold_idx = p_hw_threshold->low_threshold_idx; | ||
| 237 | pmu_hw_threshold_data->b_use_low_threshold = p_hw_threshold->b_use_low_threshold; | ||
| 238 | pmu_hw_threshold_data->low_threshold_value = p_hw_threshold->low_threshold_value; | ||
| 239 | |||
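| | /* SW_THRESHOLD policies reuse the HW_THRESHOLD init path; only the extra event_id field needs to be copied. */ | ||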
| 240 | if (BOARDOBJ_GET_TYPE(board_obj_ptr) == | ||
| 241 | CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD) { | ||
| 242 | struct nv_pmu_pmgr_pwr_policy_sw_threshold *pmu_sw_threshold_data; | ||
| 243 | struct pwr_policy_sw_threshold *p_sw_threshold; | ||
| 244 | |||
| 245 | p_sw_threshold = (struct pwr_policy_sw_threshold *)board_obj_ptr; | ||
| 246 | pmu_sw_threshold_data = | ||
| 247 | (struct nv_pmu_pmgr_pwr_policy_sw_threshold *) ppmudata; | ||
| 248 | pmu_sw_threshold_data->event_id = | ||
| 249 | p_sw_threshold->event_id; | ||
| 250 | } | ||
| 251 | done: | ||
| 252 | return status; | ||
| 253 | } | ||
| 254 | |||
| 255 | static struct boardobj *construct_pwr_policy(struct gk20a *g, | ||
| 256 | void *pargs, u16 pargs_size, u8 type) | ||
| 257 | { | ||
| 258 | struct boardobj *board_obj_ptr = NULL; | ||
| 259 | int status; | ||
| 260 | struct pwr_policy_hw_threshold *pwrpolicyhwthreshold; | ||
| 261 | struct pwr_policy *pwrpolicy; | ||
| 262 | struct pwr_policy *pwrpolicyparams = (struct pwr_policy*)pargs; | ||
| 263 | struct pwr_policy_hw_threshold *hwthreshold = (struct pwr_policy_hw_threshold*)pargs; | ||
| 264 | |||
| 265 | status = boardobj_construct_super(g, &board_obj_ptr, | ||
| 266 | pargs_size, pargs); | ||
| 267 | if (status) { | ||
| 268 | return NULL; | ||
| 269 | } | ||
| 270 | |||
| 271 | pwrpolicyhwthreshold = (struct pwr_policy_hw_threshold*)board_obj_ptr; | ||
| 272 | pwrpolicy = (struct pwr_policy *)board_obj_ptr; | ||
| 273 | |||
| 274 | nvgpu_log_fn(g, "min=%u rated=%u max=%u", | ||
| 275 | pwrpolicyparams->limit_min, | ||
| 276 | pwrpolicyparams->limit_rated, | ||
| 277 | pwrpolicyparams->limit_max); | ||
| 278 | |||
| 279 | /* Set Super class interfaces */ | ||
| 280 | board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_hw_threshold; | ||
| 281 | |||
| 282 | pwrpolicy->ch_idx = pwrpolicyparams->ch_idx; | ||
| 283 | pwrpolicy->num_limit_inputs = 0; | ||
| 284 | pwrpolicy->limit_unit = pwrpolicyparams->limit_unit; | ||
| 285 | pwrpolicy->filter_type = (enum ctrl_pmgr_pwr_policy_filter_type)(pwrpolicyparams->filter_type); | ||
| 286 | pwrpolicy->sample_mult = pwrpolicyparams->sample_mult; | ||
| 287 | switch (pwrpolicy->filter_type) | ||
| 288 | { | ||
| 289 | case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_NONE: | ||
| 290 | break; | ||
| 291 | |||
| 292 | case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_BLOCK: | ||
| 293 | pwrpolicy->filter_param.block.block_size = | ||
| 294 | pwrpolicyparams->filter_param.block.block_size; | ||
| 295 | break; | ||
| 296 | |||
| 297 | case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_MOVING_AVERAGE: | ||
| 298 | pwrpolicy->filter_param.moving_avg.window_size = | ||
| 299 | pwrpolicyparams->filter_param.moving_avg.window_size; | ||
| 300 | break; | ||
| 301 | |||
| 302 | case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_IIR: | ||
| 303 | pwrpolicy->filter_param.iir.divisor = pwrpolicyparams->filter_param.iir.divisor; | ||
| 304 | break; | ||
| 305 | |||
| 306 | default: | ||
| 307 | nvgpu_err(g, "Error: unrecognized Power Policy filter type: %d", | ||
| 308 | pwrpolicy->filter_type); | ||
| 309 | } | ||
| 310 | |||
| 311 | _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_curr, false); | ||
| 312 | |||
| 313 | pwrpolicy->limit_delta = 0; | ||
| 314 | |||
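| | /* The MIN limit arbiter keeps the maximum of its client inputs (b_arb_max = true); the MAX, RATED and BATT arbiters below keep the minimum. */ | ||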
| 315 | _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_min, true); | ||
| 316 | status = _pwr_policy_limitarbinputset(g, | ||
| 317 | pwrpolicy, | ||
| 318 | PWR_POLICY_LIMIT_ID_MIN, | ||
| 319 | CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM, | ||
| 320 | pwrpolicyparams->limit_min); | ||
| 321 | |||
| 322 | _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_max, false); | ||
| 323 | status = _pwr_policy_limitarbinputset(g, | ||
| 324 | pwrpolicy, | ||
| 325 | PWR_POLICY_LIMIT_ID_MAX, | ||
| 326 | CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM, | ||
| 327 | pwrpolicyparams->limit_max); | ||
| 328 | |||
| 329 | _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_rated, false); | ||
| 330 | status = _pwr_policy_limitarbinputset(g, | ||
| 331 | pwrpolicy, | ||
| 332 | PWR_POLICY_LIMIT_ID_RATED, | ||
| 333 | CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM, | ||
| 334 | pwrpolicyparams->limit_rated); | ||
| 335 | |||
| 336 | _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_batt, false); | ||
| 337 | status = _pwr_policy_limitarbinputset(g, | ||
| 338 | pwrpolicy, | ||
| 339 | PWR_POLICY_LIMIT_ID_BATT, | ||
| 340 | CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM, | ||
| 341 | ((pwrpolicyparams->limit_batt != 0U) ? | ||
| 342 | pwrpolicyparams->limit_batt: | ||
| 343 | CTRL_PMGR_PWR_POLICY_LIMIT_MAX)); | ||
| 344 | |||
| 345 | memcpy(&pwrpolicy->integral, &pwrpolicyparams->integral, | ||
| 346 | sizeof(struct ctrl_pmgr_pwr_policy_info_integral)); | ||
| 347 | |||
| 348 | pwrpolicyhwthreshold->threshold_idx = hwthreshold->threshold_idx; | ||
| 349 | pwrpolicyhwthreshold->b_use_low_threshold = hwthreshold->b_use_low_threshold; | ||
| 350 | pwrpolicyhwthreshold->low_threshold_idx = hwthreshold->low_threshold_idx; | ||
| 351 | pwrpolicyhwthreshold->low_threshold_value = hwthreshold->low_threshold_value; | ||
| 352 | |||
| 353 | if (type == CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD) { | ||
| 354 | struct pwr_policy_sw_threshold *pwrpolicyswthreshold; | ||
| 355 | struct pwr_policy_sw_threshold *swthreshold = | ||
| 356 | (struct pwr_policy_sw_threshold*)pargs; | ||
| 357 | |||
| 358 | pwrpolicyswthreshold = (struct pwr_policy_sw_threshold*)board_obj_ptr; | ||
| 359 | pwrpolicyswthreshold->event_id = swthreshold->event_id; | ||
| 360 | } | ||
| 361 | |||
| 362 | nvgpu_log_info(g, " Done"); | ||
| 363 | |||
| 364 | return board_obj_ptr; | ||
| 365 | } | ||
| 366 | |||
| 367 | static int _pwr_policy_construct_WAR_SW_Threshold_policy(struct gk20a *g, | ||
| 368 | struct pmgr_pwr_policy *ppwrpolicyobjs, | ||
| 369 | union pwr_policy_data_union *ppwrpolicydata, | ||
| 370 | u16 pwr_policy_size, | ||
| 371 | u32 obj_index) | ||
| 372 | { | ||
| 373 | int status = 0; | ||
| 374 | struct boardobj *boardobj; | ||
| 375 | |||
| 376 | /* WAR: hardcoded SW_THRESHOLD policy parameters */ | ||
| 377 | ppwrpolicydata->pwrpolicy.limit_unit = 0; | ||
| 378 | ppwrpolicydata->pwrpolicy.limit_min = 10000; | ||
| 379 | ppwrpolicydata->pwrpolicy.limit_rated = 100000; | ||
| 380 | ppwrpolicydata->pwrpolicy.limit_max = 100000; | ||
| 381 | ppwrpolicydata->sw_threshold.threshold_idx = 1; | ||
| 382 | ppwrpolicydata->pwrpolicy.filter_type = | ||
| 383 | CTRL_PMGR_PWR_POLICY_FILTER_TYPE_MOVING_AVERAGE; | ||
| 384 | ppwrpolicydata->pwrpolicy.sample_mult = 5; | ||
| 385 | |||
| 386 | /* Hardcode the moving-average filter window size */ | ||
| 387 | ppwrpolicydata->pwrpolicy.filter_param.moving_avg.window_size = 10; | ||
| 388 | |||
| 389 | ppwrpolicydata->sw_threshold.event_id = 0x01; | ||
| 390 | |||
| 391 | ppwrpolicydata->boardobj.type = CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD; | ||
| 392 | |||
| 393 | boardobj = construct_pwr_policy(g, ppwrpolicydata, | ||
| 394 | pwr_policy_size, ppwrpolicydata->boardobj.type); | ||
| 395 | |||
| 396 | if (!boardobj) { | ||
| 397 | nvgpu_err(g, | ||
| 398 | "unable to create pwr policy for type %d", ppwrpolicydata->boardobj.type); | ||
| 399 | status = -EINVAL; | ||
| 400 | goto done; | ||
| 401 | } | ||
| 402 | |||
| 403 | status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super, | ||
| 404 | boardobj, obj_index); | ||
| 405 | |||
| 406 | if (status) { | ||
| 407 | nvgpu_err(g, | ||
| 408 | "unable to insert pwr policy boardobj for %d", obj_index); | ||
| 409 | status = -EINVAL; | ||
| 410 | goto done; | ||
| 411 | } | ||
| 412 | done: | ||
| 413 | return status; | ||
| 414 | } | ||
| 415 | |||
| 416 | struct pwr_policy_3x_header_unpacked { | ||
| 417 | u8 version; | ||
| 418 | u8 header_size; | ||
| 419 | u8 table_entry_size; | ||
| 420 | u8 num_table_entries; | ||
| 421 | u16 base_sample_period; | ||
| 422 | u16 min_client_sample_period; | ||
| 423 | u8 table_rel_entry_size; | ||
| 424 | u8 num_table_rel_entries; | ||
| 425 | u8 tgp_policy_idx; | ||
| 426 | u8 rtp_policy_idx; | ||
| 427 | u8 mxm_policy_idx; | ||
| 428 | u8 dnotifier_policy_idx; | ||
| 429 | u32 d2_limit; | ||
| 430 | u32 d3_limit; | ||
| 431 | u32 d4_limit; | ||
| 432 | u32 d5_limit; | ||
| 433 | u8 low_sampling_mult; | ||
| 434 | u8 pwr_tgt_policy_idx; | ||
| 435 | u8 pwr_tgt_floor_policy_idx; | ||
| 436 | u8 sm_bus_policy_idx; | ||
| 437 | u8 table_viol_entry_size; | ||
| 438 | u8 num_table_viol_entries; | ||
| 439 | }; | ||
| 440 | |||
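| | /* Copy each field individually from the packed VBIOS layout into the naturally aligned unpacked struct, likely to avoid unaligned accesses on the byte-packed table. */ | ||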
| 441 | #define __UNPACK_FIELD(unpacked, packed, field) \ | ||
| 442 | __builtin_memcpy(&unpacked->field, &packed->field, \ | ||
| 443 | sizeof(unpacked->field)) | ||
| 444 | |||
| 445 | static inline void devinit_unpack_pwr_policy_header( | ||
| 446 | struct pwr_policy_3x_header_unpacked *unpacked, | ||
| 447 | struct pwr_policy_3x_header_struct *packed) | ||
| 448 | { | ||
| 449 | __UNPACK_FIELD(unpacked, packed, version); | ||
| 450 | __UNPACK_FIELD(unpacked, packed, header_size); | ||
| 451 | __UNPACK_FIELD(unpacked, packed, table_entry_size); | ||
| 452 | __UNPACK_FIELD(unpacked, packed, num_table_entries); | ||
| 453 | __UNPACK_FIELD(unpacked, packed, base_sample_period); | ||
| 454 | __UNPACK_FIELD(unpacked, packed, min_client_sample_period); | ||
| 455 | __UNPACK_FIELD(unpacked, packed, table_rel_entry_size); | ||
| 456 | __UNPACK_FIELD(unpacked, packed, num_table_rel_entries); | ||
| 457 | __UNPACK_FIELD(unpacked, packed, tgp_policy_idx); | ||
| 458 | __UNPACK_FIELD(unpacked, packed, rtp_policy_idx); | ||
| 459 | __UNPACK_FIELD(unpacked, packed, mxm_policy_idx); | ||
| 460 | __UNPACK_FIELD(unpacked, packed, dnotifier_policy_idx); | ||
| 461 | __UNPACK_FIELD(unpacked, packed, d2_limit); | ||
| 462 | __UNPACK_FIELD(unpacked, packed, d3_limit); | ||
| 463 | __UNPACK_FIELD(unpacked, packed, d4_limit); | ||
| 464 | __UNPACK_FIELD(unpacked, packed, d5_limit); | ||
| 465 | __UNPACK_FIELD(unpacked, packed, low_sampling_mult); | ||
| 466 | __UNPACK_FIELD(unpacked, packed, pwr_tgt_policy_idx); | ||
| 467 | __UNPACK_FIELD(unpacked, packed, pwr_tgt_floor_policy_idx); | ||
| 468 | __UNPACK_FIELD(unpacked, packed, sm_bus_policy_idx); | ||
| 469 | __UNPACK_FIELD(unpacked, packed, table_viol_entry_size); | ||
| 470 | __UNPACK_FIELD(unpacked, packed, num_table_viol_entries); | ||
| 471 | } | ||
| 472 | |||
| 473 | struct pwr_policy_3x_entry_unpacked { | ||
| 474 | u8 flags0; | ||
| 475 | u8 ch_idx; | ||
| 476 | u32 limit_min; | ||
| 477 | u32 limit_rated; | ||
| 478 | u32 limit_max; | ||
| 479 | u32 param0; | ||
| 480 | u32 param1; | ||
| 481 | u32 param2; | ||
| 482 | u32 param3; | ||
| 483 | u32 limit_batt; | ||
| 484 | u8 flags1; | ||
| 485 | u8 past_length; | ||
| 486 | u8 next_length; | ||
| 487 | u16 ratio_min; | ||
| 488 | u16 ratio_max; | ||
| 489 | u8 sample_mult; | ||
| 490 | u32 filter_param; | ||
| 491 | }; | ||
| 492 | |||
| 493 | static inline void devinit_unpack_pwr_policy_entry( | ||
| 494 | struct pwr_policy_3x_entry_unpacked *unpacked, | ||
| 495 | struct pwr_policy_3x_entry_struct *packed) | ||
| 496 | { | ||
| 497 | __UNPACK_FIELD(unpacked, packed, flags0); | ||
| 498 | __UNPACK_FIELD(unpacked, packed, ch_idx); | ||
| 499 | __UNPACK_FIELD(unpacked, packed, limit_min); | ||
| 500 | __UNPACK_FIELD(unpacked, packed, limit_rated); | ||
| 501 | __UNPACK_FIELD(unpacked, packed, limit_max); | ||
| 502 | __UNPACK_FIELD(unpacked, packed, param0); | ||
| 503 | __UNPACK_FIELD(unpacked, packed, param1); | ||
| 504 | __UNPACK_FIELD(unpacked, packed, param2); | ||
| 505 | __UNPACK_FIELD(unpacked, packed, param3); | ||
| 506 | __UNPACK_FIELD(unpacked, packed, limit_batt); | ||
| 507 | __UNPACK_FIELD(unpacked, packed, flags1); | ||
| 508 | __UNPACK_FIELD(unpacked, packed, past_length); | ||
| 509 | __UNPACK_FIELD(unpacked, packed, next_length); | ||
| 510 | __UNPACK_FIELD(unpacked, packed, ratio_min); | ||
| 511 | __UNPACK_FIELD(unpacked, packed, ratio_max); | ||
| 512 | __UNPACK_FIELD(unpacked, packed, sample_mult); | ||
| 513 | __UNPACK_FIELD(unpacked, packed, filter_param); | ||
| 514 | } | ||
| 515 | |||
| 516 | static int devinit_get_pwr_policy_table(struct gk20a *g, | ||
| 517 | struct pmgr_pwr_policy *ppwrpolicyobjs) | ||
| 518 | { | ||
| 519 | int status = 0; | ||
| 520 | u8 *ptr = NULL; | ||
| 521 | struct boardobj *boardobj; | ||
| 522 | struct pwr_policy_3x_header_struct *packed_hdr; | ||
| 523 | struct pwr_policy_3x_header_unpacked hdr; | ||
| 524 | u32 index; | ||
| 525 | u32 obj_index = 0; | ||
| 526 | u16 pwr_policy_size; | ||
| 527 | bool integral_control = false; | ||
| 528 | u32 hw_threshold_policy_index = 0; | ||
| 529 | union pwr_policy_data_union pwr_policy_data; | ||
| 530 | |||
| 531 | nvgpu_log_info(g, " "); | ||
| 532 | |||
| 533 | ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g, | ||
| 534 | g->bios.perf_token, POWER_CAPPING_TABLE); | ||
| 535 | if (ptr == NULL) { | ||
| 536 | status = -EINVAL; | ||
| 537 | goto done; | ||
| 538 | } | ||
| 539 | |||
| 540 | packed_hdr = (struct pwr_policy_3x_header_struct *)ptr; | ||
| 541 | |||
| 542 | if (packed_hdr->version != | ||
| 543 | VBIOS_POWER_POLICY_VERSION_3X) { | ||
| 544 | status = -EINVAL; | ||
| 545 | goto done; | ||
| 546 | } | ||
| 547 | |||
| 548 | if (packed_hdr->header_size < | ||
| 549 | VBIOS_POWER_POLICY_3X_HEADER_SIZE_25) { | ||
| 550 | status = -EINVAL; | ||
| 551 | goto done; | ||
| 552 | } | ||
| 553 | |||
| 554 | if (packed_hdr->table_entry_size < | ||
| 555 | VBIOS_POWER_POLICY_3X_ENTRY_SIZE_2E) { | ||
| 556 | status = -EINVAL; | ||
| 557 | goto done; | ||
| 558 | } | ||
| 559 | |||
| 560 | /* unpack power policy table header */ | ||
| 561 | devinit_unpack_pwr_policy_header(&hdr, packed_hdr); | ||
| 562 | |||
| 563 | ptr += (u32)hdr.header_size; | ||
| 564 | |||
| 565 | for (index = 0; index < hdr.num_table_entries; | ||
| 566 | index++, ptr += (u32)hdr.table_entry_size) { | ||
| 567 | |||
| 568 | struct pwr_policy_3x_entry_struct *packed_entry; | ||
| 569 | struct pwr_policy_3x_entry_unpacked entry; | ||
| 570 | |||
| 571 | u8 class_type; | ||
| 572 | |||
| 573 | packed_entry = (struct pwr_policy_3x_entry_struct *)ptr; | ||
| 574 | |||
| 575 | class_type = (u8)BIOS_GET_FIELD( | ||
| 576 | packed_entry->flags0, | ||
| 577 | NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS); | ||
| 578 | |||
| 579 | if (class_type != NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS_HW_THRESHOLD) { | ||
| 580 | continue; | ||
| 581 | } | ||
| 582 | |||
| 583 | /* unpack power policy table entry */ | ||
| 584 | devinit_unpack_pwr_policy_entry(&entry, packed_entry); | ||
| 585 | |||
| 586 | ppwrpolicyobjs->version = | ||
| 587 | CTRL_PMGR_PWR_POLICY_TABLE_VERSION_3X; | ||
| 588 | ppwrpolicyobjs->base_sample_period = hdr.base_sample_period; | ||
| 589 | ppwrpolicyobjs->min_client_sample_period = | ||
| 590 | hdr.min_client_sample_period; | ||
| 591 | ppwrpolicyobjs->low_sampling_mult = hdr.low_sampling_mult; | ||
| 592 | |||
| 593 | ppwrpolicyobjs->policy_idxs[1] = hdr.tgp_policy_idx; | ||
| 594 | ppwrpolicyobjs->policy_idxs[0] = hdr.rtp_policy_idx; | ||
| 595 | ppwrpolicyobjs->policy_idxs[2] = hdr.mxm_policy_idx; | ||
| 596 | ppwrpolicyobjs->policy_idxs[3] = hdr.dnotifier_policy_idx; | ||
| 597 | ppwrpolicyobjs->ext_limits[0].limit = hdr.d2_limit; | ||
| 598 | ppwrpolicyobjs->ext_limits[1].limit = hdr.d3_limit; | ||
| 599 | ppwrpolicyobjs->ext_limits[2].limit = hdr.d4_limit; | ||
| 600 | ppwrpolicyobjs->ext_limits[3].limit = hdr.d5_limit; | ||
| 601 | ppwrpolicyobjs->policy_idxs[4] = hdr.pwr_tgt_policy_idx; | ||
| 602 | ppwrpolicyobjs->policy_idxs[5] = hdr.pwr_tgt_floor_policy_idx; | ||
| 603 | ppwrpolicyobjs->policy_idxs[6] = hdr.sm_bus_policy_idx; | ||
| 604 | |||
| 605 | integral_control = (bool)BIOS_GET_FIELD(entry.flags1, | ||
| 606 | NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_INTEGRAL_CONTROL); | ||
| 607 | |||
| 608 | if (integral_control == 0x01) { | ||
| 609 | pwr_policy_data.pwrpolicy.integral.past_sample_count = | ||
| 610 | entry.past_length; | ||
| 611 | pwr_policy_data.pwrpolicy.integral.next_sample_count = | ||
| 612 | entry.next_length; | ||
| 613 | pwr_policy_data.pwrpolicy.integral.ratio_limit_max = | ||
| 614 | entry.ratio_max; | ||
| 615 | pwr_policy_data.pwrpolicy.integral.ratio_limit_min = | ||
| 616 | entry.ratio_min; | ||
| 617 | } else { | ||
| 618 | memset(&(pwr_policy_data.pwrpolicy.integral), 0x0, | ||
| 619 | sizeof(struct ctrl_pmgr_pwr_policy_info_integral)); | ||
| 620 | } | ||
| 621 | pwr_policy_data.hw_threshold.threshold_idx = (u8) | ||
| 622 | BIOS_GET_FIELD(entry.param0, | ||
| 623 | NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_THRES_IDX); | ||
| 624 | |||
| 625 | pwr_policy_data.hw_threshold.b_use_low_threshold = | ||
| 626 | BIOS_GET_FIELD(entry.param0, | ||
| 627 | NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_USE); | ||
| 628 | |||
| 629 | if (pwr_policy_data.hw_threshold.b_use_low_threshold) { | ||
| 630 | pwr_policy_data.hw_threshold.low_threshold_idx = (u8) | ||
| 631 | BIOS_GET_FIELD(entry.param0, | ||
| 632 | NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_IDX); | ||
| 633 | |||
| 634 | pwr_policy_data.hw_threshold.low_threshold_value = (u16) | ||
| 635 | BIOS_GET_FIELD(entry.param1, | ||
| 636 | NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM1_HW_THRESHOLD_LOW_THRESHOLD_VAL); | ||
| 637 | } | ||
| 638 | |||
| 639 | pwr_policy_size = sizeof(struct pwr_policy_hw_threshold); | ||
| 640 | |||
| 641 | /* Initialize data for the parent class */ | ||
| 642 | pwr_policy_data.boardobj.type = | ||
| 643 | CTRL_PMGR_PWR_POLICY_TYPE_HW_THRESHOLD; | ||
| 644 | pwr_policy_data.pwrpolicy.ch_idx = entry.ch_idx; | ||
| 645 | pwr_policy_data.pwrpolicy.limit_unit = (u8) | ||
| 646 | BIOS_GET_FIELD(entry.flags0, | ||
| 647 | NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_LIMIT_UNIT); | ||
| 648 | pwr_policy_data.pwrpolicy.filter_type = | ||
| 649 | (enum ctrl_pmgr_pwr_policy_filter_type) | ||
| 650 | BIOS_GET_FIELD(entry.flags1, | ||
| 651 | NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FILTER_TYPE); | ||
| 652 | |||
| 653 | pwr_policy_data.pwrpolicy.limit_min = entry.limit_min; | ||
| 654 | pwr_policy_data.pwrpolicy.limit_rated = entry.limit_rated; | ||
| 655 | pwr_policy_data.pwrpolicy.limit_max = entry.limit_max; | ||
| 656 | pwr_policy_data.pwrpolicy.limit_batt = entry.limit_batt; | ||
| 657 | |||
| 658 | pwr_policy_data.pwrpolicy.sample_mult = (u8)entry.sample_mult; | ||
| 659 | |||
| 660 | /* Reset the filter parameters; the entry's filterParam value is not applied here */ | ||
| 661 | pwr_policy_data.pwrpolicy.filter_param.block.block_size = 0; | ||
| 662 | pwr_policy_data.pwrpolicy.filter_param.moving_avg.window_size = 0; | ||
| 663 | pwr_policy_data.pwrpolicy.filter_param.iir.divisor = 0; | ||
| 664 | |||
| 665 | hw_threshold_policy_index |= | ||
| 666 | BIT(pwr_policy_data.hw_threshold.threshold_idx); | ||
| 667 | |||
| 668 | boardobj = construct_pwr_policy(g, &pwr_policy_data, | ||
| 669 | pwr_policy_size, pwr_policy_data.boardobj.type); | ||
| 670 | |||
| 671 | if (!boardobj) { | ||
| 672 | nvgpu_err(g, | ||
| 673 | "unable to create pwr policy for %d type %d", | ||
| 674 | index, pwr_policy_data.boardobj.type); | ||
| 675 | status = -EINVAL; | ||
| 676 | goto done; | ||
| 677 | } | ||
| 678 | |||
| 679 | status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super, | ||
| 680 | boardobj, obj_index); | ||
| 681 | |||
| 682 | if (status) { | ||
| 683 | nvgpu_err(g, | ||
| 684 | "unable to insert pwr policy boardobj for %d", | ||
| 685 | index); | ||
| 686 | status = -EINVAL; | ||
| 687 | goto done; | ||
| 688 | } | ||
| 689 | |||
| 690 | ++obj_index; | ||
| 691 | } | ||
| 692 | |||
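| | /* Optionally append the hardcoded SW_THRESHOLD workaround policy after the VBIOS-described entries. */ | ||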
| 693 | if (g->hardcode_sw_threshold) { | ||
| 694 | status = _pwr_policy_construct_WAR_SW_Threshold_policy(g, | ||
| 695 | ppwrpolicyobjs, | ||
| 696 | &pwr_policy_data, | ||
| 697 | sizeof(struct pwr_policy_sw_threshold), | ||
| 698 | obj_index); | ||
| 699 | if (status) { | ||
| 700 | nvgpu_err(g, "unable to construct_WAR_policy"); | ||
| 701 | status = -EINVAL; | ||
| 702 | goto done; | ||
| 703 | } | ||
| 704 | ++obj_index; | ||
| 705 | } | ||
| 706 | |||
| 707 | done: | ||
| 708 | nvgpu_log_info(g, " done status %x", status); | ||
| 709 | return status; | ||
| 710 | } | ||
| 711 | |||
| 712 | int pmgr_policy_sw_setup(struct gk20a *g) | ||
| 713 | { | ||
| 714 | int status; | ||
| 715 | struct boardobjgrp *pboardobjgrp = NULL; | ||
| 716 | struct pwr_policy *ppolicy; | ||
| 717 | struct pmgr_pwr_policy *ppwrpolicyobjs; | ||
| 718 | u8 indx = 0; | ||
| 719 | |||
| 720 | /* Construct the Super Class and override the Interfaces */ | ||
| 721 | status = boardobjgrpconstruct_e32(g, | ||
| 722 | &g->pmgr_pmu.pmgr_policyobjs.pwr_policies); | ||
| 723 | if (status) { | ||
| 724 | nvgpu_err(g, | ||
| 725 | "error creating boardobjgrp for pmgr policy, status - 0x%x", | ||
| 726 | status); | ||
| 727 | goto done; | ||
| 728 | } | ||
| 729 | |||
| 730 | status = boardobjgrpconstruct_e32(g, | ||
| 731 | &g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels); | ||
| 732 | if (status) { | ||
| 733 | nvgpu_err(g, | ||
| 734 | "error creating boardobjgrp for pmgr policy rels, status - 0x%x", | ||
| 735 | status); | ||
| 736 | goto done; | ||
| 737 | } | ||
| 738 | |||
| 739 | status = boardobjgrpconstruct_e32(g, | ||
| 740 | &g->pmgr_pmu.pmgr_policyobjs.pwr_violations); | ||
| 741 | if (status) { | ||
| 742 | nvgpu_err(g, | ||
| 743 | "error creating boardobjgrp for pmgr violations, status - 0x%x", | ||
| 744 | status); | ||
| 745 | goto done; | ||
| 746 | } | ||
| 747 | |||
| 748 | memset(g->pmgr_pmu.pmgr_policyobjs.policy_idxs, CTRL_PMGR_PWR_POLICY_INDEX_INVALID, | ||
| 749 | sizeof(u8) * CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES); | ||
| 750 | |||
| 751 | /* Initialize external power limit policy indexes to _INVALID/0xFF */ | ||
| 752 | for (indx = 0; indx < PWR_POLICY_EXT_POWER_STATE_ID_COUNT; indx++) { | ||
| 753 | g->pmgr_pmu.pmgr_policyobjs.ext_limits[indx].policy_table_idx = | ||
| 754 | CTRL_PMGR_PWR_POLICY_INDEX_INVALID; | ||
| 755 | } | ||
| 756 | |||
| 757 | /* Initialize external power state to _D1 */ | ||
| 758 | g->pmgr_pmu.pmgr_policyobjs.ext_power_state = 0xFFFFFFFF; | ||
| 759 | |||
| 760 | ppwrpolicyobjs = &(g->pmgr_pmu.pmgr_policyobjs); | ||
| 761 | pboardobjgrp = &(g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super); | ||
| 762 | |||
| 763 | status = devinit_get_pwr_policy_table(g, ppwrpolicyobjs); | ||
| 764 | if (status) { | ||
| 765 | goto done; | ||
| 766 | } | ||
| 767 | |||
| 768 | g->pmgr_pmu.pmgr_policyobjs.b_enabled = true; | ||
| 769 | |||
| 770 | BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct pwr_policy *, ppolicy, indx) { | ||
| 771 | PMGR_PWR_POLICY_INCREMENT_LIMIT_INPUT_COUNT(ppolicy); | ||
| 772 | } | ||
| 773 | |||
| 774 | g->pmgr_pmu.pmgr_policyobjs.global_ceiling.values[0] = | ||
| 775 | 0xFF; | ||
| 776 | |||
| 777 | g->pmgr_pmu.pmgr_policyobjs.client_work_item.b_pending = false; | ||
| 778 | |||
| 779 | done: | ||
| 780 | nvgpu_log_info(g, " done status %x", status); | ||
| 781 | return status; | ||
| 782 | } | ||
diff --git a/include/pmgr/pwrpolicy.h b/include/pmgr/pwrpolicy.h new file mode 100644 index 0000000..74f4937 --- /dev/null +++ b/include/pmgr/pwrpolicy.h | |||
| @@ -0,0 +1,136 @@ | |||
| 1 | /* | ||
| 2 | * general power channel structures & definitions | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the "Software"), | ||
| 8 | * to deal in the Software without restriction, including without limitation | ||
| 9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 10 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 11 | * Software is furnished to do so, subject to the following conditions: | ||
| 12 | * | ||
| 13 | * The above copyright notice and this permission notice shall be included in | ||
| 14 | * all copies or substantial portions of the Software. | ||
| 15 | * | ||
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
| 20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
| 22 | * DEALINGS IN THE SOFTWARE. | ||
| 23 | */ | ||
| 24 | #ifndef NVGPU_PMGR_PWRPOLICY_H | ||
| 25 | #define NVGPU_PMGR_PWRPOLICY_H | ||
| 26 | |||
| 27 | #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> | ||
| 28 | #include "boardobj/boardobjgrp.h" | ||
| 29 | #include "boardobj/boardobj.h" | ||
| 30 | #include "ctrl/ctrlpmgr.h" | ||
| 31 | |||
| 32 | #define PWR_POLICY_EXT_POWER_STATE_ID_COUNT 0x4U | ||
| 33 | |||
| 34 | enum pwr_policy_limit_id { | ||
| 35 | PWR_POLICY_LIMIT_ID_MIN = 0x00000000, | ||
| 36 | PWR_POLICY_LIMIT_ID_RATED, | ||
| 37 | PWR_POLICY_LIMIT_ID_MAX, | ||
| 38 | PWR_POLICY_LIMIT_ID_CURR, | ||
| 39 | PWR_POLICY_LIMIT_ID_BATT, | ||
| 40 | }; | ||
| 41 | |||
| 42 | struct pwr_policy { | ||
| 43 | struct boardobj super; | ||
| 44 | u8 ch_idx; | ||
| 45 | u8 num_limit_inputs; | ||
| 46 | u8 limit_unit; | ||
| 47 | s32 limit_delta; | ||
| 48 | u32 limit_min; | ||
| 49 | u32 limit_rated; | ||
| 50 | u32 limit_max; | ||
| 51 | u32 limit_batt; | ||
| 52 | struct ctrl_pmgr_pwr_policy_info_integral integral; | ||
| 53 | struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_min; | ||
| 54 | struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_rated; | ||
| 55 | struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_max; | ||
| 56 | struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_batt; | ||
| 57 | struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_curr; | ||
| 58 | u8 sample_mult; | ||
| 59 | enum ctrl_pmgr_pwr_policy_filter_type filter_type; | ||
| 60 | union ctrl_pmgr_pwr_policy_filter_param filter_param; | ||
| 61 | }; | ||
| 62 | |||
| 63 | struct pwr_policy_ext_limit { | ||
| 64 | u8 policy_table_idx; | ||
| 65 | u32 limit; | ||
| 66 | }; | ||
| 67 | |||
| 68 | struct pwr_policy_batt_workitem { | ||
| 69 | u32 power_state; | ||
| 70 | bool b_full_deflection; | ||
| 71 | }; | ||
| 72 | |||
| 73 | struct pwr_policy_client_workitem { | ||
| 74 | u32 limit; | ||
| 75 | bool b_pending; | ||
| 76 | }; | ||
| 77 | |||
| 78 | struct pwr_policy_relationship { | ||
| 79 | struct boardobj super; | ||
| 80 | u8 policy_idx; | ||
| 81 | }; | ||
| 82 | |||
| 83 | struct pmgr_pwr_policy { | ||
| 84 | u8 version; | ||
| 85 | bool b_enabled; | ||
| 86 | struct nv_pmu_perf_domain_group_limits global_ceiling; | ||
| 87 | u8 policy_idxs[CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES]; | ||
| 88 | struct pwr_policy_ext_limit ext_limits[PWR_POLICY_EXT_POWER_STATE_ID_COUNT]; | ||
| 89 | s32 ext_power_state; | ||
| 90 | u16 base_sample_period; | ||
| 91 | u16 min_client_sample_period; | ||
| 92 | u8 low_sampling_mult; | ||
| 93 | struct boardobjgrp_e32 pwr_policies; | ||
| 94 | struct boardobjgrp_e32 pwr_policy_rels; | ||
| 95 | struct boardobjgrp_e32 pwr_violations; | ||
| 96 | struct pwr_policy_client_workitem client_work_item; | ||
| 97 | }; | ||
| 98 | |||
| 99 | struct pwr_policy_limit { | ||
| 100 | struct pwr_policy super; | ||
| 101 | }; | ||
| 102 | |||
| 103 | struct pwr_policy_hw_threshold { | ||
| 104 | struct pwr_policy_limit super; | ||
| 105 | u8 threshold_idx; | ||
| 106 | u8 low_threshold_idx; | ||
| 107 | bool b_use_low_threshold; | ||
| 108 | u16 low_threshold_value; | ||
| 109 | }; | ||
| 110 | |||
| 111 | struct pwr_policy_sw_threshold { | ||
| 112 | struct pwr_policy_limit super; | ||
| 113 | u8 threshold_idx; | ||
| 114 | u8 low_threshold_idx; | ||
| 115 | bool b_use_low_threshold; | ||
| 116 | u16 low_threshold_value; | ||
| 117 | u8 event_id; | ||
| 118 | }; | ||
| 119 | |||
| 120 | union pwr_policy_data_union { | ||
| 121 | struct boardobj boardobj; | ||
| 122 | struct pwr_policy pwrpolicy; | ||
| 123 | struct pwr_policy_hw_threshold hw_threshold; | ||
| 124 | struct pwr_policy_sw_threshold sw_threshold; | ||
| 125 | }; | ||
| 126 | |||
| 127 | #define PMGR_GET_PWR_POLICY(g, policy_idx) \ | ||
| 128 | ((struct pwr_policy *)BOARDOBJGRP_OBJ_GET_BY_IDX( \ | ||
| 129 | &(g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super), (policy_idx))) | ||
| 130 | |||
| 131 | #define PMGR_PWR_POLICY_INCREMENT_LIMIT_INPUT_COUNT(ppolicy) \ | ||
| 132 | ((ppolicy)->num_limit_inputs++) | ||
| 133 | |||
| 134 | int pmgr_policy_sw_setup(struct gk20a *g); | ||
| 135 | |||
| 136 | #endif /* NVGPU_PMGR_PWRPOLICY_H */ | ||
