summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/pmgr
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/pmgr')
-rw-r--r--drivers/gpu/nvgpu/pmgr/pmgr.c184
-rw-r--r--drivers/gpu/nvgpu/pmgr/pmgr.h43
-rw-r--r--drivers/gpu/nvgpu/pmgr/pmgrpmu.c533
-rw-r--r--drivers/gpu/nvgpu/pmgr/pmgrpmu.h38
-rw-r--r--drivers/gpu/nvgpu/pmgr/pwrdev.c315
-rw-r--r--drivers/gpu/nvgpu/pmgr/pwrdev.h60
-rw-r--r--drivers/gpu/nvgpu/pmgr/pwrmonitor.c370
-rw-r--r--drivers/gpu/nvgpu/pmgr/pwrmonitor.h69
-rw-r--r--drivers/gpu/nvgpu/pmgr/pwrpolicy.c781
-rw-r--r--drivers/gpu/nvgpu/pmgr/pwrpolicy.h136
10 files changed, 2529 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/pmgr/pmgr.c b/drivers/gpu/nvgpu/pmgr/pmgr.c
new file mode 100644
index 00000000..2a9f9673
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgr.c
@@ -0,0 +1,184 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "gk20a/gk20a.h"
24#include "pwrdev.h"
25#include "pmgrpmu.h"
26
27#ifdef CONFIG_DEBUG_FS
28#include <linux/debugfs.h>
29#include "common/linux/os_linux.h"
30#endif
31
32int pmgr_pwr_devices_get_power(struct gk20a *g, u32 *val)
33{
34 struct nv_pmu_pmgr_pwr_devices_query_payload payload;
35 int status;
36
37 status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload);
38 if (status)
39 nvgpu_err(g, "pmgr_pwr_devices_get_current_power failed %x",
40 status);
41
42 *val = payload.devices[0].powerm_w;
43
44 return status;
45}
46
47int pmgr_pwr_devices_get_current(struct gk20a *g, u32 *val)
48{
49 struct nv_pmu_pmgr_pwr_devices_query_payload payload;
50 int status;
51
52 status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload);
53 if (status)
54 nvgpu_err(g, "pmgr_pwr_devices_get_current failed %x",
55 status);
56
57 *val = payload.devices[0].currentm_a;
58
59 return status;
60}
61
62int pmgr_pwr_devices_get_voltage(struct gk20a *g, u32 *val)
63{
64 struct nv_pmu_pmgr_pwr_devices_query_payload payload;
65 int status;
66
67 status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload);
68 if (status)
69 nvgpu_err(g, "pmgr_pwr_devices_get_current_voltage failed %x",
70 status);
71
72 *val = payload.devices[0].voltageu_v;
73
74 return status;
75}
76
77#ifdef CONFIG_DEBUG_FS
78static int pmgr_pwr_devices_get_power_u64(void *data, u64 *p)
79{
80 struct gk20a *g = (struct gk20a *)data;
81 int err;
82 u32 val;
83
84 err = pmgr_pwr_devices_get_power(g, &val);
85 *p = val;
86
87 return err;
88}
89
90static int pmgr_pwr_devices_get_current_u64(void *data, u64 *p)
91{
92 struct gk20a *g = (struct gk20a *)data;
93 int err;
94 u32 val;
95
96 err = pmgr_pwr_devices_get_current(g, &val);
97 *p = val;
98
99 return err;
100}
101
102static int pmgr_pwr_devices_get_voltage_u64(void *data, u64 *p)
103{
104 struct gk20a *g = (struct gk20a *)data;
105 int err;
106 u32 val;
107
108 err = pmgr_pwr_devices_get_voltage(g, &val);
109 *p = val;
110
111 return err;
112}
113
/* Read-only debugfs attributes ("%llu\n" formatted) backed by the PMU
 * power-device query wrappers above. No write handler is provided. */
DEFINE_SIMPLE_ATTRIBUTE(
	pmgr_power_ctrl_fops, pmgr_pwr_devices_get_power_u64, NULL, "%llu\n");

DEFINE_SIMPLE_ATTRIBUTE(
	pmgr_current_ctrl_fops, pmgr_pwr_devices_get_current_u64, NULL, "%llu\n");

DEFINE_SIMPLE_ATTRIBUTE(
	pmgr_voltage_ctrl_fops, pmgr_pwr_devices_get_voltage_u64, NULL, "%llu\n");
122
123static void pmgr_debugfs_init(struct gk20a *g)
124{
125 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
126 struct dentry *dbgentry;
127
128 dbgentry = debugfs_create_file(
129 "power", S_IRUGO, l->debugfs, g, &pmgr_power_ctrl_fops);
130 if (!dbgentry)
131 nvgpu_err(g, "debugfs entry create failed for power");
132
133 dbgentry = debugfs_create_file(
134 "current", S_IRUGO, l->debugfs, g, &pmgr_current_ctrl_fops);
135 if (!dbgentry)
136 nvgpu_err(g, "debugfs entry create failed for current");
137
138 dbgentry = debugfs_create_file(
139 "voltage", S_IRUGO, l->debugfs, g, &pmgr_voltage_ctrl_fops);
140 if (!dbgentry)
141 nvgpu_err(g, "debugfs entry create failed for voltage");
142}
143#endif
144
145u32 pmgr_domain_sw_setup(struct gk20a *g)
146{
147 u32 status;
148
149 status = pmgr_device_sw_setup(g);
150 if (status) {
151 nvgpu_err(g,
152 "error creating boardobjgrp for pmgr devices, status - 0x%x",
153 status);
154 goto exit;
155 }
156
157 status = pmgr_monitor_sw_setup(g);
158 if (status) {
159 nvgpu_err(g,
160 "error creating boardobjgrp for pmgr monitor, status - 0x%x",
161 status);
162 goto exit;
163 }
164
165 status = pmgr_policy_sw_setup(g);
166 if (status) {
167 nvgpu_err(g,
168 "error creating boardobjgrp for pmgr policy, status - 0x%x",
169 status);
170 goto exit;
171 }
172
173#ifdef CONFIG_DEBUG_FS
174 pmgr_debugfs_init(g);
175#endif
176
177exit:
178 return status;
179}
180
/*
 * PMU-side setup for the pmgr domain: pushes the i2c/device/monitor/policy
 * tables down to the PMU and issues the blocking LOAD command.
 *
 * @return 0 on success, non-zero PMU status otherwise.
 */
u32 pmgr_domain_pmu_setup(struct gk20a *g)
{
	return pmgr_send_pmgr_tables_to_pmu(g);
}
diff --git a/drivers/gpu/nvgpu/pmgr/pmgr.h b/drivers/gpu/nvgpu/pmgr/pmgr.h
new file mode 100644
index 00000000..85b1bbd1
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgr.h
@@ -0,0 +1,43 @@
1/*
2 * general power device structures & definitions
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#ifndef _PMGR_H_
25#define _PMGR_H_
26
27#include "pwrdev.h"
28#include "pwrmonitor.h"
29#include "pwrpolicy.h"
30
/*
 * Aggregate software state for the pmgr PMU domain: one board-object
 * group each for power devices, power monitors and power policies.
 */
struct pmgr_pmupstate {
	struct pwr_devices pmgr_deviceobjs;      /* power sensing devices */
	struct pmgr_pwr_monitor pmgr_monitorobjs; /* power channels/relationships */
	struct pmgr_pwr_policy pmgr_policyobjs;  /* power capping policies */
};
36
/* Build the pmgr board-object groups (devices/monitor/policy). */
u32 pmgr_domain_sw_setup(struct gk20a *g);
/* Push the pmgr tables to the PMU and issue the LOAD command. */
u32 pmgr_domain_pmu_setup(struct gk20a *g);
/* Blocking reads of device-0 telemetry; units: mA, uV, mW respectively. */
int pmgr_pwr_devices_get_current(struct gk20a *g, u32 *val);
int pmgr_pwr_devices_get_voltage(struct gk20a *g, u32 *val);
int pmgr_pwr_devices_get_power(struct gk20a *g, u32 *val);
42
43#endif
diff --git a/drivers/gpu/nvgpu/pmgr/pmgrpmu.c b/drivers/gpu/nvgpu/pmgr/pmgrpmu.c
new file mode 100644
index 00000000..6913c280
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgrpmu.c
@@ -0,0 +1,533 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/kmem.h>
24#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
25#include <nvgpu/pmu.h>
26
27#include "gk20a/gk20a.h"
28#include "gp106/bios_gp106.h"
29#include "common/linux/os_linux.h"
30#include "common/linux/platform_gk20a.h"
31
32#include "boardobj/boardobjgrp.h"
33#include "boardobj/boardobjgrp_e32.h"
34
35#include "pwrdev.h"
36#include "pmgrpmu.h"
37
/*
 * Completion cookie shared between a command poster and
 * pmgr_pmucmdhandler(): success is set to 1 only when the PMU reply
 * validates cleanly; posters poll it via pmu_wait_message_cond().
 */
struct pmgr_pmucmdhandler_params {
	u32 success;
};
41
42static void pmgr_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
43 void *param, u32 handle, u32 status)
44{
45 struct pmgr_pmucmdhandler_params *phandlerparams =
46 (struct pmgr_pmucmdhandler_params *)param;
47
48 if ((msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_SET_OBJECT) &&
49 (msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_QUERY) &&
50 (msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_LOAD)) {
51 nvgpu_err(g, "unknow msg %x", msg->msg.pmgr.msg_type);
52 return;
53 }
54
55 if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_SET_OBJECT) {
56 if ((msg->msg.pmgr.set_object.b_success != 1) ||
57 (msg->msg.pmgr.set_object.flcnstatus != 0) ) {
58 nvgpu_err(g, "pmgr msg failed %x %x %x %x",
59 msg->msg.pmgr.set_object.msg_type,
60 msg->msg.pmgr.set_object.b_success,
61 msg->msg.pmgr.set_object.flcnstatus,
62 msg->msg.pmgr.set_object.object_type);
63 return;
64 }
65 } else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_QUERY) {
66 if ((msg->msg.pmgr.query.b_success != 1) ||
67 (msg->msg.pmgr.query.flcnstatus != 0) ) {
68 nvgpu_err(g, "pmgr msg failed %x %x %x %x",
69 msg->msg.pmgr.query.msg_type,
70 msg->msg.pmgr.query.b_success,
71 msg->msg.pmgr.query.flcnstatus,
72 msg->msg.pmgr.query.cmd_type);
73 return;
74 }
75 } else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_LOAD) {
76 if ((msg->msg.pmgr.query.b_success != 1) ||
77 (msg->msg.pmgr.query.flcnstatus != 0) ) {
78 nvgpu_err(g, "pmgr msg failed %x %x %x",
79 msg->msg.pmgr.load.msg_type,
80 msg->msg.pmgr.load.b_success,
81 msg->msg.pmgr.load.flcnstatus);
82 return;
83 }
84 }
85
86 phandlerparams->success = 1;
87}
88
89static u32 pmgr_pmu_set_object(struct gk20a *g,
90 u8 type,
91 u16 dmem_size,
92 u16 fb_size,
93 void *pobj)
94{
95 struct pmu_cmd cmd;
96 struct pmu_payload payload;
97 struct nv_pmu_pmgr_cmd_set_object *pcmd;
98 u32 status;
99 u32 seqdesc;
100 struct pmgr_pmucmdhandler_params handlerparams;
101
102 memset(&payload, 0, sizeof(struct pmu_payload));
103 memset(&cmd, 0, sizeof(struct pmu_cmd));
104 memset(&handlerparams, 0, sizeof(struct pmgr_pmucmdhandler_params));
105
106 cmd.hdr.unit_id = PMU_UNIT_PMGR;
107 cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_set_object) +
108 (u32)sizeof(struct pmu_hdr);;
109
110 pcmd = &cmd.cmd.pmgr.set_object;
111 pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_SET_OBJECT;
112 pcmd->object_type = type;
113
114 payload.in.buf = pobj;
115 payload.in.size = dmem_size;
116 payload.in.fb_size = fb_size;
117 payload.in.offset = NV_PMU_PMGR_SET_OBJECT_ALLOC_OFFSET;
118
119 /* Setup the handler params to communicate back results.*/
120 handlerparams.success = 0;
121
122 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
123 PMU_COMMAND_QUEUE_LPQ,
124 pmgr_pmucmdhandler,
125 (void *)&handlerparams,
126 &seqdesc, ~0);
127 if (status) {
128 nvgpu_err(g,
129 "unable to post pmgr cmd for unit %x cmd id %x obj type %x",
130 cmd.hdr.unit_id, pcmd->cmd_type, pcmd->object_type);
131 goto exit;
132 }
133
134 pmu_wait_message_cond(&g->pmu,
135 gk20a_get_gr_idle_timeout(g),
136 &handlerparams.success, 1);
137
138 if (handlerparams.success == 0) {
139 nvgpu_err(g, "could not process cmd");
140 status = -ETIMEDOUT;
141 goto exit;
142 }
143
144exit:
145 return status;
146}
147
148static u32 pmgr_send_i2c_device_topology_to_pmu(struct gk20a *g)
149{
150 struct nv_pmu_pmgr_i2c_device_desc_table i2c_desc_table;
151 struct gk20a_platform *platform = gk20a_get_platform(dev_from_gk20a(g));
152 u32 idx = platform->ina3221_dcb_index;
153 u32 status = 0;
154
155 /* INA3221 I2C device info */
156 i2c_desc_table.dev_mask = (1UL << idx);
157
158 /* INA3221 */
159 i2c_desc_table.devices[idx].super.type = 0x4E;
160
161 i2c_desc_table.devices[idx].dcb_index = idx;
162 i2c_desc_table.devices[idx].i2c_address = platform->ina3221_i2c_address;
163 i2c_desc_table.devices[idx].i2c_flags = 0xC2F;
164 i2c_desc_table.devices[idx].i2c_port = platform->ina3221_i2c_port;
165
166 /* Pass the table down the PMU as an object */
167 status = pmgr_pmu_set_object(
168 g,
169 NV_PMU_PMGR_OBJECT_I2C_DEVICE_DESC_TABLE,
170 (u16)sizeof(struct nv_pmu_pmgr_i2c_device_desc_table),
171 PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED,
172 &i2c_desc_table);
173
174 if (status)
175 nvgpu_err(g, "pmgr_pmu_set_object failed %x",
176 status);
177
178 return status;
179}
180
181static u32 pmgr_send_pwr_device_topology_to_pmu(struct gk20a *g)
182{
183 struct nv_pmu_pmgr_pwr_device_desc_table pwr_desc_table;
184 struct nv_pmu_pmgr_pwr_device_desc_table_header *ppwr_desc_header;
185 u32 status = 0;
186
187 /* Set the BA-device-independent HW information */
188 ppwr_desc_header = &(pwr_desc_table.hdr.data);
189 ppwr_desc_header->ba_info.b_initialized_and_used = false;
190
191 /* populate the table */
192 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&ppwr_desc_header->super,
193 g->pmgr_pmu.pmgr_deviceobjs.super.super.objmask);
194
195 status = boardobjgrp_pmudatainit_legacy(g,
196 &g->pmgr_pmu.pmgr_deviceobjs.super.super,
197 (struct nv_pmu_boardobjgrp_super *)&pwr_desc_table);
198
199 if (status) {
200 nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x",
201 status);
202 goto exit;
203 }
204
205 /* Pass the table down the PMU as an object */
206 status = pmgr_pmu_set_object(
207 g,
208 NV_PMU_PMGR_OBJECT_PWR_DEVICE_DESC_TABLE,
209 (u16)sizeof(
210 union nv_pmu_pmgr_pwr_device_dmem_size),
211 (u16)sizeof(struct nv_pmu_pmgr_pwr_device_desc_table),
212 &pwr_desc_table);
213
214 if (status)
215 nvgpu_err(g, "pmgr_pmu_set_object failed %x",
216 status);
217
218exit:
219 return status;
220}
221
/*
 * Serialize the power-monitor state (channels + channel relationships)
 * and push it to the PMU as the PWR_MONITOR object.
 *
 * NOTE: function name carries a historical typo ("mointer"); kept for
 * link compatibility with callers.
 *
 * @return 0 on success, serialization or SET_OBJECT status otherwise.
 */
static u32 pmgr_send_pwr_mointer_to_pmu(struct gk20a *g)
{
	struct nv_pmu_pmgr_pwr_monitor_pack pwr_monitor_pack;
	struct nv_pmu_pmgr_pwr_channel_header *pwr_channel_hdr;
	struct nv_pmu_pmgr_pwr_chrelationship_header *pwr_chrelationship_header;
	u32 max_dmem_size;
	u32 status = 0;

	/* Copy all the global settings from the RM copy */
	/*
	 * Order matters here: the pointer targets a field INSIDE the local
	 * pack, so it remains valid across the whole-struct copy on the
	 * next line (which overwrites the pointed-to bytes, not the
	 * pointer).
	 */
	pwr_channel_hdr = &(pwr_monitor_pack.channels.hdr.data);
	pwr_monitor_pack = g->pmgr_pmu.pmgr_monitorobjs.pmu_data;

	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_channel_hdr->super,
			g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super.objmask);

	/* Copy in each channel */
	status = boardobjgrp_pmudatainit_legacy(g,
			&g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super,
			(struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack.channels));

	if (status) {
		nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x",
			status);
		goto exit;
	}

	/* Copy in each channel relationship */
	pwr_chrelationship_header = &(pwr_monitor_pack.ch_rels.hdr.data);

	boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_chrelationship_header->super,
			g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super.objmask);

	/* Monitor runs without autonomous polling; SW queries on demand. */
	pwr_channel_hdr->physical_channel_mask = g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask;
	pwr_channel_hdr->type = NV_PMU_PMGR_PWR_MONITOR_TYPE_NO_POLLING;

	status = boardobjgrp_pmudatainit_legacy(g,
			&g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super,
			(struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack.ch_rels));

	if (status) {
		nvgpu_err(g, "boardobjgrp_pmudatainit_legacy failed %x",
			status);
		goto exit;
	}

	/* Calculate the max Dmem buffer size */
	max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_monitor_dmem_size);

	/* Pass the table down the PMU as an object */
	status = pmgr_pmu_set_object(
			g,
			NV_PMU_PMGR_OBJECT_PWR_MONITOR,
			(u16)max_dmem_size,
			(u16)sizeof(struct nv_pmu_pmgr_pwr_monitor_pack),
			&pwr_monitor_pack);

	if (status)
		nvgpu_err(g, "pmgr_pmu_set_object failed %x",
			status);

exit:
	return status;
}
285
286static u32 pmgr_send_pwr_policy_to_pmu(struct gk20a *g)
287{
288 struct nv_pmu_pmgr_pwr_policy_pack *ppwrpack = NULL;
289 struct pwr_policy *ppolicy = NULL;
290 u32 status = 0;
291 u8 indx;
292 u32 max_dmem_size;
293
294 ppwrpack = nvgpu_kzalloc(g, sizeof(struct nv_pmu_pmgr_pwr_policy_pack));
295 if (!ppwrpack) {
296 nvgpu_err(g, "pwr policy alloc failed %x",
297 status);
298 status = -ENOMEM;
299 goto exit;
300 }
301
302 ppwrpack->policies.hdr.data.version = g->pmgr_pmu.pmgr_policyobjs.version;
303 ppwrpack->policies.hdr.data.b_enabled = g->pmgr_pmu.pmgr_policyobjs.b_enabled;
304
305 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
306 &ppwrpack->policies.hdr.data.super,
307 g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super.objmask);
308
309 memset(&ppwrpack->policies.hdr.data.reserved_pmu_policy_mask,
310 0,
311 sizeof(ppwrpack->policies.hdr.data.reserved_pmu_policy_mask));
312
313 ppwrpack->policies.hdr.data.base_sample_period =
314 g->pmgr_pmu.pmgr_policyobjs.base_sample_period;
315 ppwrpack->policies.hdr.data.min_client_sample_period =
316 g->pmgr_pmu.pmgr_policyobjs.min_client_sample_period;
317 ppwrpack->policies.hdr.data.low_sampling_mult =
318 g->pmgr_pmu.pmgr_policyobjs.low_sampling_mult;
319
320 memcpy(&ppwrpack->policies.hdr.data.global_ceiling,
321 &g->pmgr_pmu.pmgr_policyobjs.global_ceiling,
322 sizeof(struct nv_pmu_perf_domain_group_limits));
323
324 memcpy(&ppwrpack->policies.hdr.data.semantic_policy_tbl,
325 &g->pmgr_pmu.pmgr_policyobjs.policy_idxs,
326 sizeof(g->pmgr_pmu.pmgr_policyobjs.policy_idxs));
327
328 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, indx,
329 ppwrpack->policies.hdr.data.super.obj_mask.super.data[0]) {
330 ppolicy = PMGR_GET_PWR_POLICY(g, indx);
331
332 status = ((struct boardobj *)ppolicy)->pmudatainit(g, (struct boardobj *)ppolicy,
333 (struct nv_pmu_boardobj *)&(ppwrpack->policies.policies[indx].data));
334 if (status) {
335 nvgpu_err(g, "pmudatainit failed %x indx %x",
336 status, indx);
337 status = -ENOMEM;
338 goto exit;
339 }
340 }
341 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END;
342
343 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
344 &ppwrpack->policy_rels.hdr.data.super,
345 g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super.objmask);
346
347 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
348 &ppwrpack->violations.hdr.data.super,
349 g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super.objmask);
350
351 max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_policy_dmem_size);
352
353 /* Pass the table down the PMU as an object */
354 status = pmgr_pmu_set_object(
355 g,
356 NV_PMU_PMGR_OBJECT_PWR_POLICY,
357 (u16)max_dmem_size,
358 (u16)sizeof(struct nv_pmu_pmgr_pwr_policy_pack),
359 ppwrpack);
360
361 if (status)
362 nvgpu_err(g, "pmgr_pmu_set_object failed %x",
363 status);
364
365exit:
366 if (ppwrpack) {
367 nvgpu_kfree(g, ppwrpack);
368 }
369
370 return status;
371}
372
373u32 pmgr_pmu_pwr_devices_query_blocking(
374 struct gk20a *g,
375 u32 pwr_dev_mask,
376 struct nv_pmu_pmgr_pwr_devices_query_payload *ppayload)
377{
378 struct pmu_cmd cmd;
379 struct pmu_payload payload;
380 struct nv_pmu_pmgr_cmd_pwr_devices_query *pcmd;
381 u32 status;
382 u32 seqdesc;
383 struct pmgr_pmucmdhandler_params handlerparams;
384
385 memset(&payload, 0, sizeof(struct pmu_payload));
386 memset(&cmd, 0, sizeof(struct pmu_cmd));
387 memset(&handlerparams, 0, sizeof(struct pmgr_pmucmdhandler_params));
388
389 cmd.hdr.unit_id = PMU_UNIT_PMGR;
390 cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_pwr_devices_query) +
391 (u32)sizeof(struct pmu_hdr);
392
393 pcmd = &cmd.cmd.pmgr.pwr_dev_query;
394 pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_PWR_DEVICES_QUERY;
395 pcmd->dev_mask = pwr_dev_mask;
396
397 payload.out.buf = ppayload;
398 payload.out.size = sizeof(struct nv_pmu_pmgr_pwr_devices_query_payload);
399 payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
400 payload.out.offset = NV_PMU_PMGR_PWR_DEVICES_QUERY_ALLOC_OFFSET;
401
402 /* Setup the handler params to communicate back results.*/
403 handlerparams.success = 0;
404
405 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
406 PMU_COMMAND_QUEUE_LPQ,
407 pmgr_pmucmdhandler,
408 (void *)&handlerparams,
409 &seqdesc, ~0);
410 if (status) {
411 nvgpu_err(g,
412 "unable to post pmgr query cmd for unit %x cmd id %x dev mask %x",
413 cmd.hdr.unit_id, pcmd->cmd_type, pcmd->dev_mask);
414 goto exit;
415 }
416
417 pmu_wait_message_cond(&g->pmu,
418 gk20a_get_gr_idle_timeout(g),
419 &handlerparams.success, 1);
420
421 if (handlerparams.success == 0) {
422 nvgpu_err(g, "could not process cmd");
423 status = -ETIMEDOUT;
424 goto exit;
425 }
426
427exit:
428 return status;
429}
430
431static u32 pmgr_pmu_load_blocking(struct gk20a *g)
432{
433 struct pmu_cmd cmd = { {0} };
434 struct nv_pmu_pmgr_cmd_load *pcmd;
435 u32 status;
436 u32 seqdesc;
437 struct pmgr_pmucmdhandler_params handlerparams = {0};
438
439 cmd.hdr.unit_id = PMU_UNIT_PMGR;
440 cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_load) +
441 (u32)sizeof(struct pmu_hdr);
442
443 pcmd = &cmd.cmd.pmgr.load;
444 pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_LOAD;
445
446 /* Setup the handler params to communicate back results.*/
447 handlerparams.success = 0;
448
449 status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
450 PMU_COMMAND_QUEUE_LPQ,
451 pmgr_pmucmdhandler,
452 (void *)&handlerparams,
453 &seqdesc, ~0);
454 if (status) {
455 nvgpu_err(g,
456 "unable to post pmgr load cmd for unit %x cmd id %x",
457 cmd.hdr.unit_id, pcmd->cmd_type);
458 goto exit;
459 }
460
461 pmu_wait_message_cond(&g->pmu,
462 gk20a_get_gr_idle_timeout(g),
463 &handlerparams.success, 1);
464
465 if (handlerparams.success == 0) {
466 nvgpu_err(g, "could not process cmd");
467 status = -ETIMEDOUT;
468 goto exit;
469 }
470
471exit:
472 return status;
473}
474
475u32 pmgr_send_pmgr_tables_to_pmu(struct gk20a *g)
476{
477 u32 status = 0;
478
479 status = pmgr_send_i2c_device_topology_to_pmu(g);
480
481 if (status) {
482 nvgpu_err(g,
483 "pmgr_send_i2c_device_topology_to_pmu failed %x",
484 status);
485 goto exit;
486 }
487
488 if (!BOARDOBJGRP_IS_EMPTY(&g->pmgr_pmu.pmgr_deviceobjs.super.super)) {
489 status = pmgr_send_pwr_device_topology_to_pmu(g);
490 if (status) {
491 nvgpu_err(g,
492 "pmgr_send_pwr_device_topology_to_pmu failed %x",
493 status);
494 goto exit;
495 }
496 }
497
498 if (!(BOARDOBJGRP_IS_EMPTY(
499 &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super)) ||
500 !(BOARDOBJGRP_IS_EMPTY(
501 &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super))) {
502 status = pmgr_send_pwr_mointer_to_pmu(g);
503 if (status) {
504 nvgpu_err(g,
505 "pmgr_send_pwr_mointer_to_pmu failed %x", status);
506 goto exit;
507 }
508 }
509
510 if (!(BOARDOBJGRP_IS_EMPTY(
511 &g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super)) ||
512 !(BOARDOBJGRP_IS_EMPTY(
513 &g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super)) ||
514 !(BOARDOBJGRP_IS_EMPTY(
515 &g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super))) {
516 status = pmgr_send_pwr_policy_to_pmu(g);
517 if (status) {
518 nvgpu_err(g,
519 "pmgr_send_pwr_policy_to_pmu failed %x", status);
520 goto exit;
521 }
522 }
523
524 status = pmgr_pmu_load_blocking(g);
525 if (status) {
526 nvgpu_err(g,
527 "pmgr_send_pwr_mointer_to_pmu failed %x", status);
528 goto exit;
529 }
530
531exit:
532 return status;
533}
diff --git a/drivers/gpu/nvgpu/pmgr/pmgrpmu.h b/drivers/gpu/nvgpu/pmgr/pmgrpmu.h
new file mode 100644
index 00000000..3cb9eecb
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgrpmu.h
@@ -0,0 +1,38 @@
1/*
2 * general power device control structures & definitions
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#ifndef _PMGRPMU_H_
25#define _PMGRPMU_H_
26
27#include "gk20a/gk20a.h"
28#include "pwrdev.h"
29#include "pwrmonitor.h"
30
/* Transfer all pmgr tables to the PMU and issue the blocking LOAD. */
u32 pmgr_send_pmgr_tables_to_pmu(struct gk20a *g);

/*
 * Blocking PWR_DEVICES_QUERY: reads telemetry for the devices selected
 * by pwr_dev_mask into *ppayload. Returns 0 on success; on failure
 * *ppayload must not be relied upon.
 */
u32 pmgr_pmu_pwr_devices_query_blocking(
		struct gk20a *g,
		u32 pwr_dev_mask,
		struct nv_pmu_pmgr_pwr_devices_query_payload *ppayload);
37
38#endif
diff --git a/drivers/gpu/nvgpu/pmgr/pwrdev.c b/drivers/gpu/nvgpu/pmgr/pwrdev.c
new file mode 100644
index 00000000..7f4ab716
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrdev.c
@@ -0,0 +1,315 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/bios.h>
24
25#include "gk20a/gk20a.h"
26#include "pwrdev.h"
27#include "boardobj/boardobjgrp.h"
28#include "boardobj/boardobjgrp_e32.h"
29#include "gp106/bios_gp106.h"
30
31static u32 _pwr_device_pmudata_instget(struct gk20a *g,
32 struct nv_pmu_boardobjgrp *pmuboardobjgrp,
33 struct nv_pmu_boardobj **ppboardobjpmudata,
34 u8 idx)
35{
36 struct nv_pmu_pmgr_pwr_device_desc_table *ppmgrdevice =
37 (struct nv_pmu_pmgr_pwr_device_desc_table *)pmuboardobjgrp;
38
39 gk20a_dbg_info("");
40
41 /*check whether pmuboardobjgrp has a valid boardobj in index*/
42 if (((u32)BIT(idx) &
43 ppmgrdevice->hdr.data.super.obj_mask.super.data[0]) == 0)
44 return -EINVAL;
45
46 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
47 &ppmgrdevice->devices[idx].data.board_obj;
48
49 gk20a_dbg_info(" Done");
50
51 return 0;
52}
53
54static u32 _pwr_domains_pmudatainit_ina3221(struct gk20a *g,
55 struct boardobj *board_obj_ptr,
56 struct nv_pmu_boardobj *ppmudata)
57{
58 struct nv_pmu_pmgr_pwr_device_desc_ina3221 *ina3221_desc;
59 struct pwr_device_ina3221 *ina3221;
60 u32 status = 0;
61 u32 indx;
62
63 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
64 if (status) {
65 nvgpu_err(g,
66 "error updating pmu boardobjgrp for pwr domain 0x%x",
67 status);
68 goto done;
69 }
70
71 ina3221 = (struct pwr_device_ina3221 *)board_obj_ptr;
72 ina3221_desc = (struct nv_pmu_pmgr_pwr_device_desc_ina3221 *) ppmudata;
73
74 ina3221_desc->super.power_corr_factor = ina3221->super.power_corr_factor;
75 ina3221_desc->i2c_dev_idx = ina3221->super.i2c_dev_idx;
76 ina3221_desc->configuration = ina3221->configuration;
77 ina3221_desc->mask_enable = ina3221->mask_enable;
78 /* configure NV_PMU_THERM_EVENT_EXT_OVERT */
79 ina3221_desc->event_mask = (1 << 0);
80 ina3221_desc->curr_correct_m = ina3221->curr_correct_m;
81 ina3221_desc->curr_correct_b = ina3221->curr_correct_b;
82
83 for (indx = 0; indx < NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM; indx++) {
84 ina3221_desc->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx];
85 }
86
87done:
88 return status;
89}
90
91static struct boardobj *construct_pwr_device(struct gk20a *g,
92 void *pargs, u16 pargs_size, u8 type)
93{
94 struct boardobj *board_obj_ptr = NULL;
95 u32 status;
96 u32 indx;
97 struct pwr_device_ina3221 *pwrdev;
98 struct pwr_device_ina3221 *ina3221 = (struct pwr_device_ina3221*)pargs;
99
100 status = boardobj_construct_super(g, &board_obj_ptr,
101 pargs_size, pargs);
102 if (status)
103 return NULL;
104
105 pwrdev = (struct pwr_device_ina3221*)board_obj_ptr;
106
107 /* Set Super class interfaces */
108 board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_ina3221;
109 pwrdev->super.power_rail = ina3221->super.power_rail;
110 pwrdev->super.i2c_dev_idx = ina3221->super.i2c_dev_idx;
111 pwrdev->super.power_corr_factor = (1 << 12);
112 pwrdev->super.bIs_inforom_config = false;
113
114 /* Set INA3221-specific information */
115 pwrdev->configuration = ina3221->configuration;
116 pwrdev->mask_enable = ina3221->mask_enable;
117 pwrdev->gpio_function = ina3221->gpio_function;
118 pwrdev->curr_correct_m = ina3221->curr_correct_m;
119 pwrdev->curr_correct_b = ina3221->curr_correct_b;
120
121 for (indx = 0; indx < NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM; indx++) {
122 pwrdev->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx];
123 }
124
125 gk20a_dbg_info(" Done");
126
127 return board_obj_ptr;
128}
129
130static u32 devinit_get_pwr_device_table(struct gk20a *g,
131 struct pwr_devices *ppwrdeviceobjs)
132{
133 u32 status = 0;
134 u8 *pwr_device_table_ptr = NULL;
135 u8 *curr_pwr_device_table_ptr = NULL;
136 struct boardobj *boardobj;
137 struct pwr_sensors_2x_header pwr_sensor_table_header = { 0 };
138 struct pwr_sensors_2x_entry pwr_sensor_table_entry = { 0 };
139 u32 index;
140 u32 obj_index = 0;
141 u16 pwr_device_size;
142 union {
143 struct boardobj boardobj;
144 struct pwr_device pwrdev;
145 struct pwr_device_ina3221 ina3221;
146 } pwr_device_data;
147
148 gk20a_dbg_info("");
149
150 pwr_device_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
151 g->bios.perf_token, POWER_SENSORS_TABLE);
152 if (pwr_device_table_ptr == NULL) {
153 status = -EINVAL;
154 goto done;
155 }
156
157 memcpy(&pwr_sensor_table_header, pwr_device_table_ptr,
158 VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08);
159
160 if (pwr_sensor_table_header.version !=
161 VBIOS_POWER_SENSORS_VERSION_2X) {
162 status = -EINVAL;
163 goto done;
164 }
165
166 if (pwr_sensor_table_header.header_size <
167 VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08) {
168 status = -EINVAL;
169 goto done;
170 }
171
172 if (pwr_sensor_table_header.table_entry_size !=
173 VBIOS_POWER_SENSORS_2X_ENTRY_SIZE_15) {
174 status = -EINVAL;
175 goto done;
176 }
177
178 curr_pwr_device_table_ptr = (pwr_device_table_ptr +
179 VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08);
180
181 for (index = 0; index < pwr_sensor_table_header.num_table_entries; index++) {
182 bool use_fxp8_8 = false;
183 u8 i2c_dev_idx;
184 u8 device_type;
185
186 curr_pwr_device_table_ptr += (pwr_sensor_table_header.table_entry_size * index);
187
188 pwr_sensor_table_entry.flags0 = *curr_pwr_device_table_ptr;
189
190 memcpy(&pwr_sensor_table_entry.class_param0,
191 (curr_pwr_device_table_ptr + 1),
192 (VBIOS_POWER_SENSORS_2X_ENTRY_SIZE_15 - 1));
193
194 device_type = (u8)BIOS_GET_FIELD(
195 pwr_sensor_table_entry.flags0,
196 NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS);
197
198 if (device_type == NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS_I2C) {
199 i2c_dev_idx = (u8)BIOS_GET_FIELD(
200 pwr_sensor_table_entry.class_param0,
201 NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_INDEX);
202 use_fxp8_8 = (u8)BIOS_GET_FIELD(
203 pwr_sensor_table_entry.class_param0,
204 NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_USE_FXP8_8);
205
206 pwr_device_data.ina3221.super.i2c_dev_idx = i2c_dev_idx;
207 pwr_device_data.ina3221.r_shuntm_ohm[0].use_fxp8_8 = use_fxp8_8;
208 pwr_device_data.ina3221.r_shuntm_ohm[1].use_fxp8_8 = use_fxp8_8;
209 pwr_device_data.ina3221.r_shuntm_ohm[2].use_fxp8_8 = use_fxp8_8;
210 pwr_device_data.ina3221.r_shuntm_ohm[0].rshunt_value =
211 (u16)BIOS_GET_FIELD(
212 pwr_sensor_table_entry.sensor_param0,
213 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT0_MOHM);
214
215 pwr_device_data.ina3221.r_shuntm_ohm[1].rshunt_value =
216 (u16)BIOS_GET_FIELD(
217 pwr_sensor_table_entry.sensor_param0,
218 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT1_MOHM);
219
220 pwr_device_data.ina3221.r_shuntm_ohm[2].rshunt_value =
221 (u16)BIOS_GET_FIELD(
222 pwr_sensor_table_entry.sensor_param1,
223 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_RSHUNT2_MOHM);
224 pwr_device_data.ina3221.configuration =
225 (u16)BIOS_GET_FIELD(
226 pwr_sensor_table_entry.sensor_param1,
227 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_CONFIGURATION);
228
229 pwr_device_data.ina3221.mask_enable =
230 (u16)BIOS_GET_FIELD(
231 pwr_sensor_table_entry.sensor_param2,
232 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_MASKENABLE);
233
234 pwr_device_data.ina3221.gpio_function =
235 (u8)BIOS_GET_FIELD(
236 pwr_sensor_table_entry.sensor_param2,
237 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_GPIOFUNCTION);
238
239 pwr_device_data.ina3221.curr_correct_m =
240 (u16)BIOS_GET_FIELD(
241 pwr_sensor_table_entry.sensor_param3,
242 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_M);
243
244 pwr_device_data.ina3221.curr_correct_b =
245 (u16)BIOS_GET_FIELD(
246 pwr_sensor_table_entry.sensor_param3,
247 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_B);
248
249 if (!pwr_device_data.ina3221.curr_correct_m) {
250 pwr_device_data.ina3221.curr_correct_m = (1 << 12);
251 }
252 pwr_device_size = sizeof(struct pwr_device_ina3221);
253 } else
254 continue;
255
256 pwr_device_data.boardobj.type = CTRL_PMGR_PWR_DEVICE_TYPE_INA3221;
257 pwr_device_data.pwrdev.power_rail = (u8)0;
258
259 boardobj = construct_pwr_device(g, &pwr_device_data,
260 pwr_device_size, pwr_device_data.boardobj.type);
261
262 if (!boardobj) {
263 nvgpu_err(g,
264 "unable to create pwr device for %d type %d", index, pwr_device_data.boardobj.type);
265 status = -EINVAL;
266 goto done;
267 }
268
269 status = boardobjgrp_objinsert(&ppwrdeviceobjs->super.super,
270 boardobj, obj_index);
271
272 if (status) {
273 nvgpu_err(g,
274 "unable to insert pwr device boardobj for %d", index);
275 status = -EINVAL;
276 goto done;
277 }
278
279 ++obj_index;
280 }
281
282done:
283 gk20a_dbg_info(" done status %x", status);
284 return status;
285}
286
287u32 pmgr_device_sw_setup(struct gk20a *g)
288{
289 u32 status;
290 struct boardobjgrp *pboardobjgrp = NULL;
291 struct pwr_devices *ppwrdeviceobjs;
292
293 /* Construct the Super Class and override the Interfaces */
294 status = boardobjgrpconstruct_e32(g, &g->pmgr_pmu.pmgr_deviceobjs.super);
295 if (status) {
296 nvgpu_err(g,
297 "error creating boardobjgrp for pmgr devices, status - 0x%x",
298 status);
299 goto done;
300 }
301
302 pboardobjgrp = &g->pmgr_pmu.pmgr_deviceobjs.super.super;
303 ppwrdeviceobjs = &(g->pmgr_pmu.pmgr_deviceobjs);
304
305 /* Override the Interfaces */
306 pboardobjgrp->pmudatainstget = _pwr_device_pmudata_instget;
307
308 status = devinit_get_pwr_device_table(g, ppwrdeviceobjs);
309 if (status)
310 goto done;
311
312done:
313 gk20a_dbg_info(" done status %x", status);
314 return status;
315}
diff --git a/drivers/gpu/nvgpu/pmgr/pwrdev.h b/drivers/gpu/nvgpu/pmgr/pwrdev.h
new file mode 100644
index 00000000..1d9acb89
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrdev.h
@@ -0,0 +1,60 @@
1/*
2 * general power device structures & definitions
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#ifndef _PWRDEV_H_
25#define _PWRDEV_H_
26
27#include "boardobj/boardobj.h"
28#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
29#include "ctrl/ctrlpmgr.h"
30
31#define PWRDEV_I2CDEV_DEVICE_INDEX_NONE (0xFF)
32
33#define PWR_DEVICE_PROV_NUM_DEFAULT 1
34
/* Base class for a power-sensing device, embedding a boardobj super. */
struct pwr_device {
	struct boardobj super;
	u8 power_rail;			/* rail this device samples */
	u8 i2c_dev_idx;			/* index into the I2C device table */
	bool bIs_inforom_config;	/* device configured from InfoROM */
	/* power correction factor; set to (1 << 12) at construction,
	 * presumably fixed-point unity - confirm against PMU ucode */
	u32 power_corr_factor;
};

/* Group (boardobjgrp_e32) of all constructed pwr_device objects. */
struct pwr_devices {
	struct boardobjgrp_e32 super;
};

/* TI INA3221 three-channel current/voltage monitor device. */
struct pwr_device_ina3221 {
	struct pwr_device super;
	/* per-channel shunt resistor description (value in milliohms) */
	struct ctrl_pmgr_pwr_device_info_rshunt
		r_shuntm_ohm[NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM];
	u16 configuration;	/* INA3221 configuration register value */
	u16 mask_enable;	/* INA3221 mask/enable register value */
	u8 gpio_function;	/* GPIO function from the VBIOS entry */
	u16 curr_correct_m;	/* current correction slope (FXP, 1<<12 == 1.0) */
	s16 curr_correct_b;	/* current correction offset */
} ;
57
58u32 pmgr_device_sw_setup(struct gk20a *g);
59
60#endif
diff --git a/drivers/gpu/nvgpu/pmgr/pwrmonitor.c b/drivers/gpu/nvgpu/pmgr/pwrmonitor.c
new file mode 100644
index 00000000..00c930a6
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrmonitor.c
@@ -0,0 +1,370 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/bios.h>
24
25#include "gk20a/gk20a.h"
26#include "pwrdev.h"
27#include "boardobj/boardobjgrp.h"
28#include "boardobj/boardobjgrp_e32.h"
29#include "gp106/bios_gp106.h"
30
31static u32 _pwr_channel_pmudata_instget(struct gk20a *g,
32 struct nv_pmu_boardobjgrp *pmuboardobjgrp,
33 struct nv_pmu_boardobj **ppboardobjpmudata,
34 u8 idx)
35{
36 struct nv_pmu_pmgr_pwr_channel_desc *ppmgrchannel =
37 (struct nv_pmu_pmgr_pwr_channel_desc *)pmuboardobjgrp;
38
39 gk20a_dbg_info("");
40
41 /*check whether pmuboardobjgrp has a valid boardobj in index*/
42 if (((u32)BIT(idx) &
43 ppmgrchannel->hdr.data.super.obj_mask.super.data[0]) == 0)
44 return -EINVAL;
45
46 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
47 &ppmgrchannel->channels[idx].data.board_obj;
48
49 /* handle Global/common data here as we need index */
50 ppmgrchannel->channels[idx].data.pwr_channel.ch_idx = idx;
51
52 gk20a_dbg_info(" Done");
53
54 return 0;
55}
56
57static u32 _pwr_channel_rels_pmudata_instget(struct gk20a *g,
58 struct nv_pmu_boardobjgrp *pmuboardobjgrp,
59 struct nv_pmu_boardobj **ppboardobjpmudata,
60 u8 idx)
61{
62 struct nv_pmu_pmgr_pwr_chrelationship_desc *ppmgrchrels =
63 (struct nv_pmu_pmgr_pwr_chrelationship_desc *)pmuboardobjgrp;
64
65 gk20a_dbg_info("");
66
67 /*check whether pmuboardobjgrp has a valid boardobj in index*/
68 if (((u32)BIT(idx) &
69 ppmgrchrels->hdr.data.super.obj_mask.super.data[0]) == 0)
70 return -EINVAL;
71
72 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
73 &ppmgrchrels->ch_rels[idx].data.board_obj;
74
75 gk20a_dbg_info(" Done");
76
77 return 0;
78}
79
/*
 * Reset runtime state for every constructed PWR_CHANNEL: clears each
 * channel's dependent-channel mask. Returns 0 on success, or -EINVAL if a
 * channel set in the group's object mask cannot be fetched back.
 */
static u32 _pwr_channel_state_init(struct gk20a *g)
{
	u8 indx = 0;
	struct pwr_channel *pchannel;
	/* iterate only over indices of objects actually constructed */
	u32 objmask =
		g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super.objmask;

	/* Initialize each PWR_CHANNEL's dependent channel mask */
	BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, indx, objmask) {
		pchannel = PMGR_PWR_MONITOR_GET_PWR_CHANNEL(g, indx);
		if (pchannel == NULL) {
			/* mask says the object exists; lookup failure is fatal */
			nvgpu_err(g,
				"PMGR_PWR_MONITOR_GET_PWR_CHANNEL-failed %d", indx);
			return -EINVAL;
		}
		pchannel->dependent_ch_mask =0;
	}
	BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END

	return 0;
}
101
102static bool _pwr_channel_implements(struct pwr_channel *pchannel,
103 u8 type)
104{
105 return (type == BOARDOBJ_GET_TYPE(pchannel));
106}
107
108static u32 _pwr_domains_pmudatainit_sensor(struct gk20a *g,
109 struct boardobj *board_obj_ptr,
110 struct nv_pmu_boardobj *ppmudata)
111{
112 struct nv_pmu_pmgr_pwr_channel_sensor *pmu_sensor_data;
113 struct pwr_channel_sensor *sensor;
114 u32 status = 0;
115
116 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
117 if (status) {
118 nvgpu_err(g,
119 "error updating pmu boardobjgrp for pwr sensor 0x%x",
120 status);
121 goto done;
122 }
123
124 sensor = (struct pwr_channel_sensor *)board_obj_ptr;
125 pmu_sensor_data = (struct nv_pmu_pmgr_pwr_channel_sensor *) ppmudata;
126
127 pmu_sensor_data->super.pwr_rail = sensor->super.pwr_rail;
128 pmu_sensor_data->super.volt_fixedu_v = sensor->super.volt_fixed_uv;
129 pmu_sensor_data->super.pwr_corr_slope = sensor->super.pwr_corr_slope;
130 pmu_sensor_data->super.pwr_corr_offsetm_w = sensor->super.pwr_corr_offset_mw;
131 pmu_sensor_data->super.curr_corr_slope = sensor->super.curr_corr_slope;
132 pmu_sensor_data->super.curr_corr_offsetm_a = sensor->super.curr_corr_offset_ma;
133 pmu_sensor_data->super.dependent_ch_mask = sensor->super.dependent_ch_mask;
134 pmu_sensor_data->super.ch_idx = 0;
135
136 pmu_sensor_data->pwr_dev_idx = sensor->pwr_dev_idx;
137 pmu_sensor_data->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx;
138
139done:
140 return status;
141}
142
143static struct boardobj *construct_pwr_topology(struct gk20a *g,
144 void *pargs, u16 pargs_size, u8 type)
145{
146 struct boardobj *board_obj_ptr = NULL;
147 u32 status;
148 struct pwr_channel_sensor *pwrchannel;
149 struct pwr_channel_sensor *sensor = (struct pwr_channel_sensor*)pargs;
150
151 status = boardobj_construct_super(g, &board_obj_ptr,
152 pargs_size, pargs);
153 if (status)
154 return NULL;
155
156 pwrchannel = (struct pwr_channel_sensor*)board_obj_ptr;
157
158 /* Set Super class interfaces */
159 board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_sensor;
160
161 pwrchannel->super.pwr_rail = sensor->super.pwr_rail;
162 pwrchannel->super.volt_fixed_uv = sensor->super.volt_fixed_uv;
163 pwrchannel->super.pwr_corr_slope = sensor->super.pwr_corr_slope;
164 pwrchannel->super.pwr_corr_offset_mw = sensor->super.pwr_corr_offset_mw;
165 pwrchannel->super.curr_corr_slope = sensor->super.curr_corr_slope;
166 pwrchannel->super.curr_corr_offset_ma = sensor->super.curr_corr_offset_ma;
167 pwrchannel->super.dependent_ch_mask = 0;
168
169 pwrchannel->pwr_dev_idx = sensor->pwr_dev_idx;
170 pwrchannel->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx;
171
172 gk20a_dbg_info(" Done");
173
174 return board_obj_ptr;
175}
176
177static u32 devinit_get_pwr_topology_table(struct gk20a *g,
178 struct pmgr_pwr_monitor *ppwrmonitorobjs)
179{
180 u32 status = 0;
181 u8 *pwr_topology_table_ptr = NULL;
182 u8 *curr_pwr_topology_table_ptr = NULL;
183 struct boardobj *boardobj;
184 struct pwr_topology_2x_header pwr_topology_table_header = { 0 };
185 struct pwr_topology_2x_entry pwr_topology_table_entry = { 0 };
186 u32 index;
187 u32 obj_index = 0;
188 u16 pwr_topology_size;
189 union {
190 struct boardobj boardobj;
191 struct pwr_channel pwrchannel;
192 struct pwr_channel_sensor sensor;
193 } pwr_topology_data;
194
195 gk20a_dbg_info("");
196
197 pwr_topology_table_ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
198 g->bios.perf_token, POWER_TOPOLOGY_TABLE);
199 if (pwr_topology_table_ptr == NULL) {
200 status = -EINVAL;
201 goto done;
202 }
203
204 memcpy(&pwr_topology_table_header, pwr_topology_table_ptr,
205 VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06);
206
207 if (pwr_topology_table_header.version !=
208 VBIOS_POWER_TOPOLOGY_VERSION_2X) {
209 status = -EINVAL;
210 goto done;
211 }
212
213 g->pmgr_pmu.pmgr_monitorobjs.b_is_topology_tbl_ver_1x = false;
214
215 if (pwr_topology_table_header.header_size <
216 VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06) {
217 status = -EINVAL;
218 goto done;
219 }
220
221 if (pwr_topology_table_header.table_entry_size !=
222 VBIOS_POWER_TOPOLOGY_2X_ENTRY_SIZE_16) {
223 status = -EINVAL;
224 goto done;
225 }
226
227 curr_pwr_topology_table_ptr = (pwr_topology_table_ptr +
228 VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06);
229
230 for (index = 0; index < pwr_topology_table_header.num_table_entries;
231 index++) {
232 u8 class_type;
233
234 curr_pwr_topology_table_ptr += (pwr_topology_table_header.table_entry_size * index);
235
236 pwr_topology_table_entry.flags0 = *curr_pwr_topology_table_ptr;
237 pwr_topology_table_entry.pwr_rail = *(curr_pwr_topology_table_ptr + 1);
238
239 memcpy(&pwr_topology_table_entry.param0,
240 (curr_pwr_topology_table_ptr + 2),
241 (VBIOS_POWER_TOPOLOGY_2X_ENTRY_SIZE_16 - 2));
242
243 class_type = (u8)BIOS_GET_FIELD(
244 pwr_topology_table_entry.flags0,
245 NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS);
246
247 if (class_type == NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS_SENSOR) {
248 pwr_topology_data.sensor.pwr_dev_idx = (u8)BIOS_GET_FIELD(
249 pwr_topology_table_entry.param1,
250 NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_INDEX);
251 pwr_topology_data.sensor.pwr_dev_prov_idx = (u8)BIOS_GET_FIELD(
252 pwr_topology_table_entry.param1,
253 NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_PROVIDER_INDEX);
254
255 pwr_topology_size = sizeof(struct pwr_channel_sensor);
256 } else
257 continue;
258
259 /* Initialize data for the parent class */
260 pwr_topology_data.boardobj.type = CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR;
261 pwr_topology_data.pwrchannel.pwr_rail = (u8)pwr_topology_table_entry.pwr_rail;
262 pwr_topology_data.pwrchannel.volt_fixed_uv = pwr_topology_table_entry.param0;
263 pwr_topology_data.pwrchannel.pwr_corr_slope = (1 << 12);
264 pwr_topology_data.pwrchannel.pwr_corr_offset_mw = 0;
265 pwr_topology_data.pwrchannel.curr_corr_slope =
266 (u32)pwr_topology_table_entry.curr_corr_slope;
267 pwr_topology_data.pwrchannel.curr_corr_offset_ma =
268 (s32)pwr_topology_table_entry.curr_corr_offset;
269
270 boardobj = construct_pwr_topology(g, &pwr_topology_data,
271 pwr_topology_size, pwr_topology_data.boardobj.type);
272
273 if (!boardobj) {
274 nvgpu_err(g,
275 "unable to create pwr topology for %d type %d",
276 index, pwr_topology_data.boardobj.type);
277 status = -EINVAL;
278 goto done;
279 }
280
281 status = boardobjgrp_objinsert(&ppwrmonitorobjs->pwr_channels.super,
282 boardobj, obj_index);
283
284 if (status) {
285 nvgpu_err(g,
286 "unable to insert pwr topology boardobj for %d", index);
287 status = -EINVAL;
288 goto done;
289 }
290
291 ++obj_index;
292 }
293
294done:
295 gk20a_dbg_info(" done status %x", status);
296 return status;
297}
298
/*
 * One-time SW setup for PMGR power monitoring: constructs the channel and
 * channel-relationship boardobjgrps, installs their PMU instance-get
 * interfaces, parses the VBIOS power topology table and derives the
 * physical-channel mask. Returns 0 on success.
 */
u32 pmgr_monitor_sw_setup(struct gk20a *g)
{
	u32 status;
	struct boardobjgrp *pboardobjgrp = NULL;
	struct pwr_channel *pchannel;
	struct pmgr_pwr_monitor *ppwrmonitorobjs;
	u8 indx = 0;

	/* Construct the Super Class and override the Interfaces */
	status = boardobjgrpconstruct_e32(g,
			&g->pmgr_pmu.pmgr_monitorobjs.pwr_channels);
	if (status) {
		nvgpu_err(g,
			"error creating boardobjgrp for pmgr channel, status - 0x%x",
			status);
		goto done;
	}

	pboardobjgrp = &(g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super);

	/* Override the Interfaces */
	pboardobjgrp->pmudatainstget = _pwr_channel_pmudata_instget;

	/* Construct the Super Class and override the Interfaces */
	status = boardobjgrpconstruct_e32(g,
			&g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels);
	if (status) {
		nvgpu_err(g,
			"error creating boardobjgrp for pmgr channel relationship, status - 0x%x",
			status);
		goto done;
	}

	pboardobjgrp = &(g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super);

	/* Override the Interfaces */
	pboardobjgrp->pmudatainstget = _pwr_channel_rels_pmudata_instget;

	/* Initialize the Total GPU Power Channel Mask to 0 */
	g->pmgr_pmu.pmgr_monitorobjs.pmu_data.channels.hdr.data.total_gpu_power_channel_mask = 0;
	g->pmgr_pmu.pmgr_monitorobjs.total_gpu_channel_idx =
		CTRL_PMGR_PWR_CHANNEL_INDEX_INVALID;

	/* Supported topology table version 1.0 */
	/* default to 1.x; devinit_get_pwr_topology_table() clears this flag
	 * once a 2.x table header is validated */
	g->pmgr_pmu.pmgr_monitorobjs.b_is_topology_tbl_ver_1x = true;

	ppwrmonitorobjs = &(g->pmgr_pmu.pmgr_monitorobjs);

	status = devinit_get_pwr_topology_table(g, ppwrmonitorobjs);
	if (status)
		goto done;

	/* clear per-channel dependency masks before use */
	status = _pwr_channel_state_init(g);
	if (status)
		goto done;

	/* Initialise physicalChannelMask */
	g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask = 0;

	pboardobjgrp = &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super;

	/* every SENSOR-type channel counts as a physical channel */
	BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct pwr_channel *, pchannel, indx) {
		if (_pwr_channel_implements(pchannel,
			CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR)) {
			g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask |= BIT(indx);
		}
	}

done:
	gk20a_dbg_info(" done status %x", status);
	return status;
}
diff --git a/drivers/gpu/nvgpu/pmgr/pwrmonitor.h b/drivers/gpu/nvgpu/pmgr/pwrmonitor.h
new file mode 100644
index 00000000..4f094c3e
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrmonitor.h
@@ -0,0 +1,69 @@
1/*
2 * general power channel structures & definitions
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#ifndef _PWRMONITOR_H_
25#define _PWRMONITOR_H_
26
27#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
28#include "boardobj/boardobjgrp.h"
29#include "boardobj/boardobj.h"
30#include "ctrl/ctrlpmgr.h"
31
/* Base class for a logical power channel monitored by the PMU. */
struct pwr_channel {
	struct boardobj super;
	u8 pwr_rail;			/* VBIOS power rail identifier */
	u32 volt_fixed_uv;		/* fixed voltage in microvolts */
	/* power correction slope; set to (1 << 12) at construction,
	 * presumably fixed-point unity - confirm against PMU ucode */
	u32 pwr_corr_slope;
	s32 pwr_corr_offset_mw;		/* power correction offset, mW */
	u32 curr_corr_slope;		/* current correction slope (FXP) */
	s32 curr_corr_offset_ma;	/* current correction offset, mA */
	u32 dependent_ch_mask;		/* mask of channels this one depends on */
};

/* Relationship between two power channels (currently unpopulated here). */
struct pwr_chrelationship {
	struct boardobj super;
	u8 chIdx;			/* index of the related channel */
};

/* Channel backed directly by a pwr_device sensor reading. */
struct pwr_channel_sensor {
	struct pwr_channel super;
	u8 pwr_dev_idx;			/* index into the pwr_devices group */
	u8 pwr_dev_prov_idx;		/* provider index within that device */
};

/* Top-level SW state for PMGR power monitoring. */
struct pmgr_pwr_monitor {
	bool b_is_topology_tbl_ver_1x;	/* no 2.x topology table was found */
	struct boardobjgrp_e32 pwr_channels;	/* all pwr_channel objects */
	struct boardobjgrp_e32 pwr_ch_rels;	/* channel relationships */
	u8 total_gpu_channel_idx;	/* index of total-GPU-power channel */
	u32 physical_channel_mask;	/* mask of SENSOR-type channels */
	struct nv_pmu_pmgr_pwr_monitor_pack pmu_data;	/* PMU-side mirror */
};
62
63#define PMGR_PWR_MONITOR_GET_PWR_CHANNEL(g, channel_idx) \
64 ((struct pwr_channel *)BOARDOBJGRP_OBJ_GET_BY_IDX( \
65 &(g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super), (channel_idx)))
66
67u32 pmgr_monitor_sw_setup(struct gk20a *g);
68
69#endif
diff --git a/drivers/gpu/nvgpu/pmgr/pwrpolicy.c b/drivers/gpu/nvgpu/pmgr/pwrpolicy.c
new file mode 100644
index 00000000..420eda4f
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrpolicy.c
@@ -0,0 +1,781 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/bios.h>
24#include <nvgpu/bug.h>
25
26#include "gk20a/gk20a.h"
27#include "pwrpolicy.h"
28#include "boardobj/boardobjgrp.h"
29#include "boardobj/boardobjgrp_e32.h"
30#include "gp106/bios_gp106.h"
31#include "common/linux/os_linux.h"
32#include "common/linux/platform_gk20a.h"
33
/* Current arbitrated output value of a limit-arbitration structure. */
#define _pwr_policy_limitarboutputget_helper(p_limit_arb) ((p_limit_arb)->output)

/*
 * Apply a signed delta to an unsigned limit, clamping the result at 0.
 * Every argument reference is parenthesized: the previous form cast
 * (s32)limit without parentheses, which mis-binds when the caller
 * passes an expression (e.g. a + b expands to ((s32)a + b)).
 */
#define _pwr_policy_limitdeltaapply(limit, delta) \
	((u32)max(((s32)(limit)) + (delta), 0))
36
37static u32 _pwr_policy_limitarbinputset_helper(struct gk20a *g,
38 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb,
39 u8 client_idx,
40 u32 limit_value)
41{
42 u8 indx;
43 bool b_found = false;
44 u32 status = 0;
45 u32 output = limit_value;
46
47 for (indx = 0; indx< p_limit_arb->num_inputs; indx++) {
48 if (p_limit_arb->inputs[indx].pwr_policy_idx == client_idx) {
49 p_limit_arb->inputs[indx].limit_value = limit_value;
50 b_found = true;
51 } else if (p_limit_arb->b_arb_max) {
52 output = max(output, p_limit_arb->inputs[indx].limit_value);
53 } else {
54 output = min(output, p_limit_arb->inputs[indx].limit_value);
55 }
56 }
57
58 if (!b_found) {
59 if (p_limit_arb->num_inputs <
60 CTRL_PMGR_PWR_POLICY_MAX_LIMIT_INPUTS) {
61 p_limit_arb->inputs[
62 p_limit_arb->num_inputs].pwr_policy_idx = client_idx;
63 p_limit_arb->inputs[
64 p_limit_arb->num_inputs].limit_value = limit_value;
65 p_limit_arb->num_inputs++;
66 } else {
67 nvgpu_err(g, "No entries remaining for clientIdx=%d",
68 client_idx);
69 status = -EINVAL;
70 }
71 }
72
73 if (!status) {
74 p_limit_arb->output = output;
75 }
76
77 return status;
78}
79
80static u32 _pwr_policy_limitid_translate(struct gk20a *g,
81 struct pwr_policy *ppolicy,
82 enum pwr_policy_limit_id limit_id,
83 struct ctrl_pmgr_pwr_policy_limit_arbitration **p_limit_arb,
84 struct ctrl_pmgr_pwr_policy_limit_arbitration **p_limit_arb_sec)
85{
86 u32 status = 0;
87
88 switch (limit_id) {
89 case PWR_POLICY_LIMIT_ID_MIN:
90 *p_limit_arb = &ppolicy->limit_arb_min;
91 break;
92
93 case PWR_POLICY_LIMIT_ID_RATED:
94 *p_limit_arb = &ppolicy->limit_arb_rated;
95
96 if (p_limit_arb_sec != NULL) {
97 *p_limit_arb_sec = &ppolicy->limit_arb_curr;
98 }
99 break;
100
101 case PWR_POLICY_LIMIT_ID_MAX:
102 *p_limit_arb = &ppolicy->limit_arb_max;
103 break;
104
105 case PWR_POLICY_LIMIT_ID_CURR:
106 *p_limit_arb = &ppolicy->limit_arb_curr;
107 break;
108
109 case PWR_POLICY_LIMIT_ID_BATT:
110 *p_limit_arb = &ppolicy->limit_arb_batt;
111 break;
112
113 default:
114 nvgpu_err(g, "Unsupported limitId=%d",
115 limit_id);
116 status = -EINVAL;
117 break;
118 }
119
120 return status;
121}
122
123static u32 _pwr_policy_limitarbinputset(struct gk20a *g,
124 struct pwr_policy *ppolicy,
125 enum pwr_policy_limit_id limit_id,
126 u8 client_idx,
127 u32 limit)
128{
129 u32 status = 0;
130 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb = NULL;
131 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb_sec = NULL;
132
133 status = _pwr_policy_limitid_translate(g,
134 ppolicy,
135 limit_id,
136 &p_limit_arb,
137 &p_limit_arb_sec);
138 if (status) {
139 goto exit;
140 }
141
142 status = _pwr_policy_limitarbinputset_helper(g, p_limit_arb, client_idx, limit);
143 if (status) {
144 nvgpu_err(g,
145 "Error setting client limit value: status=0x%08x, limitId=0x%x, clientIdx=0x%x, limit=%d",
146 status, limit_id, client_idx, limit);
147 goto exit;
148 }
149
150 if (NULL != p_limit_arb_sec) {
151 status = _pwr_policy_limitarbinputset_helper(g, p_limit_arb_sec,
152 CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
153 _pwr_policy_limitarboutputget_helper(p_limit_arb));
154 }
155
156exit:
157 return status;
158}
159
160static inline void _pwr_policy_limitarbconstruct(
161 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb,
162 bool b_arb_max)
163{
164 p_limit_arb->num_inputs = 0;
165 p_limit_arb->b_arb_max = b_arb_max;
166}
167
168static u32 _pwr_policy_limitarboutputget(struct gk20a *g,
169 struct pwr_policy *ppolicy,
170 enum pwr_policy_limit_id limit_id)
171{
172 u32 status = 0;
173 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb = NULL;
174
175 status = _pwr_policy_limitid_translate(g,
176 ppolicy,
177 limit_id,
178 &p_limit_arb,
179 NULL);
180 if (status) {
181 return 0;
182 }
183
184 return _pwr_policy_limitarboutputget_helper(p_limit_arb);
185}
186
/*
 * boardobj pmudatainit handler shared by HW_THRESHOLD and SW_THRESHOLD
 * policies: fills the PMU-facing policy structure from the SW object,
 * applying limit_delta to each arbitrated limit output (clamped at 0
 * by _pwr_policy_limitdeltaapply).
 */
static u32 _pwr_domains_pmudatainit_hw_threshold(struct gk20a *g,
		struct boardobj *board_obj_ptr,
		struct nv_pmu_boardobj *ppmudata)
{
	struct nv_pmu_pmgr_pwr_policy_hw_threshold *pmu_hw_threshold_data;
	struct pwr_policy_hw_threshold *p_hw_threshold;
	struct pwr_policy *p_pwr_policy;
	struct nv_pmu_pmgr_pwr_policy *pmu_pwr_policy;
	u32 status = 0;

	status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
	if (status) {
		nvgpu_err(g,
			"error updating pmu boardobjgrp for pwr sensor 0x%x",
			status);
		/* NOTE(review): the real failure code is replaced with
		 * -ENOMEM regardless of the cause — confirm intent. */
		status = -ENOMEM;
		goto done;
	}

	/* ppmudata is viewed through all three PMU layouts; they share a
	 * common leading layout. */
	p_hw_threshold = (struct pwr_policy_hw_threshold *)board_obj_ptr;
	pmu_hw_threshold_data = (struct nv_pmu_pmgr_pwr_policy_hw_threshold *) ppmudata;
	pmu_pwr_policy = (struct nv_pmu_pmgr_pwr_policy *) ppmudata;
	p_pwr_policy = (struct pwr_policy *)&(p_hw_threshold->super.super);

	/* NOTE(review): channel index sent to the PMU is hard-coded to 0
	 * rather than taken from p_pwr_policy->ch_idx — confirm. */
	pmu_pwr_policy->ch_idx = 0;
	pmu_pwr_policy->limit_unit = p_pwr_policy->limit_unit;
	pmu_pwr_policy->num_limit_inputs = p_pwr_policy->num_limit_inputs;

	/* Each PMU limit = arbitrated output + limit_delta, clamped at 0. */
	pmu_pwr_policy->limit_min = _pwr_policy_limitdeltaapply(
		_pwr_policy_limitarboutputget(g, p_pwr_policy,
			PWR_POLICY_LIMIT_ID_MIN),
		p_pwr_policy->limit_delta);

	pmu_pwr_policy->limit_max = _pwr_policy_limitdeltaapply(
		_pwr_policy_limitarboutputget(g, p_pwr_policy,
			PWR_POLICY_LIMIT_ID_MAX),
		p_pwr_policy->limit_delta);

	pmu_pwr_policy->limit_curr = _pwr_policy_limitdeltaapply(
		_pwr_policy_limitarboutputget(g, p_pwr_policy,
			PWR_POLICY_LIMIT_ID_CURR),
		p_pwr_policy->limit_delta);

	memcpy(&pmu_pwr_policy->integral, &p_pwr_policy->integral,
		sizeof(struct ctrl_pmgr_pwr_policy_info_integral));

	pmu_pwr_policy->sample_mult = p_pwr_policy->sample_mult;
	pmu_pwr_policy->filter_type = p_pwr_policy->filter_type;
	pmu_pwr_policy->filter_param = p_pwr_policy->filter_param;

	pmu_hw_threshold_data->threshold_idx = p_hw_threshold->threshold_idx;
	pmu_hw_threshold_data->low_threshold_idx = p_hw_threshold->low_threshold_idx;
	pmu_hw_threshold_data->b_use_low_threshold = p_hw_threshold->b_use_low_threshold;
	pmu_hw_threshold_data->low_threshold_value = p_hw_threshold->low_threshold_value;

	/* SW_THRESHOLD extends the HW_THRESHOLD layout with an event id. */
	if (BOARDOBJ_GET_TYPE(board_obj_ptr) ==
		CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD) {
		struct nv_pmu_pmgr_pwr_policy_sw_threshold *pmu_sw_threshold_data;
		struct pwr_policy_sw_threshold *p_sw_threshold;

		p_sw_threshold = (struct pwr_policy_sw_threshold *)board_obj_ptr;
		pmu_sw_threshold_data =
			(struct nv_pmu_pmgr_pwr_policy_sw_threshold *) ppmudata;
		pmu_sw_threshold_data->event_id =
			p_sw_threshold->event_id;
	}
done:
	return status;
}
256
/*
 * Construct a pwr_policy board object (HW_THRESHOLD or SW_THRESHOLD)
 * from the caller-filled pargs union, setting up all five limit
 * arbitration structures with the RM client's initial inputs.
 *
 * Returns the new board object, or NULL if the super-class
 * construction fails.
 *
 * NOTE(review): the status values from the
 * _pwr_policy_limitarbinputset() calls below are assigned but never
 * checked, so a failed arbitration input is silently ignored —
 * confirm whether this is intentional.
 */
static struct boardobj *construct_pwr_policy(struct gk20a *g,
		void *pargs, u16 pargs_size, u8 type)
{
	struct boardobj *board_obj_ptr = NULL;
	u32 status;
	struct pwr_policy_hw_threshold *pwrpolicyhwthreshold;
	struct pwr_policy *pwrpolicy;
	struct pwr_policy *pwrpolicyparams = (struct pwr_policy*)pargs;
	struct pwr_policy_hw_threshold *hwthreshold = (struct pwr_policy_hw_threshold*)pargs;

	status = boardobj_construct_super(g, &board_obj_ptr,
		pargs_size, pargs);
	if (status)
		return NULL;

	pwrpolicyhwthreshold = (struct pwr_policy_hw_threshold*)board_obj_ptr;
	pwrpolicy = (struct pwr_policy *)board_obj_ptr;

	gk20a_dbg_fn("min=%u rated=%u max=%u",
		pwrpolicyparams->limit_min,
		pwrpolicyparams->limit_rated,
		pwrpolicyparams->limit_max);

	/* Set Super class interfaces */
	board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_hw_threshold;

	pwrpolicy->ch_idx = pwrpolicyparams->ch_idx;
	pwrpolicy->num_limit_inputs = 0;
	pwrpolicy->limit_unit = pwrpolicyparams->limit_unit;
	pwrpolicy->filter_type = (enum ctrl_pmgr_pwr_policy_filter_type)(pwrpolicyparams->filter_type);
	pwrpolicy->sample_mult = pwrpolicyparams->sample_mult;
	/* Copy only the filter parameter relevant to the filter type. */
	switch (pwrpolicy->filter_type)
	{
		case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_NONE:
			break;

		case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_BLOCK:
			pwrpolicy->filter_param.block.block_size =
				pwrpolicyparams->filter_param.block.block_size;
			break;

		case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_MOVING_AVERAGE:
			pwrpolicy->filter_param.moving_avg.window_size =
				pwrpolicyparams->filter_param.moving_avg.window_size;
			break;

		case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_IIR:
			pwrpolicy->filter_param.iir.divisor = pwrpolicyparams->filter_param.iir.divisor;
			break;

		default:
			nvgpu_err(g, "Error: unrecognized Power Policy filter type: %d",
				pwrpolicy->filter_type);
	}

	_pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_curr, false);

	pwrpolicy->limit_delta = 0;

	/* MIN arbitrates with max(); the others arbitrate with min(). */
	_pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_min, true);
	status = _pwr_policy_limitarbinputset(g,
			pwrpolicy,
			PWR_POLICY_LIMIT_ID_MIN,
			CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
			pwrpolicyparams->limit_min);

	_pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_max, false);
	status = _pwr_policy_limitarbinputset(g,
			pwrpolicy,
			PWR_POLICY_LIMIT_ID_MAX,
			CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
			pwrpolicyparams->limit_max);

	_pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_rated, false);
	status = _pwr_policy_limitarbinputset(g,
			pwrpolicy,
			PWR_POLICY_LIMIT_ID_RATED,
			CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
			pwrpolicyparams->limit_rated);

	/* A zero battery limit means "unlimited". */
	_pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_batt, false);
	status = _pwr_policy_limitarbinputset(g,
			pwrpolicy,
			PWR_POLICY_LIMIT_ID_BATT,
			CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
			((pwrpolicyparams->limit_batt != 0) ?
				pwrpolicyparams->limit_batt:
				CTRL_PMGR_PWR_POLICY_LIMIT_MAX));

	memcpy(&pwrpolicy->integral, &pwrpolicyparams->integral,
		sizeof(struct ctrl_pmgr_pwr_policy_info_integral));

	/* HW-threshold fields are copied unconditionally; the SW_THRESHOLD
	 * subtype shares this leading layout (see pwrpolicy.h). */
	pwrpolicyhwthreshold->threshold_idx = hwthreshold->threshold_idx;
	pwrpolicyhwthreshold->b_use_low_threshold = hwthreshold->b_use_low_threshold;
	pwrpolicyhwthreshold->low_threshold_idx = hwthreshold->low_threshold_idx;
	pwrpolicyhwthreshold->low_threshold_value = hwthreshold->low_threshold_value;

	if (type == CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD) {
		struct pwr_policy_sw_threshold *pwrpolicyswthreshold;
		struct pwr_policy_sw_threshold *swthreshold =
			(struct pwr_policy_sw_threshold*)pargs;

		pwrpolicyswthreshold = (struct pwr_policy_sw_threshold*)board_obj_ptr;
		pwrpolicyswthreshold->event_id = swthreshold->event_id;
	}

	gk20a_dbg_info(" Done");

	return board_obj_ptr;
}
367
368static u32 _pwr_policy_construct_WAR_SW_Threshold_policy(struct gk20a *g,
369 struct pmgr_pwr_policy *ppwrpolicyobjs,
370 union pwr_policy_data_union *ppwrpolicydata,
371 u16 pwr_policy_size,
372 u32 obj_index)
373{
374 u32 status = 0;
375 struct boardobj *boardobj;
376
377 /* WARN policy */
378 ppwrpolicydata->pwrpolicy.limit_unit = 0;
379 ppwrpolicydata->pwrpolicy.limit_min = 10000;
380 ppwrpolicydata->pwrpolicy.limit_rated = 100000;
381 ppwrpolicydata->pwrpolicy.limit_max = 100000;
382 ppwrpolicydata->sw_threshold.threshold_idx = 1;
383 ppwrpolicydata->pwrpolicy.filter_type =
384 CTRL_PMGR_PWR_POLICY_FILTER_TYPE_MOVING_AVERAGE;
385 ppwrpolicydata->pwrpolicy.sample_mult = 5;
386
387 /* Filled the entry.filterParam value in the filterParam */
388 ppwrpolicydata->pwrpolicy.filter_param.moving_avg.window_size = 10;
389
390 ppwrpolicydata->sw_threshold.event_id = 0x01;
391
392 ppwrpolicydata->boardobj.type = CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD;
393
394 boardobj = construct_pwr_policy(g, ppwrpolicydata,
395 pwr_policy_size, ppwrpolicydata->boardobj.type);
396
397 if (!boardobj) {
398 nvgpu_err(g,
399 "unable to create pwr policy for type %d", ppwrpolicydata->boardobj.type);
400 status = -EINVAL;
401 goto done;
402 }
403
404 status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super,
405 boardobj, obj_index);
406
407 if (status) {
408 nvgpu_err(g,
409 "unable to insert pwr policy boardobj for %d", obj_index);
410 status = -EINVAL;
411 goto done;
412 }
413done:
414 return status;
415}
416
/*
 * Naturally aligned copy of the packed VBIOS power policy table 3.x
 * header; filled field-by-field by devinit_unpack_pwr_policy_header().
 */
struct pwr_policy_3x_header_unpacked {
	u8 version;			/* expected VBIOS_POWER_POLICY_VERSION_3X */
	u8 header_size;			/* header size in bytes */
	u8 table_entry_size;		/* size of each policy entry */
	u8 num_table_entries;		/* number of policy entries */
	u16 base_sample_period;
	u16 min_client_sample_period;
	u8 table_rel_entry_size;
	u8 num_table_rel_entries;
	u8 tgp_policy_idx;		/* special policy indexes ... */
	u8 rtp_policy_idx;
	u8 mxm_policy_idx;
	u8 dnotifier_policy_idx;
	u32 d2_limit;			/* external power-state (D2..D5) limits */
	u32 d3_limit;
	u32 d4_limit;
	u32 d5_limit;
	u8 low_sampling_mult;
	u8 pwr_tgt_policy_idx;
	u8 pwr_tgt_floor_policy_idx;
	u8 sm_bus_policy_idx;
	u8 table_viol_entry_size;
	u8 num_table_viol_entries;
};
441
/*
 * Copy one field from the packed (potentially unaligned) VBIOS struct
 * into its naturally aligned unpacked twin; __builtin_memcpy avoids
 * unaligned member access.  NOTE(review): the double-underscore name
 * lives in the implementation-reserved identifier space; kept as-is
 * for compatibility with existing users.
 */
#define __UNPACK_FIELD(unpacked, packed, field) \
	__builtin_memcpy(&unpacked->field, &packed->field, \
		sizeof(unpacked->field))
445
/* Field-by-field copy of the packed 3.x table header into its aligned
 * unpacked twin (order mirrors the struct declarations). */
static inline void devinit_unpack_pwr_policy_header(
	struct pwr_policy_3x_header_unpacked *unpacked,
	struct pwr_policy_3x_header_struct *packed)
{
	__UNPACK_FIELD(unpacked, packed, version);
	__UNPACK_FIELD(unpacked, packed, header_size);
	__UNPACK_FIELD(unpacked, packed, table_entry_size);
	__UNPACK_FIELD(unpacked, packed, num_table_entries);
	__UNPACK_FIELD(unpacked, packed, base_sample_period);
	__UNPACK_FIELD(unpacked, packed, min_client_sample_period);
	__UNPACK_FIELD(unpacked, packed, table_rel_entry_size);
	__UNPACK_FIELD(unpacked, packed, num_table_rel_entries);
	__UNPACK_FIELD(unpacked, packed, tgp_policy_idx);
	__UNPACK_FIELD(unpacked, packed, rtp_policy_idx);
	__UNPACK_FIELD(unpacked, packed, mxm_policy_idx);
	__UNPACK_FIELD(unpacked, packed, dnotifier_policy_idx);
	__UNPACK_FIELD(unpacked, packed, d2_limit);
	__UNPACK_FIELD(unpacked, packed, d3_limit);
	__UNPACK_FIELD(unpacked, packed, d4_limit);
	__UNPACK_FIELD(unpacked, packed, d5_limit);
	__UNPACK_FIELD(unpacked, packed, low_sampling_mult);
	__UNPACK_FIELD(unpacked, packed, pwr_tgt_policy_idx);
	__UNPACK_FIELD(unpacked, packed, pwr_tgt_floor_policy_idx);
	__UNPACK_FIELD(unpacked, packed, sm_bus_policy_idx);
	__UNPACK_FIELD(unpacked, packed, table_viol_entry_size);
	__UNPACK_FIELD(unpacked, packed, num_table_viol_entries);
}
473
/*
 * Naturally aligned copy of one packed VBIOS power policy 3.x table
 * entry; filled by devinit_unpack_pwr_policy_entry().
 */
struct pwr_policy_3x_entry_unpacked {
	u8 flags0;		/* class / limit-unit bit-fields */
	u8 ch_idx;		/* power channel index */
	u32 limit_min;
	u32 limit_rated;
	u32 limit_max;
	u32 param0;		/* class-specific parameters */
	u32 param1;
	u32 param2;
	u32 param3;
	u32 limit_batt;
	u8 flags1;		/* filter-type / integral-control bit-fields */
	u8 past_length;		/* integral control: past sample count */
	u8 next_length;		/* integral control: next sample count */
	u16 ratio_min;
	u16 ratio_max;
	u8 sample_mult;
	u32 filter_param;
};
493
/* Field-by-field copy of a packed 3.x table entry into its aligned
 * unpacked twin (order mirrors the struct declarations). */
static inline void devinit_unpack_pwr_policy_entry(
	struct pwr_policy_3x_entry_unpacked *unpacked,
	struct pwr_policy_3x_entry_struct *packed)
{
	__UNPACK_FIELD(unpacked, packed, flags0);
	__UNPACK_FIELD(unpacked, packed, ch_idx);
	__UNPACK_FIELD(unpacked, packed, limit_min);
	__UNPACK_FIELD(unpacked, packed, limit_rated);
	__UNPACK_FIELD(unpacked, packed, limit_max);
	__UNPACK_FIELD(unpacked, packed, param0);
	__UNPACK_FIELD(unpacked, packed, param1);
	__UNPACK_FIELD(unpacked, packed, param2);
	__UNPACK_FIELD(unpacked, packed, param3);
	__UNPACK_FIELD(unpacked, packed, limit_batt);
	__UNPACK_FIELD(unpacked, packed, flags1);
	__UNPACK_FIELD(unpacked, packed, past_length);
	__UNPACK_FIELD(unpacked, packed, next_length);
	__UNPACK_FIELD(unpacked, packed, ratio_min);
	__UNPACK_FIELD(unpacked, packed, ratio_max);
	__UNPACK_FIELD(unpacked, packed, sample_mult);
	__UNPACK_FIELD(unpacked, packed, filter_param);
}
516
517static u32 devinit_get_pwr_policy_table(struct gk20a *g,
518 struct pmgr_pwr_policy *ppwrpolicyobjs)
519{
520 struct gk20a_platform *platform = gk20a_get_platform(dev_from_gk20a(g));
521 u32 status = 0;
522 u8 *ptr = NULL;
523 struct boardobj *boardobj;
524 struct pwr_policy_3x_header_struct *packed_hdr;
525 struct pwr_policy_3x_header_unpacked hdr;
526 u32 index;
527 u32 obj_index = 0;
528 u16 pwr_policy_size;
529 bool integral_control = false;
530 u32 hw_threshold_policy_index = 0;
531 union pwr_policy_data_union pwr_policy_data;
532
533 gk20a_dbg_info("");
534
535 ptr = (u8 *)nvgpu_bios_get_perf_table_ptrs(g,
536 g->bios.perf_token, POWER_CAPPING_TABLE);
537 if (ptr == NULL) {
538 status = -EINVAL;
539 goto done;
540 }
541
542 packed_hdr = (struct pwr_policy_3x_header_struct *)ptr;
543
544 if (packed_hdr->version !=
545 VBIOS_POWER_POLICY_VERSION_3X) {
546 status = -EINVAL;
547 goto done;
548 }
549
550 if (packed_hdr->header_size <
551 VBIOS_POWER_POLICY_3X_HEADER_SIZE_25) {
552 status = -EINVAL;
553 goto done;
554 }
555
556 if (packed_hdr->table_entry_size <
557 VBIOS_POWER_POLICY_3X_ENTRY_SIZE_2E) {
558 status = -EINVAL;
559 goto done;
560 }
561
562 /* unpack power policy table header */
563 devinit_unpack_pwr_policy_header(&hdr, packed_hdr);
564
565 ptr += (u32)hdr.header_size;
566
567 for (index = 0; index < hdr.num_table_entries;
568 index++, ptr += (u32)hdr.table_entry_size) {
569
570 struct pwr_policy_3x_entry_struct *packed_entry;
571 struct pwr_policy_3x_entry_unpacked entry;
572
573 u8 class_type;
574
575 packed_entry = (struct pwr_policy_3x_entry_struct *)ptr;
576
577 class_type = (u8)BIOS_GET_FIELD(
578 packed_entry->flags0,
579 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS);
580
581 if (class_type != NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS_HW_THRESHOLD)
582 continue;
583
584 /* unpack power policy table entry */
585 devinit_unpack_pwr_policy_entry(&entry, packed_entry);
586
587 ppwrpolicyobjs->version =
588 CTRL_PMGR_PWR_POLICY_TABLE_VERSION_3X;
589 ppwrpolicyobjs->base_sample_period = hdr.base_sample_period;
590 ppwrpolicyobjs->min_client_sample_period =
591 hdr.min_client_sample_period;
592 ppwrpolicyobjs->low_sampling_mult = hdr.low_sampling_mult;
593
594 ppwrpolicyobjs->policy_idxs[1] = hdr.tgp_policy_idx;
595 ppwrpolicyobjs->policy_idxs[0] = hdr.rtp_policy_idx;
596 ppwrpolicyobjs->policy_idxs[2] = hdr.mxm_policy_idx;
597 ppwrpolicyobjs->policy_idxs[3] = hdr.dnotifier_policy_idx;
598 ppwrpolicyobjs->ext_limits[0].limit = hdr.d2_limit;
599 ppwrpolicyobjs->ext_limits[1].limit = hdr.d3_limit;
600 ppwrpolicyobjs->ext_limits[2].limit = hdr.d4_limit;
601 ppwrpolicyobjs->ext_limits[3].limit = hdr.d5_limit;
602 ppwrpolicyobjs->policy_idxs[4] = hdr.pwr_tgt_policy_idx;
603 ppwrpolicyobjs->policy_idxs[5] = hdr.pwr_tgt_floor_policy_idx;
604 ppwrpolicyobjs->policy_idxs[6] = hdr.sm_bus_policy_idx;
605
606 integral_control = (bool)BIOS_GET_FIELD(entry.flags1,
607 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_INTEGRAL_CONTROL);
608
609 if (integral_control == 0x01) {
610 pwr_policy_data.pwrpolicy.integral.past_sample_count =
611 entry.past_length;
612 pwr_policy_data.pwrpolicy.integral.next_sample_count =
613 entry.next_length;
614 pwr_policy_data.pwrpolicy.integral.ratio_limit_max =
615 entry.ratio_max;
616 pwr_policy_data.pwrpolicy.integral.ratio_limit_min =
617 entry.ratio_min;
618 } else {
619 memset(&(pwr_policy_data.pwrpolicy.integral), 0x0,
620 sizeof(struct ctrl_pmgr_pwr_policy_info_integral));
621 }
622 pwr_policy_data.hw_threshold.threshold_idx = (u8)
623 BIOS_GET_FIELD(entry.param0,
624 NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_THRES_IDX);
625
626 pwr_policy_data.hw_threshold.b_use_low_threshold =
627 BIOS_GET_FIELD(entry.param0,
628 NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_USE);
629
630 if (pwr_policy_data.hw_threshold.b_use_low_threshold) {
631 pwr_policy_data.hw_threshold.low_threshold_idx = (u8)
632 BIOS_GET_FIELD(entry.param0,
633 NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_IDX);
634
635 pwr_policy_data.hw_threshold.low_threshold_value = (u16)
636 BIOS_GET_FIELD(entry.param1,
637 NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM1_HW_THRESHOLD_LOW_THRESHOLD_VAL);
638 }
639
640 pwr_policy_size = sizeof(struct pwr_policy_hw_threshold);
641
642 /* Initialize data for the parent class */
643 pwr_policy_data.boardobj.type =
644 CTRL_PMGR_PWR_POLICY_TYPE_HW_THRESHOLD;
645 pwr_policy_data.pwrpolicy.ch_idx = entry.ch_idx;
646 pwr_policy_data.pwrpolicy.limit_unit = (u8)
647 BIOS_GET_FIELD(entry.flags0,
648 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_LIMIT_UNIT);
649 pwr_policy_data.pwrpolicy.filter_type = (u8)
650 BIOS_GET_FIELD(entry.flags1,
651 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FILTER_TYPE);
652
653 pwr_policy_data.pwrpolicy.limit_min = entry.limit_min;
654 pwr_policy_data.pwrpolicy.limit_rated = entry.limit_rated;
655 pwr_policy_data.pwrpolicy.limit_max = entry.limit_max;
656 pwr_policy_data.pwrpolicy.limit_batt = entry.limit_batt;
657
658 pwr_policy_data.pwrpolicy.sample_mult = (u8)entry.sample_mult;
659
660 /* Filled the entry.filterParam value in the filterParam */
661 pwr_policy_data.pwrpolicy.filter_param.block.block_size = 0;
662 pwr_policy_data.pwrpolicy.filter_param.moving_avg.window_size = 0;
663 pwr_policy_data.pwrpolicy.filter_param.iir.divisor = 0;
664
665 hw_threshold_policy_index |=
666 BIT(pwr_policy_data.hw_threshold.threshold_idx);
667
668 boardobj = construct_pwr_policy(g, &pwr_policy_data,
669 pwr_policy_size, pwr_policy_data.boardobj.type);
670
671 if (!boardobj) {
672 nvgpu_err(g,
673 "unable to create pwr policy for %d type %d",
674 index, pwr_policy_data.boardobj.type);
675 status = -EINVAL;
676 goto done;
677 }
678
679 status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super,
680 boardobj, obj_index);
681
682 if (status) {
683 nvgpu_err(g,
684 "unable to insert pwr policy boardobj for %d",
685 index);
686 status = -EINVAL;
687 goto done;
688 }
689
690 ++obj_index;
691 }
692
693 if (platform->hardcode_sw_threshold) {
694 status = _pwr_policy_construct_WAR_SW_Threshold_policy(g,
695 ppwrpolicyobjs,
696 &pwr_policy_data,
697 sizeof(struct pwr_policy_sw_threshold),
698 obj_index);
699 if (status) {
700 nvgpu_err(g, "unable to construct_WAR_policy");
701 status = -EINVAL;
702 goto done;
703 }
704 ++obj_index;
705 }
706
707done:
708 gk20a_dbg_info(" done status %x", status);
709 return status;
710}
711
/*
 * One-time SW setup of the power policy objects: constructs the
 * policy / relationship / violation board object groups, initializes
 * defaults, parses the VBIOS power capping table, and counts each
 * policy as one limit input of itself.
 *
 * Returns 0 on success, negative error code otherwise.
 */
u32 pmgr_policy_sw_setup(struct gk20a *g)
{
	u32 status;
	struct boardobjgrp *pboardobjgrp = NULL;
	struct pwr_policy *ppolicy;
	struct pmgr_pwr_policy *ppwrpolicyobjs;
	u8 indx = 0;

	/* Construct the Super Class and override the Interfaces */
	status = boardobjgrpconstruct_e32(g,
			&g->pmgr_pmu.pmgr_policyobjs.pwr_policies);
	if (status) {
		nvgpu_err(g,
			"error creating boardobjgrp for pmgr policy, status - 0x%x",
			status);
		goto done;
	}

	status = boardobjgrpconstruct_e32(g,
			&g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels);
	if (status) {
		nvgpu_err(g,
			"error creating boardobjgrp for pmgr policy rels, status - 0x%x",
			status);
		goto done;
	}

	status = boardobjgrpconstruct_e32(g,
			&g->pmgr_pmu.pmgr_policyobjs.pwr_violations);
	if (status) {
		nvgpu_err(g,
			"error creating boardobjgrp for pmgr violations, status - 0x%x",
			status);
		goto done;
	}

	/* Mark every special policy index invalid until the table fills it. */
	memset(g->pmgr_pmu.pmgr_policyobjs.policy_idxs, CTRL_PMGR_PWR_POLICY_INDEX_INVALID,
		sizeof(u8) * CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES);

	/* Initialize external power limit policy indexes to _INVALID/0xFF */
	for (indx = 0; indx < PWR_POLICY_EXT_POWER_STATE_ID_COUNT; indx++) {
		g->pmgr_pmu.pmgr_policyobjs.ext_limits[indx].policy_table_idx =
			CTRL_PMGR_PWR_POLICY_INDEX_INVALID;
	}

	/* Initialize external power state to _D1 */
	/* NOTE(review): the comment above says _D1 but the stored value is
	 * 0xFFFFFFFF (-1 as s32) — presumably an "unset" sentinel; confirm. */
	g->pmgr_pmu.pmgr_policyobjs.ext_power_state = 0xFFFFFFFF;

	ppwrpolicyobjs = &(g->pmgr_pmu.pmgr_policyobjs);
	pboardobjgrp = &(g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super);

	status = devinit_get_pwr_policy_table(g, ppwrpolicyobjs);
	if (status)
		goto done;

	g->pmgr_pmu.pmgr_policyobjs.b_enabled = true;

	/* Each constructed policy counts itself as one limit input. */
	BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct pwr_policy *, ppolicy, indx) {
		PMGR_PWR_POLICY_INCREMENT_LIMIT_INPUT_COUNT(ppolicy);
	}

	g->pmgr_pmu.pmgr_policyobjs.global_ceiling.values[0] =
		0xFF;

	g->pmgr_pmu.pmgr_policyobjs.client_work_item.b_pending = false;

done:
	gk20a_dbg_info(" done status %x", status);
	return status;
}
diff --git a/drivers/gpu/nvgpu/pmgr/pwrpolicy.h b/drivers/gpu/nvgpu/pmgr/pwrpolicy.h
new file mode 100644
index 00000000..9bc99bb7
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrpolicy.h
@@ -0,0 +1,136 @@
1/*
2 * general power channel structures & definitions
3 *
4 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24#ifndef _PWRPOLICY_H_
25#define _PWRPOLICY_H_
26
27#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
28#include "boardobj/boardobjgrp.h"
29#include "boardobj/boardobj.h"
30#include "ctrl/ctrlpmgr.h"
31
/* Number of external (D2..D5) power-state limit slots. */
#define PWR_POLICY_EXT_POWER_STATE_ID_COUNT 0x4

/* Identifiers selecting one of a policy's arbitrated limit values. */
enum pwr_policy_limit_id {
	PWR_POLICY_LIMIT_ID_MIN = 0x00000000,
	PWR_POLICY_LIMIT_ID_RATED,
	PWR_POLICY_LIMIT_ID_MAX,
	PWR_POLICY_LIMIT_ID_CURR,
	PWR_POLICY_LIMIT_ID_BATT,
};
41
/*
 * Base power policy object: per-channel limits plus one arbitration
 * structure per limit id (see enum pwr_policy_limit_id).
 */
struct pwr_policy {
	struct boardobj super;
	u8 ch_idx;			/* power channel this policy applies to */
	u8 num_limit_inputs;		/* count of registered limit inputs */
	u8 limit_unit;			/* unit of the limit values (from VBIOS flags) */
	s32 limit_delta;		/* signed delta applied to arbitrated limits */
	u32 limit_min;
	u32 limit_rated;
	u32 limit_max;
	u32 limit_batt;			/* battery limit; 0 means unlimited */
	struct ctrl_pmgr_pwr_policy_info_integral integral;
	struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_min;
	struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_rated;
	struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_max;
	struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_batt;
	struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_curr;
	u8 sample_mult;			/* sampling period multiplier */
	enum ctrl_pmgr_pwr_policy_filter_type filter_type;
	union ctrl_pmgr_pwr_policy_filter_param filter_param;
};
62
/* External power-state limit: a policy table index plus its limit value. */
struct pwr_policy_ext_limit {
	u8 policy_table_idx;	/* index into the policy table, 0xFF = invalid */
	u32 limit;
};

/* Battery power-state work item. */
struct pwr_policy_batt_workitem {
	u32 power_state;
	bool b_full_deflection;
};

/* Pending client limit-change work item. */
struct pwr_policy_client_workitem {
	u32 limit;
	bool b_pending;		/* true while the work item awaits processing */
};

/* Relationship object referencing another power policy by index. */
struct pwr_policy_relationship {
	struct boardobj super;
	u8 policy_idx;
};
82
/*
 * Top-level power policy state: the policy/relationship/violation
 * board object groups plus table-wide parameters parsed from the
 * VBIOS power capping table header.
 */
struct pmgr_pwr_policy {
	u8 version;			/* policy table version (3.x) */
	bool b_enabled;			/* set once SW setup completes */
	struct nv_pmu_perf_domain_group_limits global_ceiling;
	u8 policy_idxs[CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES];	/* special policy indexes */
	struct pwr_policy_ext_limit ext_limits[PWR_POLICY_EXT_POWER_STATE_ID_COUNT];
	s32 ext_power_state;		/* current external power state */
	u16 base_sample_period;
	u16 min_client_sample_period;
	u8 low_sampling_mult;
	struct boardobjgrp_e32 pwr_policies;
	struct boardobjgrp_e32 pwr_policy_rels;
	struct boardobjgrp_e32 pwr_violations;
	struct pwr_policy_client_workitem client_work_item;
};
98
/* Base class for limit-enforcing policies; adds nothing to pwr_policy. */
struct pwr_policy_limit {
	struct pwr_policy super;
};

/* HW threshold policy: limit enforced via a hardware threshold slot. */
struct pwr_policy_hw_threshold {
	struct pwr_policy_limit super;
	u8 threshold_idx;		/* hardware threshold slot index */
	u8 low_threshold_idx;		/* low-threshold slot (if used) */
	bool b_use_low_threshold;
	u16 low_threshold_value;
};

/*
 * SW threshold policy.  The leading fields must mirror
 * pwr_policy_hw_threshold: construct_pwr_policy() fills them through a
 * hw_threshold-typed pointer for both policy types.
 */
struct pwr_policy_sw_threshold {
	struct pwr_policy_limit super;
	u8 threshold_idx;
	u8 low_threshold_idx;
	bool b_use_low_threshold;
	u16 low_threshold_value;
	u8 event_id;			/* event raised when the threshold trips */
};
119
/*
 * Overlay of every pwr_policy subtype; used as scratch space while
 * parsing VBIOS entries before the real board object is constructed.
 */
union pwr_policy_data_union {
	struct boardobj boardobj;
	struct pwr_policy pwrpolicy;
	struct pwr_policy_hw_threshold hw_threshold;
	struct pwr_policy_sw_threshold sw_threshold;
} ;

/* Look up a pwr_policy by index in the global policy board object group. */
#define PMGR_GET_PWR_POLICY(g, policy_idx) \
	((struct pwr_policy *)BOARDOBJGRP_OBJ_GET_BY_IDX( \
		&(g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super), (policy_idx)))

/* Bump the count of limit inputs feeding a policy. */
#define PMGR_PWR_POLICY_INCREMENT_LIMIT_INPUT_COUNT(ppolicy) \
	((ppolicy)->num_limit_inputs++)

/* One-time SW setup: builds policy objects from the VBIOS table. */
u32 pmgr_policy_sw_setup(struct gk20a *g);
135
136#endif