path: root/drivers/gpu/nvgpu/pmgr
author     Deepak Nibade <dnibade@nvidia.com>  2016-12-27 05:01:00 -0500
committer  Deepak Nibade <dnibade@nvidia.com>  2016-12-27 05:35:06 -0500
commit     7a81883a0d70c3a43ad2841ac235f6dc344c60fb (patch)
tree       92923d2efccf90d1961071fa9acde59178a0d688 /drivers/gpu/nvgpu/pmgr
parent     505b442551a2e27aa3bc9e608c5a2bc9fccecbc4 (diff)
parent     2aa3c85f8e82b3c07c39e677663abd3687c1822a (diff)
Merge remote-tracking branch 'remotes/origin/dev/merge-nvgpu-t18x-into-nvgpu' into dev-kernel
Merge T186 - gp10b/gp106 code into common nvgpu repo

Bug 200266498

Change-Id: Ibf100ee38010cbed85c149b69b99147256f9a005
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/pmgr')
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pmgr.c        176
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pmgr.h         34
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pmgrpmu.c     524
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pmgrpmu.h      29
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrdev.c      310
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrdev.h       51
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrmonitor.c  365
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrmonitor.h   60
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrpolicy.c   765
-rw-r--r--  drivers/gpu/nvgpu/pmgr/pwrpolicy.h   127
10 files changed, 2441 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/pmgr/pmgr.c b/drivers/gpu/nvgpu/pmgr/pmgr.c
new file mode 100644
index 00000000..e101aba8
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgr.c
@@ -0,0 +1,176 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15#include "pwrdev.h"
16#include "pmgrpmu.h"
17#include <linux/debugfs.h>
18
19int pmgr_pwr_devices_get_power(struct gk20a *g, u32 *val)
20{
21 struct nv_pmu_pmgr_pwr_devices_query_payload payload;
22 int status;
23
24 status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload);
25 if (status)
26 gk20a_err(dev_from_gk20a(g),
27 "pmgr_pwr_devices_get_power failed %x",
28 status);
29
30 *val = payload.devices[0].powerm_w;
31
32 return status;
33}
34
35int pmgr_pwr_devices_get_current(struct gk20a *g, u32 *val)
36{
37 struct nv_pmu_pmgr_pwr_devices_query_payload payload;
38 int status;
39
40 status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload);
41 if (status)
42 gk20a_err(dev_from_gk20a(g),
43 "pmgr_pwr_devices_get_current failed %x",
44 status);
45
46 *val = payload.devices[0].currentm_a;
47
48 return status;
49}
50
51int pmgr_pwr_devices_get_voltage(struct gk20a *g, u32 *val)
52{
53 struct nv_pmu_pmgr_pwr_devices_query_payload payload;
54 int status;
55
56 status = pmgr_pmu_pwr_devices_query_blocking(g, 1, &payload);
57 if (status)
58 gk20a_err(dev_from_gk20a(g),
59 "pmgr_pwr_devices_get_voltage failed %x",
60 status);
61
62 *val = payload.devices[0].voltageu_v;
63
64 return status;
65}
66
67#ifdef CONFIG_DEBUG_FS
68int pmgr_pwr_devices_get_power_u64(void *data, u64 *p)
69{
70 struct gk20a *g = (struct gk20a *)data;
71 int err;
72 u32 val;
73
74 err = pmgr_pwr_devices_get_power(g, &val);
75 *p = val;
76
77 return err;
78}
79
80int pmgr_pwr_devices_get_current_u64(void *data, u64 *p)
81{
82 struct gk20a *g = (struct gk20a *)data;
83 int err;
84 u32 val;
85
86 err = pmgr_pwr_devices_get_current(g, &val);
87 *p = val;
88
89 return err;
90}
91
92int pmgr_pwr_devices_get_voltage_u64(void *data, u64 *p)
93{
94 struct gk20a *g = (struct gk20a *)data;
95 int err;
96 u32 val;
97
98 err = pmgr_pwr_devices_get_voltage(g, &val);
99 *p = val;
100
101 return err;
102}
103
104DEFINE_SIMPLE_ATTRIBUTE(
105 pmgr_power_ctrl_fops, pmgr_pwr_devices_get_power_u64, NULL, "%llu\n");
106
107DEFINE_SIMPLE_ATTRIBUTE(
108 pmgr_current_ctrl_fops, pmgr_pwr_devices_get_current_u64, NULL, "%llu\n");
109
110DEFINE_SIMPLE_ATTRIBUTE(
111 pmgr_voltage_ctrl_fops, pmgr_pwr_devices_get_voltage_u64, NULL, "%llu\n");
112
113static void pmgr_debugfs_init(struct gk20a *g) {
114 struct gk20a_platform *platform = dev_get_drvdata(g->dev);
115 struct dentry *dbgentry;
116
117 dbgentry = debugfs_create_file(
118 "power", S_IRUGO, platform->debugfs, g, &pmgr_power_ctrl_fops);
119 if (!dbgentry)
120 gk20a_err(dev_from_gk20a(g),
121 "debugfs entry create failed for power");
122
123 dbgentry = debugfs_create_file(
124 "current", S_IRUGO, platform->debugfs, g, &pmgr_current_ctrl_fops);
125 if (!dbgentry)
126 gk20a_err(dev_from_gk20a(g),
127 "debugfs entry create failed for current");
128
129 dbgentry = debugfs_create_file(
130 "voltage", S_IRUGO, platform->debugfs, g, &pmgr_voltage_ctrl_fops);
131 if (!dbgentry)
132 gk20a_err(dev_from_gk20a(g),
133 "debugfs entry create failed for voltage");
134}
135#endif
136
137u32 pmgr_domain_sw_setup(struct gk20a *g)
138{
139 u32 status;
140
141 status = pmgr_device_sw_setup(g);
142 if (status) {
143 gk20a_err(dev_from_gk20a(g),
144 "error creating boardobjgrp for pmgr devices, status - 0x%x",
145 status);
146 goto exit;
147 }
148
149 status = pmgr_monitor_sw_setup(g);
150 if (status) {
151 gk20a_err(dev_from_gk20a(g),
152 "error creating boardobjgrp for pmgr monitor, status - 0x%x",
153 status);
154 goto exit;
155 }
156
157 status = pmgr_policy_sw_setup(g);
158 if (status) {
159 gk20a_err(dev_from_gk20a(g),
160 "error creating boardobjgrp for pmgr policy, status - 0x%x",
161 status);
162 goto exit;
163 }
164
165#ifdef CONFIG_DEBUG_FS
166 pmgr_debugfs_init(g);
167#endif
168
169exit:
170 return status;
171}
172
173u32 pmgr_domain_pmu_setup(struct gk20a *g)
174{
175 return pmgr_send_pmgr_tables_to_pmu(g);
176}
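
The three getters above all funnel through pmgr_pmu_pwr_devices_query_blocking() and report channel 0 of the query payload: power in mW, current in mA and voltage in uV. A minimal caller sketch follows; my_log_board_power() is illustrative only and is not part of this change:

static void my_log_board_power(struct gk20a *g)
{
	u32 mw, ma, uv;

	/* each helper returns 0 on success and logs its own failure */
	if (!pmgr_pwr_devices_get_power(g, &mw) &&
	    !pmgr_pwr_devices_get_current(g, &ma) &&
	    !pmgr_pwr_devices_get_voltage(g, &uv))
		gk20a_dbg_info("board power %u mW, %u mA, %u uV", mw, ma, uv);
}

The same values are also exported read-only through the "power", "current" and "voltage" debugfs nodes created by pmgr_debugfs_init().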
diff --git a/drivers/gpu/nvgpu/pmgr/pmgr.h b/drivers/gpu/nvgpu/pmgr/pmgr.h
new file mode 100644
index 00000000..cf511fd1
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgr.h
@@ -0,0 +1,34 @@
1/*
2 * general power device structures & definitions
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#ifndef _PMGR_H_
16#define _PMGR_H_
17
18#include "pwrdev.h"
19#include "pwrmonitor.h"
20#include "pwrpolicy.h"
21
22struct pmgr_pmupstate {
23 struct pwr_devices pmgr_deviceobjs;
24 struct pmgr_pwr_monitor pmgr_monitorobjs;
25 struct pmgr_pwr_policy pmgr_policyobjs;
26};
27
28u32 pmgr_domain_sw_setup(struct gk20a *g);
29u32 pmgr_domain_pmu_setup(struct gk20a *g);
30int pmgr_pwr_devices_get_current(struct gk20a *g, u32 *val);
31int pmgr_pwr_devices_get_voltage(struct gk20a *g, u32 *val);
32int pmgr_pwr_devices_get_power(struct gk20a *g, u32 *val);
33
34#endif
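
pmgr_domain_sw_setup() builds the device, monitor and policy board-object groups from the VBIOS tables, while pmgr_domain_pmu_setup() pushes those tables to the PMU and therefore can only run once the PMU is booted. A hedged ordering sketch; gk20a_init_pmgr_support() is a hypothetical caller, not defined in this change:

static u32 gk20a_init_pmgr_support(struct gk20a *g)
{
	u32 err;

	/* parse VBIOS tables and construct SW state early in probe */
	err = pmgr_domain_sw_setup(g);
	if (err)
		return err;

	/* send the tables once the PMU accepts commands */
	return pmgr_domain_pmu_setup(g);
}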
diff --git a/drivers/gpu/nvgpu/pmgr/pmgrpmu.c b/drivers/gpu/nvgpu/pmgr/pmgrpmu.c
new file mode 100644
index 00000000..ea070060
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgrpmu.c
@@ -0,0 +1,524 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15#include "pwrdev.h"
16#include "include/bios.h"
17#include "boardobj/boardobjgrp.h"
18#include "boardobj/boardobjgrp_e32.h"
19#include "pmuif/gpmuifboardobj.h"
20#include "pmuif/gpmuifpmgr.h"
21#include "gm206/bios_gm206.h"
22#include "gk20a/pmu_gk20a.h"
23#include "pmgrpmu.h"
24
25struct pmgr_pmucmdhandler_params {
26 u32 success;
27};
28
29static void pmgr_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
30 void *param, u32 handle, u32 status)
31{
32 struct pmgr_pmucmdhandler_params *phandlerparams =
33 (struct pmgr_pmucmdhandler_params *)param;
34
35 if ((msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_SET_OBJECT) &&
36 (msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_QUERY) &&
37 (msg->msg.pmgr.msg_type != NV_PMU_PMGR_MSG_ID_LOAD)) {
38 gk20a_err(dev_from_gk20a(g),
39 "unknown msg %x",
40 msg->msg.pmgr.msg_type);
41 return;
42 }
43
44 if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_SET_OBJECT) {
45 if ((msg->msg.pmgr.set_object.b_success != 1) ||
46 (msg->msg.pmgr.set_object.flcnstatus != 0)) {
47 gk20a_err(dev_from_gk20a(g),
48 "pmgr msg failed %x %x %x %x",
49 msg->msg.pmgr.set_object.msg_type,
50 msg->msg.pmgr.set_object.b_success,
51 msg->msg.pmgr.set_object.flcnstatus,
52 msg->msg.pmgr.set_object.object_type);
53 return;
54 }
55 } else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_QUERY) {
56 if ((msg->msg.pmgr.query.b_success != 1) ||
57 (msg->msg.pmgr.query.flcnstatus != 0)) {
58 gk20a_err(dev_from_gk20a(g),
59 "pmgr msg failed %x %x %x %x",
60 msg->msg.pmgr.query.msg_type,
61 msg->msg.pmgr.query.b_success,
62 msg->msg.pmgr.query.flcnstatus,
63 msg->msg.pmgr.query.cmd_type);
64 return;
65 }
66 } else if (msg->msg.pmgr.msg_type == NV_PMU_PMGR_MSG_ID_LOAD) {
67 if ((msg->msg.pmgr.load.b_success != 1) ||
68 (msg->msg.pmgr.load.flcnstatus != 0)) {
69 gk20a_err(dev_from_gk20a(g),
70 "pmgr msg failed %x %x %x",
71 msg->msg.pmgr.load.msg_type,
72 msg->msg.pmgr.load.b_success,
73 msg->msg.pmgr.load.flcnstatus);
74 return;
75 }
76 }
77
78 phandlerparams->success = 1;
79}
80
81static u32 pmgr_pmu_set_object(struct gk20a *g,
82 u8 type,
83 u16 dmem_size,
84 u16 fb_size,
85 void *pobj)
86{
87 struct pmu_cmd cmd = { {0} };
88 struct pmu_payload payload = { {0} };
89 struct nv_pmu_pmgr_cmd_set_object *pcmd;
90 u32 status;
91 u32 seqdesc;
92 struct pmgr_pmucmdhandler_params handlerparams = {0};
93
94 cmd.hdr.unit_id = PMU_UNIT_PMGR;
95 cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_set_object) +
96 (u32)sizeof(struct pmu_hdr);
97
98 pcmd = &cmd.cmd.pmgr.set_object;
99 pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_SET_OBJECT;
100 pcmd->object_type = type;
101
102 payload.in.buf = pobj;
103 payload.in.size = dmem_size;
104 payload.in.fb_size = fb_size;
105 payload.in.offset = NV_PMU_PMGR_SET_OBJECT_ALLOC_OFFSET;
106
107 /* Setup the handler params to communicate back results.*/
108 handlerparams.success = 0;
109
110 status = gk20a_pmu_cmd_post(g, &cmd, NULL, &payload,
111 PMU_COMMAND_QUEUE_LPQ,
112 pmgr_pmucmdhandler,
113 (void *)&handlerparams,
114 &seqdesc, ~0);
115 if (status) {
116 gk20a_err(dev_from_gk20a(g),
117 "unable to post pmgr cmd for unit %x cmd id %x obj type %x",
118 cmd.hdr.unit_id, pcmd->cmd_type, pcmd->object_type);
119 goto exit;
120 }
121
122 pmu_wait_message_cond(&g->pmu,
123 gk20a_get_gr_idle_timeout(g),
124 &handlerparams.success, 1);
125
126 if (handlerparams.success == 0) {
127 gk20a_err(dev_from_gk20a(g), "could not process cmd\n");
128 status = -ETIMEDOUT;
129 goto exit;
130 }
131
132exit:
133 return status;
134}
135
136static u32 pmgr_send_i2c_device_topology_to_pmu(struct gk20a *g)
137{
138 struct nv_pmu_pmgr_i2c_device_desc_table i2c_desc_table;
139 u32 status = 0;
140
141 /* INA3221 I2C device info */
142 i2c_desc_table.dev_mask = 0x01;
143
144 /* INA3221 */
145 i2c_desc_table.devices[0].super.type = 0x4E;
146
147 i2c_desc_table.devices[0].dcb_index = 0;
148 i2c_desc_table.devices[0].i2c_address = 0x84;
149 i2c_desc_table.devices[0].i2c_flags = 0xC2F;
150 i2c_desc_table.devices[0].i2c_port = 0x2;
151
152 /* Pass the table down the PMU as an object */
153 status = pmgr_pmu_set_object(
154 g,
155 NV_PMU_PMGR_OBJECT_I2C_DEVICE_DESC_TABLE,
156 (u16)sizeof(struct nv_pmu_pmgr_i2c_device_desc_table),
157 PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED,
158 &i2c_desc_table);
159
160 if (status)
161 gk20a_err(dev_from_gk20a(g),
162 "pmgr_pmu_set_object failed %x",
163 status);
164
165 return status;
166}
167
168static u32 pmgr_send_pwr_device_topology_to_pmu(struct gk20a *g)
169{
170 struct nv_pmu_pmgr_pwr_device_desc_table pwr_desc_table;
171 struct nv_pmu_pmgr_pwr_device_desc_table_header *ppwr_desc_header;
172 u32 status = 0;
173
174 /* Set the BA-device-independent HW information */
175 ppwr_desc_header = &(pwr_desc_table.hdr.data);
176 ppwr_desc_header->ba_info.b_initialized_and_used = false;
177
178 /* populate the table */
179 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&ppwr_desc_header->super,
180 g->pmgr_pmu.pmgr_deviceobjs.super.super.objmask);
181
182 status = boardobjgrp_pmudatainit_legacy(g,
183 &g->pmgr_pmu.pmgr_deviceobjs.super.super,
184 (struct nv_pmu_boardobjgrp_super *)&pwr_desc_table);
185
186 if (status) {
187 gk20a_err(dev_from_gk20a(g),
188 "boardobjgrp_pmudatainit_legacy failed %x",
189 status);
190 goto exit;
191 }
192
193 /* Pass the table down the PMU as an object */
194 status = pmgr_pmu_set_object(
195 g,
196 NV_PMU_PMGR_OBJECT_PWR_DEVICE_DESC_TABLE,
197 (u16)sizeof(
198 union nv_pmu_pmgr_pwr_device_dmem_size),
199 (u16)sizeof(struct nv_pmu_pmgr_pwr_device_desc_table),
200 &pwr_desc_table);
201
202 if (status)
203 gk20a_err(dev_from_gk20a(g),
204 "pmgr_pmu_set_object failed %x",
205 status);
206
207exit:
208 return status;
209}
210
211static u32 pmgr_send_pwr_monitor_to_pmu(struct gk20a *g)
212{
213 struct nv_pmu_pmgr_pwr_monitor_pack pwr_monitor_pack;
214 struct nv_pmu_pmgr_pwr_channel_header *pwr_channel_hdr;
215 struct nv_pmu_pmgr_pwr_chrelationship_header *pwr_chrelationship_header;
216 u32 max_dmem_size;
217 u32 status = 0;
218
219 /* Copy all the global settings from the RM copy */
220 pwr_channel_hdr = &(pwr_monitor_pack.channels.hdr.data);
221 pwr_monitor_pack = g->pmgr_pmu.pmgr_monitorobjs.pmu_data;
222
223 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_channel_hdr->super,
224 g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super.objmask);
225
226 /* Copy in each channel */
227 status = boardobjgrp_pmudatainit_legacy(g,
228 &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super,
229 (struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack.channels));
230
231 if (status) {
232 gk20a_err(dev_from_gk20a(g),
233 "boardobjgrp_pmudatainit_legacy failed %x",
234 status);
235 goto exit;
236 }
237
238 /* Copy in each channel relationship */
239 pwr_chrelationship_header = &(pwr_monitor_pack.ch_rels.hdr.data);
240
241 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)&pwr_chrelationship_header->super,
242 g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super.objmask);
243
244 pwr_channel_hdr->physical_channel_mask = g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask;
245 pwr_channel_hdr->type = NV_PMU_PMGR_PWR_MONITOR_TYPE_NO_POLLING;
246
247 status = boardobjgrp_pmudatainit_legacy(g,
248 &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super,
249 (struct nv_pmu_boardobjgrp_super *)&(pwr_monitor_pack.ch_rels));
250
251 if (status) {
252 gk20a_err(dev_from_gk20a(g),
253 "boardobjgrp_pmudatainit_legacy failed %x",
254 status);
255 goto exit;
256 }
257
258 /* Calculate the max Dmem buffer size */
259 max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_monitor_dmem_size);
260
261 /* Pass the table down the PMU as an object */
262 status = pmgr_pmu_set_object(
263 g,
264 NV_PMU_PMGR_OBJECT_PWR_MONITOR,
265 (u16)max_dmem_size,
266 (u16)sizeof(struct nv_pmu_pmgr_pwr_monitor_pack),
267 &pwr_monitor_pack);
268
269 if (status)
270 gk20a_err(dev_from_gk20a(g),
271 "pmgr_pmu_set_object failed %x",
272 status);
273
274exit:
275 return status;
276}
277
278u32 pmgr_send_pwr_policy_to_pmu(struct gk20a *g)
279{
280 struct nv_pmu_pmgr_pwr_policy_pack *ppwrpack = NULL;
281 struct pwr_policy *ppolicy = NULL;
282 u32 status = 0;
283 u8 indx;
284 u32 max_dmem_size;
285
286 ppwrpack = kzalloc(sizeof(struct nv_pmu_pmgr_pwr_policy_pack), GFP_KERNEL);
287 if (!ppwrpack) {
288 gk20a_err(dev_from_gk20a(g),
289 "pwr policy alloc failed %x",
290 status);
291 status = -ENOMEM;
292 goto exit;
293 }
294
295 ppwrpack->policies.hdr.data.version = g->pmgr_pmu.pmgr_policyobjs.version;
296 ppwrpack->policies.hdr.data.b_enabled = g->pmgr_pmu.pmgr_policyobjs.b_enabled;
297
298 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
299 &ppwrpack->policies.hdr.data.super,
300 g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super.objmask);
301
302 memset(&ppwrpack->policies.hdr.data.reserved_pmu_policy_mask,
303 0,
304 sizeof(ppwrpack->policies.hdr.data.reserved_pmu_policy_mask));
305
306 ppwrpack->policies.hdr.data.base_sample_period =
307 g->pmgr_pmu.pmgr_policyobjs.base_sample_period;
308 ppwrpack->policies.hdr.data.min_client_sample_period =
309 g->pmgr_pmu.pmgr_policyobjs.min_client_sample_period;
310 ppwrpack->policies.hdr.data.low_sampling_mult =
311 g->pmgr_pmu.pmgr_policyobjs.low_sampling_mult;
312
313 memcpy(&ppwrpack->policies.hdr.data.global_ceiling,
314 &g->pmgr_pmu.pmgr_policyobjs.global_ceiling,
315 sizeof(struct nv_pmu_perf_domain_group_limits));
316
317 memcpy(&ppwrpack->policies.hdr.data.semantic_policy_tbl,
318 &g->pmgr_pmu.pmgr_policyobjs.policy_idxs,
319 sizeof(g->pmgr_pmu.pmgr_policyobjs.policy_idxs));
320
321 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, indx,
322 ppwrpack->policies.hdr.data.super.obj_mask.super.data[0]) {
323 ppolicy = PMGR_GET_PWR_POLICY(g, indx);
324
325 status = ((struct boardobj *)ppolicy)->pmudatainit(g, (struct boardobj *)ppolicy,
326 (struct nv_pmu_boardobj *)&(ppwrpack->policies.policies[indx].data));
327 if (status) {
328 gk20a_err(dev_from_gk20a(g),
329 "pmudatainit failed %x indx %x",
330 status, indx);
331 status = -ENOMEM;
332 goto exit;
333 }
334 }
335 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END;
336
337 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
338 &ppwrpack->policy_rels.hdr.data.super,
339 g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super.objmask);
340
341 boardobjgrpe32hdrset((struct nv_pmu_boardobjgrp *)
342 &ppwrpack->violations.hdr.data.super,
343 g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super.objmask);
344
345 max_dmem_size = sizeof(union nv_pmu_pmgr_pwr_policy_dmem_size);
346
347 /* Pass the table down the PMU as an object */
348 status = pmgr_pmu_set_object(
349 g,
350 NV_PMU_PMGR_OBJECT_PWR_POLICY,
351 (u16)max_dmem_size,
352 (u16)sizeof(struct nv_pmu_pmgr_pwr_policy_pack),
353 ppwrpack);
354
355 if (status)
356 gk20a_err(dev_from_gk20a(g),
357 "pmgr_pmu_set_object failed %x",
358 status);
359
360exit:
361 if (ppwrpack) {
362 kfree(ppwrpack);
363 }
364
365 return status;
366}
367
368u32 pmgr_pmu_pwr_devices_query_blocking(
369 struct gk20a *g,
370 u32 pwr_dev_mask,
371 struct nv_pmu_pmgr_pwr_devices_query_payload *ppayload)
372{
373 struct pmu_cmd cmd = { {0} };
374 struct pmu_payload payload = { {0} };
375 struct nv_pmu_pmgr_cmd_pwr_devices_query *pcmd;
376 u32 status;
377 u32 seqdesc;
378 struct pmgr_pmucmdhandler_params handlerparams = {0};
379
380 cmd.hdr.unit_id = PMU_UNIT_PMGR;
381 cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_pwr_devices_query) +
382 (u32)sizeof(struct pmu_hdr);
383
384 pcmd = &cmd.cmd.pmgr.pwr_dev_query;
385 pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_PWR_DEVICES_QUERY;
386 pcmd->dev_mask = pwr_dev_mask;
387
388 payload.out.buf = ppayload;
389 payload.out.size = sizeof(struct nv_pmu_pmgr_pwr_devices_query_payload);
390 payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
391 payload.out.offset = NV_PMU_PMGR_PWR_DEVICES_QUERY_ALLOC_OFFSET;
392
393 /* Setup the handler params to communicate back results.*/
394 handlerparams.success = 0;
395
396 status = gk20a_pmu_cmd_post(g, &cmd, NULL, &payload,
397 PMU_COMMAND_QUEUE_LPQ,
398 pmgr_pmucmdhandler,
399 (void *)&handlerparams,
400 &seqdesc, ~0);
401 if (status) {
402 gk20a_err(dev_from_gk20a(g),
403 "unable to post pmgr query cmd for unit %x cmd id %x dev mask %x",
404 cmd.hdr.unit_id, pcmd->cmd_type, pcmd->dev_mask);
405 goto exit;
406 }
407
408 pmu_wait_message_cond(&g->pmu,
409 gk20a_get_gr_idle_timeout(g),
410 &handlerparams.success, 1);
411
412 if (handlerparams.success == 0) {
413 gk20a_err(dev_from_gk20a(g), "could not process cmd\n");
414 status = -ETIMEDOUT;
415 goto exit;
416 }
417
418exit:
419 return status;
420}
421
422static u32 pmgr_pmu_load_blocking(struct gk20a *g)
423{
424 struct pmu_cmd cmd = { {0} };
425 struct nv_pmu_pmgr_cmd_load *pcmd;
426 u32 status;
427 u32 seqdesc;
428 struct pmgr_pmucmdhandler_params handlerparams = {0};
429
430 cmd.hdr.unit_id = PMU_UNIT_PMGR;
431 cmd.hdr.size = (u32)sizeof(struct nv_pmu_pmgr_cmd_load) +
432 (u32)sizeof(struct pmu_hdr);
433
434 pcmd = &cmd.cmd.pmgr.load;
435 pcmd->cmd_type = NV_PMU_PMGR_CMD_ID_LOAD;
436
437 /* Setup the handler params to communicate back results.*/
438 handlerparams.success = 0;
439
440 status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
441 PMU_COMMAND_QUEUE_LPQ,
442 pmgr_pmucmdhandler,
443 (void *)&handlerparams,
444 &seqdesc, ~0);
445 if (status) {
446 gk20a_err(dev_from_gk20a(g),
447 "unable to post pmgr load cmd for unit %x cmd id %x",
448 cmd.hdr.unit_id, pcmd->cmd_type);
449 goto exit;
450 }
451
452 pmu_wait_message_cond(&g->pmu,
453 gk20a_get_gr_idle_timeout(g),
454 &handlerparams.success, 1);
455
456 if (handlerparams.success == 0) {
457 gk20a_err(dev_from_gk20a(g), "could not process cmd\n");
458 status = -ETIMEDOUT;
459 goto exit;
460 }
461
462exit:
463 return status;
464}
465
466u32 pmgr_send_pmgr_tables_to_pmu(struct gk20a *g)
467{
468 u32 status = 0;
469
470 status = pmgr_send_i2c_device_topology_to_pmu(g);
471
472 if (status) {
473 gk20a_err(dev_from_gk20a(g),
474 "pmgr_send_i2c_device_topology_to_pmu failed %x",
475 status);
476 goto exit;
477 }
478
479 if (!BOARDOBJGRP_IS_EMPTY(&g->pmgr_pmu.pmgr_deviceobjs.super.super)) {
480 status = pmgr_send_pwr_device_topology_to_pmu(g);
481 if (status) {
482 gk20a_err(dev_from_gk20a(g),
483 "pmgr_send_pwr_device_topology_to_pmu failed %x",
484 status);
485 goto exit;
486 }
487 }
488
489 if (!(BOARDOBJGRP_IS_EMPTY(
490 &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super)) ||
491 !(BOARDOBJGRP_IS_EMPTY(
492 &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super))) {
493 status = pmgr_send_pwr_monitor_to_pmu(g);
494 if (status) {
495 gk20a_err(dev_from_gk20a(g),
496 "pmgr_send_pwr_monitor_to_pmu failed %x", status);
497 goto exit;
498 }
499 }
500
501 if (!(BOARDOBJGRP_IS_EMPTY(
502 &g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super)) ||
503 !(BOARDOBJGRP_IS_EMPTY(
504 &g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels.super)) ||
505 !(BOARDOBJGRP_IS_EMPTY(
506 &g->pmgr_pmu.pmgr_policyobjs.pwr_violations.super))) {
507 status = pmgr_send_pwr_policy_to_pmu(g);
508 if (status) {
509 gk20a_err(dev_from_gk20a(g),
510 "pmgr_send_pwr_policy_to_pmu failed %x", status);
511 goto exit;
512 }
513 }
514
515 status = pmgr_pmu_load_blocking(g);
516 if (status) {
517 gk20a_err(dev_from_gk20a(g),
518 "pmgr_pmu_load_blocking failed %x", status);
519 goto exit;
520 }
521
522exit:
523 return status;
524}
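
pmgr_pmu_set_object(), pmgr_pmu_pwr_devices_query_blocking() and pmgr_pmu_load_blocking() all open-code the same post-and-wait sequence: submit on the LPQ queue, then poll the handler's success flag until the PMU acknowledges. A sketch of that shared pattern as one helper; pmgr_pmu_cmd_post_blocking() is hypothetical, only the calls it wraps come from this file:

static u32 pmgr_pmu_cmd_post_blocking(struct gk20a *g,
		struct pmu_cmd *cmd, struct pmu_payload *payload)
{
	struct pmgr_pmucmdhandler_params handlerparams = { 0 };
	u32 seqdesc;
	u32 status;

	status = gk20a_pmu_cmd_post(g, cmd, NULL, payload,
			PMU_COMMAND_QUEUE_LPQ,
			pmgr_pmucmdhandler,
			(void *)&handlerparams,
			&seqdesc, ~0);
	if (status)
		return status;

	/* the handler sets success to 1 once the PMU acks the command */
	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handlerparams.success, 1);

	return (handlerparams.success == 0) ? -ETIMEDOUT : 0;
}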
diff --git a/drivers/gpu/nvgpu/pmgr/pmgrpmu.h b/drivers/gpu/nvgpu/pmgr/pmgrpmu.h
new file mode 100644
index 00000000..6b48396c
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pmgrpmu.h
@@ -0,0 +1,29 @@
1/*
2 * general power device control structures & definitions
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#ifndef _PMGRPMU_H_
16#define _PMGRPMU_H_
17
18#include "gk20a/gk20a.h"
19#include "pwrdev.h"
20#include "pwrmonitor.h"
21
22u32 pmgr_send_pmgr_tables_to_pmu(struct gk20a *g);
23
24u32 pmgr_pmu_pwr_devices_query_blocking(
25 struct gk20a *g,
26 u32 pwr_dev_mask,
27 struct nv_pmu_pmgr_pwr_devices_query_payload *ppayload);
28
29#endif
diff --git a/drivers/gpu/nvgpu/pmgr/pwrdev.c b/drivers/gpu/nvgpu/pmgr/pwrdev.c
new file mode 100644
index 00000000..03e2eb34
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrdev.c
@@ -0,0 +1,310 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15#include "pwrdev.h"
16#include "include/bios.h"
17#include "boardobj/boardobjgrp.h"
18#include "boardobj/boardobjgrp_e32.h"
19#include "pmuif/gpmuifboardobj.h"
20#include "pmuif/gpmuifpmgr.h"
21#include "gm206/bios_gm206.h"
22#include "gk20a/pmu_gk20a.h"
23
24static u32 _pwr_device_pmudata_instget(struct gk20a *g,
25 struct nv_pmu_boardobjgrp *pmuboardobjgrp,
26 struct nv_pmu_boardobj **ppboardobjpmudata,
27 u8 idx)
28{
29 struct nv_pmu_pmgr_pwr_device_desc_table *ppmgrdevice =
30 (struct nv_pmu_pmgr_pwr_device_desc_table *)pmuboardobjgrp;
31
32 gk20a_dbg_info("");
33
34 /*check whether pmuboardobjgrp has a valid boardobj in index*/
35 if (((u32)BIT(idx) &
36 ppmgrdevice->hdr.data.super.obj_mask.super.data[0]) == 0)
37 return -EINVAL;
38
39 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
40 &ppmgrdevice->devices[idx].data.board_obj;
41
42 gk20a_dbg_info(" Done");
43
44 return 0;
45}
46
47static u32 _pwr_domains_pmudatainit_ina3221(struct gk20a *g,
48 struct boardobj *board_obj_ptr,
49 struct nv_pmu_boardobj *ppmudata)
50{
51 struct nv_pmu_pmgr_pwr_device_desc_ina3221 *ina3221_desc;
52 struct pwr_device_ina3221 *ina3221;
53 u32 status = 0;
54 u32 indx;
55
56 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
57 if (status) {
58 gk20a_err(dev_from_gk20a(g),
59 "error updating pmu boardobjgrp for pwr domain 0x%x",
60 status);
61 goto done;
62 }
63
64 ina3221 = (struct pwr_device_ina3221 *)board_obj_ptr;
65 ina3221_desc = (struct nv_pmu_pmgr_pwr_device_desc_ina3221 *) ppmudata;
66
67 ina3221_desc->super.power_corr_factor = ina3221->super.power_corr_factor;
68 ina3221_desc->i2c_dev_idx = ina3221->super.i2c_dev_idx;
69 ina3221_desc->configuration = ina3221->configuration;
70 ina3221_desc->mask_enable = ina3221->mask_enable;
71 /* configure NV_PMU_THERM_EVENT_EXT_OVERT */
72 ina3221_desc->event_mask = (1 << 0);
73 ina3221_desc->curr_correct_m = ina3221->curr_correct_m;
74 ina3221_desc->curr_correct_b = ina3221->curr_correct_b;
75
76 for (indx = 0; indx < NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM; indx++) {
77 ina3221_desc->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx];
78 }
79
80done:
81 return status;
82}
83
84static struct boardobj *construct_pwr_device(struct gk20a *g,
85 void *pargs, u16 pargs_size, u8 type)
86{
87 struct boardobj *board_obj_ptr = NULL;
88 u32 status;
89 u32 indx;
90 struct pwr_device_ina3221 *pwrdev;
91 struct pwr_device_ina3221 *ina3221 = (struct pwr_device_ina3221*)pargs;
92
93 status = boardobj_construct_super(g, &board_obj_ptr,
94 pargs_size, pargs);
95 if (status)
96 return NULL;
97
98 pwrdev = (struct pwr_device_ina3221*)board_obj_ptr;
99
100 /* Set Super class interfaces */
101 board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_ina3221;
102 pwrdev->super.power_rail = ina3221->super.power_rail;
103 pwrdev->super.i2c_dev_idx = ina3221->super.i2c_dev_idx;
104 pwrdev->super.power_corr_factor = (1 << 12);
105 pwrdev->super.bIs_inforom_config = false;
106
107 /* Set INA3221-specific information */
108 pwrdev->configuration = ina3221->configuration;
109 pwrdev->mask_enable = ina3221->mask_enable;
110 pwrdev->gpio_function = ina3221->gpio_function;
111 pwrdev->curr_correct_m = ina3221->curr_correct_m;
112 pwrdev->curr_correct_b = ina3221->curr_correct_b;
113
114 for (indx = 0; indx < NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM; indx++) {
115 pwrdev->r_shuntm_ohm[indx] = ina3221->r_shuntm_ohm[indx];
116 }
117
118 gk20a_dbg_info(" Done");
119
120 return board_obj_ptr;
121}
122
123static u32 devinit_get_pwr_device_table(struct gk20a *g,
124 struct pwr_devices *ppwrdeviceobjs)
125{
126 u32 status = 0;
127 u8 *pwr_device_table_ptr = NULL;
128 u8 *curr_pwr_device_table_ptr = NULL;
129 struct boardobj *boardobj;
130 struct pwr_sensors_2x_header pwr_sensor_table_header = { 0 };
131 struct pwr_sensors_2x_entry pwr_sensor_table_entry = { 0 };
132 u32 index;
133 u32 obj_index = 0;
134 u16 pwr_device_size;
135 union {
136 struct boardobj boardobj;
137 struct pwr_device pwrdev;
138 struct pwr_device_ina3221 ina3221;
139 } pwr_device_data;
140
141 gk20a_dbg_info("");
142
143 if (g->ops.bios.get_perf_table_ptrs != NULL) {
144 pwr_device_table_ptr = (u8 *)g->ops.bios.get_perf_table_ptrs(g,
145 g->bios.perf_token, POWER_SENSORS_TABLE);
146 if (pwr_device_table_ptr == NULL) {
147 status = -EINVAL;
148 goto done;
149 }
150 }
151
152 memcpy(&pwr_sensor_table_header, pwr_device_table_ptr,
153 VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08);
154
155 if (pwr_sensor_table_header.version !=
156 VBIOS_POWER_SENSORS_VERSION_2X) {
157 status = -EINVAL;
158 goto done;
159 }
160
161 if (pwr_sensor_table_header.header_size <
162 VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08) {
163 status = -EINVAL;
164 goto done;
165 }
166
167 if (pwr_sensor_table_header.table_entry_size !=
168 VBIOS_POWER_SENSORS_2X_ENTRY_SIZE_15) {
169 status = -EINVAL;
170 goto done;
171 }
172
173 curr_pwr_device_table_ptr = (pwr_device_table_ptr +
174 VBIOS_POWER_SENSORS_2X_HEADER_SIZE_08);
175
176 for (index = 0; index < pwr_sensor_table_header.num_table_entries; index++) {
177 bool use_fxp8_8 = false;
178 u8 i2c_dev_idx;
179 u8 device_type;
180
181 curr_pwr_device_table_ptr += (pwr_sensor_table_header.table_entry_size * index);
182
183 pwr_sensor_table_entry.flags0 = *curr_pwr_device_table_ptr;
184
185 memcpy(&pwr_sensor_table_entry.class_param0,
186 (curr_pwr_device_table_ptr + 1),
187 (VBIOS_POWER_SENSORS_2X_ENTRY_SIZE_15 - 1));
188
189 device_type = (u8)BIOS_GET_FIELD(
190 pwr_sensor_table_entry.flags0,
191 NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS);
192
193 if (device_type == NV_VBIOS_POWER_SENSORS_2X_ENTRY_FLAGS0_CLASS_I2C) {
194 i2c_dev_idx = (u8)BIOS_GET_FIELD(
195 pwr_sensor_table_entry.class_param0,
196 NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_INDEX);
197 use_fxp8_8 = (u8)BIOS_GET_FIELD(
198 pwr_sensor_table_entry.class_param0,
199 NV_VBIOS_POWER_SENSORS_2X_ENTRY_CLASS_PARAM0_I2C_USE_FXP8_8);
200
201 pwr_device_data.ina3221.super.i2c_dev_idx = i2c_dev_idx;
202 pwr_device_data.ina3221.r_shuntm_ohm[0].use_fxp8_8 = use_fxp8_8;
203 pwr_device_data.ina3221.r_shuntm_ohm[1].use_fxp8_8 = use_fxp8_8;
204 pwr_device_data.ina3221.r_shuntm_ohm[2].use_fxp8_8 = use_fxp8_8;
205 pwr_device_data.ina3221.r_shuntm_ohm[0].rshunt_value =
206 (u16)BIOS_GET_FIELD(
207 pwr_sensor_table_entry.sensor_param0,
208 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT0_MOHM);
209
210 pwr_device_data.ina3221.r_shuntm_ohm[1].rshunt_value =
211 (u16)BIOS_GET_FIELD(
212 pwr_sensor_table_entry.sensor_param0,
213 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM0_INA3221_RSHUNT1_MOHM);
214
215 pwr_device_data.ina3221.r_shuntm_ohm[2].rshunt_value =
216 (u16)BIOS_GET_FIELD(
217 pwr_sensor_table_entry.sensor_param1,
218 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_RSHUNT2_MOHM);
219 pwr_device_data.ina3221.configuration =
220 (u16)BIOS_GET_FIELD(
221 pwr_sensor_table_entry.sensor_param1,
222 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM1_INA3221_CONFIGURATION);
223
224 pwr_device_data.ina3221.mask_enable =
225 (u16)BIOS_GET_FIELD(
226 pwr_sensor_table_entry.sensor_param2,
227 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_MASKENABLE);
228
229 pwr_device_data.ina3221.gpio_function =
230 (u8)BIOS_GET_FIELD(
231 pwr_sensor_table_entry.sensor_param2,
232 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM2_INA3221_GPIOFUNCTION);
233
234 pwr_device_data.ina3221.curr_correct_m =
235 (u16)BIOS_GET_FIELD(
236 pwr_sensor_table_entry.sensor_param3,
237 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_M);
238
239 pwr_device_data.ina3221.curr_correct_b =
240 (u16)BIOS_GET_FIELD(
241 pwr_sensor_table_entry.sensor_param3,
242 NV_VBIOS_POWER_SENSORS_2X_ENTRY_SENSOR_PARAM3_INA3221_CURR_CORRECT_B);
243
244 if (!pwr_device_data.ina3221.curr_correct_m) {
245 pwr_device_data.ina3221.curr_correct_m = (1 << 12);
246 }
247 pwr_device_size = sizeof(struct pwr_device_ina3221);
248 } else
249 continue;
250
251 pwr_device_data.boardobj.type = CTRL_PMGR_PWR_DEVICE_TYPE_INA3221;
252 pwr_device_data.pwrdev.power_rail = (u8)0;
253
254 boardobj = construct_pwr_device(g, &pwr_device_data,
255 pwr_device_size, pwr_device_data.boardobj.type);
256
257 if (!boardobj) {
258 gk20a_err(dev_from_gk20a(g),
259 "unable to create pwr device for %d type %d", index, pwr_device_data.boardobj.type);
260 status = -EINVAL;
261 goto done;
262 }
263
264 status = boardobjgrp_objinsert(&ppwrdeviceobjs->super.super,
265 boardobj, obj_index);
266
267 if (status) {
268 gk20a_err(dev_from_gk20a(g),
269 "unable to insert pwr device boardobj for %d", index);
270 status = -EINVAL;
271 goto done;
272 }
273
274 ++obj_index;
275 }
276
277done:
278 gk20a_dbg_info(" done status %x", status);
279 return status;
280}
281
282u32 pmgr_device_sw_setup(struct gk20a *g)
283{
284 u32 status;
285 struct boardobjgrp *pboardobjgrp = NULL;
286 struct pwr_devices *ppwrdeviceobjs;
287
288 /* Construct the Super Class and override the Interfaces */
289 status = boardobjgrpconstruct_e32(&g->pmgr_pmu.pmgr_deviceobjs.super);
290 if (status) {
291 gk20a_err(dev_from_gk20a(g),
292 "error creating boardobjgrp for pmgr devices, status - 0x%x",
293 status);
294 goto done;
295 }
296
297 pboardobjgrp = &g->pmgr_pmu.pmgr_deviceobjs.super.super;
298 ppwrdeviceobjs = &(g->pmgr_pmu.pmgr_deviceobjs);
299
300 /* Override the Interfaces */
301 pboardobjgrp->pmudatainstget = _pwr_device_pmudata_instget;
302
303 status = devinit_get_pwr_device_table(g, ppwrdeviceobjs);
304 if (status)
305 goto done;
306
307done:
308 gk20a_dbg_info(" done status %x", status);
309 return status;
310}
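
The (1 << 12) defaults used for power_corr_factor and curr_correct_m read as unity in a 4.12 fixed-point format, so a correction of the form corrected = ((raw * m) >> 12) + b leaves the INA3221 readings unchanged when m is 0x1000 and b is 0. That interpretation is an assumption drawn from the defaults, not spelled out in this change; a sketch:

/* assumed FXP4.12 slope and signed mA offset; ina3221_correct_ma() is
 * illustrative only */
static inline u32 ina3221_correct_ma(u32 raw_ma, u16 m, s16 b)
{
	return (u32)(((raw_ma * m) >> 12) + b);
}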
diff --git a/drivers/gpu/nvgpu/pmgr/pwrdev.h b/drivers/gpu/nvgpu/pmgr/pwrdev.h
new file mode 100644
index 00000000..b8592a18
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrdev.h
@@ -0,0 +1,51 @@
1/*
2 * general power device structures & definitions
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#ifndef _PWRDEV_H_
16#define _PWRDEV_H_
17
18#include "boardobj/boardobj.h"
19#include "pmuif/gpmuifpmgr.h"
20#include "ctrl/ctrlpmgr.h"
21
22#define PWRDEV_I2CDEV_DEVICE_INDEX_NONE (0xFF)
23
24#define PWR_DEVICE_PROV_NUM_DEFAULT 1
25
26struct pwr_device {
27 struct boardobj super;
28 u8 power_rail;
29 u8 i2c_dev_idx;
30 bool bIs_inforom_config;
31 u32 power_corr_factor;
32};
33
34struct pwr_devices {
35 struct boardobjgrp_e32 super;
36};
37
38struct pwr_device_ina3221 {
39 struct pwr_device super;
40 struct ctrl_pmgr_pwr_device_info_rshunt
41 r_shuntm_ohm[NV_PMU_PMGR_PWR_DEVICE_INA3221_CH_NUM];
42 u16 configuration;
43 u16 mask_enable;
44 u8 gpio_function;
45 u16 curr_correct_m;
46 s16 curr_correct_b;
47};
48
49u32 pmgr_device_sw_setup(struct gk20a *g);
50
51#endif
diff --git a/drivers/gpu/nvgpu/pmgr/pwrmonitor.c b/drivers/gpu/nvgpu/pmgr/pwrmonitor.c
new file mode 100644
index 00000000..c28751fd
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrmonitor.c
@@ -0,0 +1,365 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15#include "pwrdev.h"
16#include "include/bios.h"
17#include "boardobj/boardobjgrp.h"
18#include "boardobj/boardobjgrp_e32.h"
19#include "pmuif/gpmuifboardobj.h"
20#include "pmuif/gpmuifpmgr.h"
21#include "gm206/bios_gm206.h"
22#include "gk20a/pmu_gk20a.h"
23
24static u32 _pwr_channel_pmudata_instget(struct gk20a *g,
25 struct nv_pmu_boardobjgrp *pmuboardobjgrp,
26 struct nv_pmu_boardobj **ppboardobjpmudata,
27 u8 idx)
28{
29 struct nv_pmu_pmgr_pwr_channel_desc *ppmgrchannel =
30 (struct nv_pmu_pmgr_pwr_channel_desc *)pmuboardobjgrp;
31
32 gk20a_dbg_info("");
33
34 /*check whether pmuboardobjgrp has a valid boardobj in index*/
35 if (((u32)BIT(idx) &
36 ppmgrchannel->hdr.data.super.obj_mask.super.data[0]) == 0)
37 return -EINVAL;
38
39 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
40 &ppmgrchannel->channels[idx].data.board_obj;
41
42 /* handle Global/common data here as we need index */
43 ppmgrchannel->channels[idx].data.pwr_channel.ch_idx = idx;
44
45 gk20a_dbg_info(" Done");
46
47 return 0;
48}
49
50static u32 _pwr_channel_rels_pmudata_instget(struct gk20a *g,
51 struct nv_pmu_boardobjgrp *pmuboardobjgrp,
52 struct nv_pmu_boardobj **ppboardobjpmudata,
53 u8 idx)
54{
55 struct nv_pmu_pmgr_pwr_chrelationship_desc *ppmgrchrels =
56 (struct nv_pmu_pmgr_pwr_chrelationship_desc *)pmuboardobjgrp;
57
58 gk20a_dbg_info("");
59
60 /*check whether pmuboardobjgrp has a valid boardobj in index*/
61 if (((u32)BIT(idx) &
62 ppmgrchrels->hdr.data.super.obj_mask.super.data[0]) == 0)
63 return -EINVAL;
64
65 *ppboardobjpmudata = (struct nv_pmu_boardobj *)
66 &ppmgrchrels->ch_rels[idx].data.board_obj;
67
68 gk20a_dbg_info(" Done");
69
70 return 0;
71}
72
73static u32 _pwr_channel_state_init(struct gk20a *g)
74{
75 u8 indx = 0;
76 struct pwr_channel *pchannel;
77 u32 objmask =
78 g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super.objmask;
79
80 /* Initialize each PWR_CHANNEL's dependent channel mask */
81 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK(32, indx, objmask) {
82 pchannel = PMGR_PWR_MONITOR_GET_PWR_CHANNEL(g, indx);
83 if (pchannel == NULL) {
84 gk20a_err(dev_from_gk20a(g),
85 "PMGR_PWR_MONITOR_GET_PWR_CHANNEL-failed %d", indx);
86 return -EINVAL;
87 }
88 pchannel->dependent_ch_mask = 0;
89 }
90 BOARDOBJGRP_FOR_EACH_INDEX_IN_MASK_END
91
92 return 0;
93}
94
95static bool _pwr_channel_implements(struct pwr_channel *pchannel,
96 u8 type)
97{
98 return (type == BOARDOBJ_GET_TYPE(pchannel));
99}
100
101static u32 _pwr_domains_pmudatainit_sensor(struct gk20a *g,
102 struct boardobj *board_obj_ptr,
103 struct nv_pmu_boardobj *ppmudata)
104{
105 struct nv_pmu_pmgr_pwr_channel_sensor *pmu_sensor_data;
106 struct pwr_channel_sensor *sensor;
107 u32 status = 0;
108
109 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
110 if (status) {
111 gk20a_err(dev_from_gk20a(g),
112 "error updating pmu boardobjgrp for pwr sensor 0x%x",
113 status);
114 goto done;
115 }
116
117 sensor = (struct pwr_channel_sensor *)board_obj_ptr;
118 pmu_sensor_data = (struct nv_pmu_pmgr_pwr_channel_sensor *) ppmudata;
119
120 pmu_sensor_data->super.pwr_rail = sensor->super.pwr_rail;
121 pmu_sensor_data->super.volt_fixedu_v = sensor->super.volt_fixed_uv;
122 pmu_sensor_data->super.pwr_corr_slope = sensor->super.pwr_corr_slope;
123 pmu_sensor_data->super.pwr_corr_offsetm_w = sensor->super.pwr_corr_offset_mw;
124 pmu_sensor_data->super.curr_corr_slope = sensor->super.curr_corr_slope;
125 pmu_sensor_data->super.curr_corr_offsetm_a = sensor->super.curr_corr_offset_ma;
126 pmu_sensor_data->super.dependent_ch_mask = sensor->super.dependent_ch_mask;
127 pmu_sensor_data->super.ch_idx = 0;
128
129 pmu_sensor_data->pwr_dev_idx = sensor->pwr_dev_idx;
130 pmu_sensor_data->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx;
131
132done:
133 return status;
134}
135
136static struct boardobj *construct_pwr_topology(struct gk20a *g,
137 void *pargs, u16 pargs_size, u8 type)
138{
139 struct boardobj *board_obj_ptr = NULL;
140 u32 status;
141 struct pwr_channel_sensor *pwrchannel;
142 struct pwr_channel_sensor *sensor = (struct pwr_channel_sensor*)pargs;
143
144 status = boardobj_construct_super(g, &board_obj_ptr,
145 pargs_size, pargs);
146 if (status)
147 return NULL;
148
149 pwrchannel = (struct pwr_channel_sensor*)board_obj_ptr;
150
151 /* Set Super class interfaces */
152 board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_sensor;
153
154 pwrchannel->super.pwr_rail = sensor->super.pwr_rail;
155 pwrchannel->super.volt_fixed_uv = sensor->super.volt_fixed_uv;
156 pwrchannel->super.pwr_corr_slope = sensor->super.pwr_corr_slope;
157 pwrchannel->super.pwr_corr_offset_mw = sensor->super.pwr_corr_offset_mw;
158 pwrchannel->super.curr_corr_slope = sensor->super.curr_corr_slope;
159 pwrchannel->super.curr_corr_offset_ma = sensor->super.curr_corr_offset_ma;
160 pwrchannel->super.dependent_ch_mask = 0;
161
162 pwrchannel->pwr_dev_idx = sensor->pwr_dev_idx;
163 pwrchannel->pwr_dev_prov_idx = sensor->pwr_dev_prov_idx;
164
165 gk20a_dbg_info(" Done");
166
167 return board_obj_ptr;
168}
169
170static u32 devinit_get_pwr_topology_table(struct gk20a *g,
171 struct pmgr_pwr_monitor *ppwrmonitorobjs)
172{
173 u32 status = 0;
174 u8 *pwr_topology_table_ptr = NULL;
175 u8 *curr_pwr_topology_table_ptr = NULL;
176 struct boardobj *boardobj;
177 struct pwr_topology_2x_header pwr_topology_table_header = { 0 };
178 struct pwr_topology_2x_entry pwr_topology_table_entry = { 0 };
179 u32 index;
180 u32 obj_index = 0;
181 u16 pwr_topology_size;
182 union {
183 struct boardobj boardobj;
184 struct pwr_channel pwrchannel;
185 struct pwr_channel_sensor sensor;
186 } pwr_topology_data;
187
188 gk20a_dbg_info("");
189
190 if (g->ops.bios.get_perf_table_ptrs != NULL) {
191 pwr_topology_table_ptr = (u8 *)g->ops.bios.get_perf_table_ptrs(g,
192 g->bios.perf_token, POWER_TOPOLOGY_TABLE);
193 if (pwr_topology_table_ptr == NULL) {
194 status = -EINVAL;
195 goto done;
196 }
197 }
198
199 memcpy(&pwr_topology_table_header, pwr_topology_table_ptr,
200 VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06);
201
202 if (pwr_topology_table_header.version !=
203 VBIOS_POWER_TOPOLOGY_VERSION_2X) {
204 status = -EINVAL;
205 goto done;
206 }
207
208 g->pmgr_pmu.pmgr_monitorobjs.b_is_topology_tbl_ver_1x = false;
209
210 if (pwr_topology_table_header.header_size <
211 VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06) {
212 status = -EINVAL;
213 goto done;
214 }
215
216 if (pwr_topology_table_header.table_entry_size !=
217 VBIOS_POWER_TOPOLOGY_2X_ENTRY_SIZE_16) {
218 status = -EINVAL;
219 goto done;
220 }
221
222 curr_pwr_topology_table_ptr = (pwr_topology_table_ptr +
223 VBIOS_POWER_TOPOLOGY_2X_HEADER_SIZE_06);
224
225 for (index = 0; index < pwr_topology_table_header.num_table_entries;
226 index++) {
227 u8 class_type;
228
229 curr_pwr_topology_table_ptr += (pwr_topology_table_header.table_entry_size * index);
230
231 pwr_topology_table_entry.flags0 = *curr_pwr_topology_table_ptr;
232 pwr_topology_table_entry.pwr_rail = *(curr_pwr_topology_table_ptr + 1);
233
234 memcpy(&pwr_topology_table_entry.param0,
235 (curr_pwr_topology_table_ptr + 2),
236 (VBIOS_POWER_TOPOLOGY_2X_ENTRY_SIZE_16 - 2));
237
238 class_type = (u8)BIOS_GET_FIELD(
239 pwr_topology_table_entry.flags0,
240 NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS);
241
242 if (class_type == NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_FLAGS0_CLASS_SENSOR) {
243 pwr_topology_data.sensor.pwr_dev_idx = (u8)BIOS_GET_FIELD(
244 pwr_topology_table_entry.param1,
245 NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_INDEX);
246 pwr_topology_data.sensor.pwr_dev_prov_idx = (u8)BIOS_GET_FIELD(
247 pwr_topology_table_entry.param1,
248 NV_VBIOS_POWER_TOPOLOGY_2X_ENTRY_PARAM1_SENSOR_PROVIDER_INDEX);
249
250 pwr_topology_size = sizeof(struct pwr_channel_sensor);
251 } else
252 continue;
253
254 /* Initialize data for the parent class */
255 pwr_topology_data.boardobj.type = CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR;
256 pwr_topology_data.pwrchannel.pwr_rail = (u8)pwr_topology_table_entry.pwr_rail;
257 pwr_topology_data.pwrchannel.volt_fixed_uv = pwr_topology_table_entry.param0;
258 pwr_topology_data.pwrchannel.pwr_corr_slope = (1 << 12);
259 pwr_topology_data.pwrchannel.pwr_corr_offset_mw = 0;
260 pwr_topology_data.pwrchannel.curr_corr_slope =
261 (u32)pwr_topology_table_entry.curr_corr_slope;
262 pwr_topology_data.pwrchannel.curr_corr_offset_ma =
263 (s32)pwr_topology_table_entry.curr_corr_offset;
264
265 boardobj = construct_pwr_topology(g, &pwr_topology_data,
266 pwr_topology_size, pwr_topology_data.boardobj.type);
267
268 if (!boardobj) {
269 gk20a_err(dev_from_gk20a(g),
270 "unable to create pwr topology for %d type %d",
271 index, pwr_topology_data.boardobj.type);
272 status = -EINVAL;
273 goto done;
274 }
275
276 status = boardobjgrp_objinsert(&ppwrmonitorobjs->pwr_channels.super,
277 boardobj, obj_index);
278
279 if (status) {
280 gk20a_err(dev_from_gk20a(g),
281 "unable to insert pwr topology boardobj for %d", index);
282 status = -EINVAL;
283 goto done;
284 }
285
286 ++obj_index;
287 }
288
289done:
290 gk20a_dbg_info(" done status %x", status);
291 return status;
292}
293
294u32 pmgr_monitor_sw_setup(struct gk20a *g)
295{
296 u32 status;
297 struct boardobjgrp *pboardobjgrp = NULL;
298 struct pwr_channel *pchannel;
299 struct pmgr_pwr_monitor *ppwrmonitorobjs;
300 u8 indx = 0;
301
302 /* Construct the Super Class and override the Interfaces */
303 status = boardobjgrpconstruct_e32(
304 &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels);
305 if (status) {
306 gk20a_err(dev_from_gk20a(g),
307 "error creating boardobjgrp for pmgr channel, status - 0x%x",
308 status);
309 goto done;
310 }
311
312 pboardobjgrp = &(g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super);
313
314 /* Override the Interfaces */
315 pboardobjgrp->pmudatainstget = _pwr_channel_pmudata_instget;
316
317 /* Construct the Super Class and override the Interfaces */
318 status = boardobjgrpconstruct_e32(
319 &g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels);
320 if (status) {
321 gk20a_err(dev_from_gk20a(g),
322 "error creating boardobjgrp for pmgr channel relationship, status - 0x%x",
323 status);
324 goto done;
325 }
326
327 pboardobjgrp = &(g->pmgr_pmu.pmgr_monitorobjs.pwr_ch_rels.super);
328
329 /* Override the Interfaces */
330 pboardobjgrp->pmudatainstget = _pwr_channel_rels_pmudata_instget;
331
332 /* Initialize the Total GPU Power Channel Mask to 0 */
333 g->pmgr_pmu.pmgr_monitorobjs.pmu_data.channels.hdr.data.total_gpu_power_channel_mask = 0;
334 g->pmgr_pmu.pmgr_monitorobjs.total_gpu_channel_idx =
335 CTRL_PMGR_PWR_CHANNEL_INDEX_INVALID;
336
337 /* Supported topology table version 1.0 */
338 g->pmgr_pmu.pmgr_monitorobjs.b_is_topology_tbl_ver_1x = true;
339
340 ppwrmonitorobjs = &(g->pmgr_pmu.pmgr_monitorobjs);
341
342 status = devinit_get_pwr_topology_table(g, ppwrmonitorobjs);
343 if (status)
344 goto done;
345
346 status = _pwr_channel_state_init(g);
347 if (status)
348 goto done;
349
350 /* Initialise physicalChannelMask */
351 g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask = 0;
352
353 pboardobjgrp = &g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super;
354
355 BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct pwr_channel *, pchannel, indx) {
356 if (_pwr_channel_implements(pchannel,
357 CTRL_PMGR_PWR_CHANNEL_TYPE_SENSOR)) {
358 g->pmgr_pmu.pmgr_monitorobjs.physical_channel_mask |= BIT(indx);
359 }
360 }
361
362done:
363 gk20a_dbg_info(" done status %x", status);
364 return status;
365}
diff --git a/drivers/gpu/nvgpu/pmgr/pwrmonitor.h b/drivers/gpu/nvgpu/pmgr/pwrmonitor.h
new file mode 100644
index 00000000..7cd6b8c9
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrmonitor.h
@@ -0,0 +1,60 @@
1/*
2 * general power channel structures & definitions
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#ifndef _PWRMONITOR_H_
16#define _PWRMONITOR_H_
17
18#include "boardobj/boardobjgrp.h"
19#include "boardobj/boardobj.h"
20#include "pmuif/gpmuifpmgr.h"
21#include "ctrl/ctrlpmgr.h"
22
23struct pwr_channel {
24 struct boardobj super;
25 u8 pwr_rail;
26 u32 volt_fixed_uv;
27 u32 pwr_corr_slope;
28 s32 pwr_corr_offset_mw;
29 u32 curr_corr_slope;
30 s32 curr_corr_offset_ma;
31 u32 dependent_ch_mask;
32};
33
34struct pwr_chrelationship {
35 struct boardobj super;
36 u8 chIdx;
37};
38
39struct pwr_channel_sensor {
40 struct pwr_channel super;
41 u8 pwr_dev_idx;
42 u8 pwr_dev_prov_idx;
43};
44
45struct pmgr_pwr_monitor {
46 bool b_is_topology_tbl_ver_1x;
47 struct boardobjgrp_e32 pwr_channels;
48 struct boardobjgrp_e32 pwr_ch_rels;
49 u8 total_gpu_channel_idx;
50 u32 physical_channel_mask;
51 struct nv_pmu_pmgr_pwr_monitor_pack pmu_data;
52};
53
54#define PMGR_PWR_MONITOR_GET_PWR_CHANNEL(g, channel_idx) \
55 ((struct pwr_channel *)BOARDOBJGRP_OBJ_GET_BY_IDX( \
56 &(g->pmgr_pmu.pmgr_monitorobjs.pwr_channels.super), (channel_idx)))
57
58u32 pmgr_monitor_sw_setup(struct gk20a *g);
59
60#endif
diff --git a/drivers/gpu/nvgpu/pmgr/pwrpolicy.c b/drivers/gpu/nvgpu/pmgr/pwrpolicy.c
new file mode 100644
index 00000000..d7926773
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrpolicy.c
@@ -0,0 +1,765 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15#include "pwrpolicy.h"
16#include "include/bios.h"
17#include "boardobj/boardobjgrp.h"
18#include "boardobj/boardobjgrp_e32.h"
19#include "pmuif/gpmuifboardobj.h"
20#include "pmuif/gpmuifpmgr.h"
21#include "gm206/bios_gm206.h"
22#include "gk20a/pmu_gk20a.h"
23
24#define _pwr_policy_limitarboutputget_helper(p_limit_arb) (p_limit_arb)->output
25#define _pwr_policy_limitdeltaapply(limit, delta) ((u32)max(((s32)limit) + (delta), 0))
26
27static u32 _pwr_policy_limitarbinputset_helper(struct gk20a *g,
28 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb,
29 u8 client_idx,
30 u32 limit_value)
31{
32 u8 indx;
33 bool b_found = false;
34 u32 status = 0;
35 u32 output = limit_value;
36
37 for (indx = 0; indx < p_limit_arb->num_inputs; indx++) {
38 if (p_limit_arb->inputs[indx].pwr_policy_idx == client_idx) {
39 p_limit_arb->inputs[indx].limit_value = limit_value;
40 b_found = true;
41 } else if (p_limit_arb->b_arb_max) {
42 output = max(output, p_limit_arb->inputs[indx].limit_value);
43 } else {
44 output = min(output, p_limit_arb->inputs[indx].limit_value);
45 }
46 }
47
48 if (!b_found) {
49 if (p_limit_arb->num_inputs <
50 CTRL_PMGR_PWR_POLICY_MAX_LIMIT_INPUTS) {
51 p_limit_arb->inputs[
52 p_limit_arb->num_inputs].pwr_policy_idx = client_idx;
53 p_limit_arb->inputs[
54 p_limit_arb->num_inputs].limit_value = limit_value;
55 p_limit_arb->num_inputs++;
56 } else {
57 gk20a_err(g->dev, "No entries remaining for clientIdx=%d",
58 client_idx);
59 status = -EINVAL;
60 }
61 }
62
63 if (!status) {
64 p_limit_arb->output = output;
65 }
66
67 return status;
68}
69
70static u32 _pwr_policy_limitid_translate(struct gk20a *g,
71 struct pwr_policy *ppolicy,
72 enum pwr_policy_limit_id limit_id,
73 struct ctrl_pmgr_pwr_policy_limit_arbitration **p_limit_arb,
74 struct ctrl_pmgr_pwr_policy_limit_arbitration **p_limit_arb_sec)
75{
76 u32 status = 0;
77
78 switch (limit_id) {
79 case PWR_POLICY_LIMIT_ID_MIN:
80 *p_limit_arb = &ppolicy->limit_arb_min;
81 break;
82
83 case PWR_POLICY_LIMIT_ID_RATED:
84 *p_limit_arb = &ppolicy->limit_arb_rated;
85
86 if (p_limit_arb_sec != NULL) {
87 *p_limit_arb_sec = &ppolicy->limit_arb_curr;
88 }
89 break;
90
91 case PWR_POLICY_LIMIT_ID_MAX:
92 *p_limit_arb = &ppolicy->limit_arb_max;
93 break;
94
95 case PWR_POLICY_LIMIT_ID_CURR:
96 *p_limit_arb = &ppolicy->limit_arb_curr;
97 break;
98
99 case PWR_POLICY_LIMIT_ID_BATT:
100 *p_limit_arb = &ppolicy->limit_arb_batt;
101 break;
102
103 default:
104 gk20a_err(g->dev, "Unsupported limitId=%d",
105 limit_id);
106 status = -EINVAL;
107 break;
108 }
109
110 return status;
111}
112
113static u32 _pwr_policy_limitarbinputset(struct gk20a *g,
114 struct pwr_policy *ppolicy,
115 enum pwr_policy_limit_id limit_id,
116 u8 client_idx,
117 u32 limit)
118{
119 u32 status = 0;
120 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb = NULL;
121 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb_sec = NULL;
122
123 status = _pwr_policy_limitid_translate(g,
124 ppolicy,
125 limit_id,
126 &p_limit_arb,
127 &p_limit_arb_sec);
128 if (status) {
129 goto exit;
130 }
131
132 status = _pwr_policy_limitarbinputset_helper(g, p_limit_arb, client_idx, limit);
133 if (status) {
134 gk20a_err(g->dev,
135 "Error setting client limit value: status=0x%08x, limitId=0x%x, clientIdx=0x%x, limit=%d",
136 status, limit_id, client_idx, limit);
137 goto exit;
138 }
139
140 if (NULL != p_limit_arb_sec) {
141 status = _pwr_policy_limitarbinputset_helper(g, p_limit_arb_sec,
142 CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
143 _pwr_policy_limitarboutputget_helper(p_limit_arb));
144 }
145
146exit:
147 return status;
148}
149
150static inline void _pwr_policy_limitarbconstruct(
151 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb,
152 bool b_arb_max)
153{
154 p_limit_arb->num_inputs = 0;
155 p_limit_arb->b_arb_max = b_arb_max;
156}
157
158static u32 _pwr_policy_limitarboutputget(struct gk20a *g,
159 struct pwr_policy *ppolicy,
160 enum pwr_policy_limit_id limit_id)
161{
162 u32 status = 0;
163 struct ctrl_pmgr_pwr_policy_limit_arbitration *p_limit_arb = NULL;
164
165 status = _pwr_policy_limitid_translate(g,
166 ppolicy,
167 limit_id,
168 &p_limit_arb,
169 NULL);
170 if (status) {
171 return 0;
172 }
173
174 return _pwr_policy_limitarboutputget_helper(p_limit_arb);
175}
176
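
Each arbiter keeps one pending request per client policy index and publishes either the largest (b_arb_max) or the smallest request as its output. A worked example with hypothetical values, assuming ppolicy points at a constructed pwr_policy, its limit_unit is mW, and limit_arb_curr was constructed with b_arb_max == false:

/* client 0 asks for 180 W, client 1 for 150 W; the minimum wins */
_pwr_policy_limitarbinputset_helper(g, &ppolicy->limit_arb_curr, 0, 180000);
_pwr_policy_limitarbinputset_helper(g, &ppolicy->limit_arb_curr, 1, 150000);
/* ppolicy->limit_arb_curr.output is now 150000 */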
177static u32 _pwr_domains_pmudatainit_hw_threshold(struct gk20a *g,
178 struct boardobj *board_obj_ptr,
179 struct nv_pmu_boardobj *ppmudata)
180{
181 struct nv_pmu_pmgr_pwr_policy_hw_threshold *pmu_hw_threshold_data;
182 struct pwr_policy_hw_threshold *p_hw_threshold;
183 struct pwr_policy *p_pwr_policy;
184 struct nv_pmu_pmgr_pwr_policy *pmu_pwr_policy;
185 u32 status = 0;
186
187 status = boardobj_pmudatainit_super(g, board_obj_ptr, ppmudata);
188 if (status) {
189 gk20a_err(dev_from_gk20a(g),
190 "error updating pmu boardobjgrp for pwr sensor 0x%x",
191 status);
192 status = -ENOMEM;
193 goto done;
194 }
195
196 p_hw_threshold = (struct pwr_policy_hw_threshold *)board_obj_ptr;
197 pmu_hw_threshold_data = (struct nv_pmu_pmgr_pwr_policy_hw_threshold *) ppmudata;
198 pmu_pwr_policy = (struct nv_pmu_pmgr_pwr_policy *) ppmudata;
199 p_pwr_policy = (struct pwr_policy *)&(p_hw_threshold->super.super);
200
201 pmu_pwr_policy->ch_idx = 0;
202 pmu_pwr_policy->limit_unit = p_pwr_policy->limit_unit;
203 pmu_pwr_policy->num_limit_inputs = p_pwr_policy->num_limit_inputs;
204
205 pmu_pwr_policy->limit_min = _pwr_policy_limitdeltaapply(
206 _pwr_policy_limitarboutputget(g, p_pwr_policy,
207 PWR_POLICY_LIMIT_ID_MIN),
208 p_pwr_policy->limit_delta);
209
210 pmu_pwr_policy->limit_max = _pwr_policy_limitdeltaapply(
211 _pwr_policy_limitarboutputget(g, p_pwr_policy,
212 PWR_POLICY_LIMIT_ID_MAX),
213 p_pwr_policy->limit_delta);
214
215 pmu_pwr_policy->limit_curr = _pwr_policy_limitdeltaapply(
216 _pwr_policy_limitarboutputget(g, p_pwr_policy,
217 PWR_POLICY_LIMIT_ID_CURR),
218 p_pwr_policy->limit_delta);
219
220 memcpy(&pmu_pwr_policy->integral, &p_pwr_policy->integral,
221 sizeof(struct ctrl_pmgr_pwr_policy_info_integral));
222
223 pmu_pwr_policy->sample_mult = p_pwr_policy->sample_mult;
224 pmu_pwr_policy->filter_type = p_pwr_policy->filter_type;
225 pmu_pwr_policy->filter_param = p_pwr_policy->filter_param;
226
227 pmu_hw_threshold_data->threshold_idx = p_hw_threshold->threshold_idx;
228 pmu_hw_threshold_data->low_threshold_idx = p_hw_threshold->low_threshold_idx;
229 pmu_hw_threshold_data->b_use_low_threshold = p_hw_threshold->b_use_low_threshold;
230 pmu_hw_threshold_data->low_threshold_value = p_hw_threshold->low_threshold_value;
231
232 if (BOARDOBJ_GET_TYPE(board_obj_ptr) ==
233 CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD) {
234 struct nv_pmu_pmgr_pwr_policy_sw_threshold *pmu_sw_threshold_data;
235 struct pwr_policy_sw_threshold *p_sw_threshold;
236
237 p_sw_threshold = (struct pwr_policy_sw_threshold *)board_obj_ptr;
238 pmu_sw_threshold_data =
239 (struct nv_pmu_pmgr_pwr_policy_sw_threshold *) ppmudata;
240 pmu_sw_threshold_data->event_id =
241 p_sw_threshold->event_id;
242 }
243done:
244 return status;
245}
246
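_pwr_policy_limitdeltaapply(), used above to bias the arbitrated min/max/curr limits before they are copied into the PMU payload, is defined earlier in this file and is not shown in this hunk. A minimal sketch of the assumed behaviour, a signed delta applied to an unsigned limit and clamped at zero:

/* Sketch only: apply a signed limit_delta to an unsigned limit without
 * letting the result underflow below zero. */
static u32 limit_delta_apply_sketch(u32 limit, s32 delta)
{
	if (delta < 0 && (u32)(-delta) > limit)
		return 0;
	return limit + delta;
}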
247static struct boardobj *construct_pwr_policy(struct gk20a *g,
248 void *pargs, u16 pargs_size, u8 type)
249{
250 struct boardobj *board_obj_ptr = NULL;
251 u32 status;
252 struct pwr_policy_hw_threshold *pwrpolicyhwthreshold;
253 struct pwr_policy *pwrpolicy;
254 struct pwr_policy *pwrpolicyparams = (struct pwr_policy*)pargs;
255 struct pwr_policy_hw_threshold *hwthreshold = (struct pwr_policy_hw_threshold*)pargs;
256
257 status = boardobj_construct_super(g, &board_obj_ptr,
258 pargs_size, pargs);
259 if (status)
260 return NULL;
261
262 pwrpolicyhwthreshold = (struct pwr_policy_hw_threshold*)board_obj_ptr;
263 pwrpolicy = (struct pwr_policy *)board_obj_ptr;
264
265 /* Set Super class interfaces */
266 board_obj_ptr->pmudatainit = _pwr_domains_pmudatainit_hw_threshold;
267
268 pwrpolicy->ch_idx = pwrpolicyparams->ch_idx;
269 pwrpolicy->num_limit_inputs = 0;
270 pwrpolicy->limit_unit = pwrpolicyparams->limit_unit;
271 pwrpolicy->filter_type = (enum ctrl_pmgr_pwr_policy_filter_type)(pwrpolicyparams->filter_type);
272 pwrpolicy->sample_mult = pwrpolicyparams->sample_mult;
273 switch (pwrpolicy->filter_type)
274 {
275 case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_NONE:
276 break;
277
278 case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_BLOCK:
279 pwrpolicy->filter_param.block.block_size =
280 pwrpolicyparams->filter_param.block.block_size;
281 break;
282
283 case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_MOVING_AVERAGE:
284 pwrpolicy->filter_param.moving_avg.window_size =
285 pwrpolicyparams->filter_param.moving_avg.window_size;
286 break;
287
288 case CTRL_PMGR_PWR_POLICY_FILTER_TYPE_IIR:
289 pwrpolicy->filter_param.iir.divisor = pwrpolicyparams->filter_param.iir.divisor;
290 break;
291
292 default:
293 gk20a_err(g->dev,
294 "Error: unrecognized Power Policy filter type: %d.\n",
295 pwrpolicy->filter_type);
296 }
297
298 _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_curr, false);
299
300 pwrpolicy->limit_delta = 0;
301
302 _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_min, true);
303 status = _pwr_policy_limitarbinputset(g,
304 pwrpolicy,
305 PWR_POLICY_LIMIT_ID_MIN,
306 CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
307 pwrpolicyparams->limit_min);
308
309 _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_max, false);
310 status = _pwr_policy_limitarbinputset(g,
311 pwrpolicy,
312 PWR_POLICY_LIMIT_ID_MAX,
313 CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
314 pwrpolicyparams->limit_max);
315
316 _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_rated, false);
317 status = _pwr_policy_limitarbinputset(g,
318 pwrpolicy,
319 PWR_POLICY_LIMIT_ID_RATED,
320 CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
321 pwrpolicyparams->limit_rated);
322
323 _pwr_policy_limitarbconstruct(&pwrpolicy->limit_arb_batt, false);
324 status = _pwr_policy_limitarbinputset(g,
325 pwrpolicy,
326 PWR_POLICY_LIMIT_ID_BATT,
327 CTRL_PMGR_PWR_POLICY_LIMIT_INPUT_CLIENT_IDX_RM,
328 ((pwrpolicyparams->limit_batt != 0) ?
329 pwrpolicyparams->limit_batt:
330 CTRL_PMGR_PWR_POLICY_LIMIT_MAX));
331
332 memcpy(&pwrpolicy->integral, &pwrpolicyparams->integral,
333 sizeof(struct ctrl_pmgr_pwr_policy_info_integral));
334
335 pwrpolicyhwthreshold->threshold_idx = hwthreshold->threshold_idx;
336 pwrpolicyhwthreshold->b_use_low_threshold = hwthreshold->b_use_low_threshold;
337 pwrpolicyhwthreshold->low_threshold_idx = hwthreshold->low_threshold_idx;
338 pwrpolicyhwthreshold->low_threshold_value = hwthreshold->low_threshold_value;
339
340 if (type == CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD) {
341 struct pwr_policy_sw_threshold *pwrpolicyswthreshold;
342 struct pwr_policy_sw_threshold *swthreshold =
343 (struct pwr_policy_sw_threshold*)pargs;
344
345 pwrpolicyswthreshold = (struct pwr_policy_sw_threshold*)board_obj_ptr;
346 pwrpolicyswthreshold->event_id = swthreshold->event_id;
347 }
348
349 gk20a_dbg_info(" Done");
350
351 return board_obj_ptr;
352}
353
354static u32 _pwr_policy_construct_WAR_policy(struct gk20a *g,
355 struct pmgr_pwr_policy *ppwrpolicyobjs,
356 union pwr_policy_data_union *ppwrpolicydata,
357 u16 pwr_policy_size,
358 u32 hw_threshold_policy_index,
359 u32 obj_index)
360{
361 u32 status = 0;
362 struct boardobj *boardobj;
363
364 if (!(hw_threshold_policy_index & 0x1)) {
365 /* CRIT policy */
366 ppwrpolicydata->pwrpolicy.limit_min = 1000;
367 ppwrpolicydata->pwrpolicy.limit_rated = 20000;
368 ppwrpolicydata->pwrpolicy.limit_max = 20000;
369 ppwrpolicydata->hw_threshold.threshold_idx = 0;
370 } else {
371 /* WARN policy */
372 ppwrpolicydata->pwrpolicy.limit_min = 1000;
373 ppwrpolicydata->pwrpolicy.limit_rated = 11600;
374 ppwrpolicydata->pwrpolicy.limit_max = 11600;
375 ppwrpolicydata->hw_threshold.threshold_idx = 1;
376 }
377
378 boardobj = construct_pwr_policy(g, ppwrpolicydata,
379 pwr_policy_size, ppwrpolicydata->boardobj.type);
380
381 if (!boardobj) {
382 gk20a_err(dev_from_gk20a(g),
383 "unable to create pwr policy for type %d", ppwrpolicydata->boardobj.type);
384 status = -EINVAL;
385 goto done;
386 }
387
388 status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super,
389 boardobj, obj_index);
390
391 if (status) {
392 gk20a_err(dev_from_gk20a(g),
393 "unable to insert pwr policy boardobj for %d", obj_index);
394 status = -EINVAL;
395 goto done;
396 }
397done:
398 return status;
399}
400
401static u32 _pwr_policy_construct_WAR_SW_Threshold_policy(struct gk20a *g,
402 struct pmgr_pwr_policy *ppwrpolicyobjs,
403 union pwr_policy_data_union *ppwrpolicydata,
404 u16 pwr_policy_size,
405 u32 obj_index)
406{
407 u32 status = 0;
408 struct boardobj *boardobj;
409
410 /* WARN policy */
411 ppwrpolicydata->pwrpolicy.limit_unit = 0;
412 ppwrpolicydata->pwrpolicy.limit_min = 10000;
413 ppwrpolicydata->pwrpolicy.limit_rated = 100000;
414 ppwrpolicydata->pwrpolicy.limit_max = 100000;
415 ppwrpolicydata->sw_threshold.threshold_idx = 1;
416 ppwrpolicydata->pwrpolicy.filter_type =
417 CTRL_PMGR_PWR_POLICY_FILTER_TYPE_MOVING_AVERAGE;
418 ppwrpolicydata->pwrpolicy.sample_mult = 5;
419
420 	/* Set the moving-average filter window size for the WAR policy */
421 ppwrpolicydata->pwrpolicy.filter_param.moving_avg.window_size = 10;
422
423 ppwrpolicydata->sw_threshold.event_id = 0x01;
424
425 ppwrpolicydata->boardobj.type = CTRL_PMGR_PWR_POLICY_TYPE_SW_THRESHOLD;
426
427 boardobj = construct_pwr_policy(g, ppwrpolicydata,
428 pwr_policy_size, ppwrpolicydata->boardobj.type);
429
430 if (!boardobj) {
431 gk20a_err(dev_from_gk20a(g),
432 "unable to create pwr policy for type %d", ppwrpolicydata->boardobj.type);
433 status = -EINVAL;
434 goto done;
435 }
436
437 status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super,
438 boardobj, obj_index);
439
440 if (status) {
441 gk20a_err(dev_from_gk20a(g),
442 "unable to insert pwr policy boardobj for %d", obj_index);
443 status = -EINVAL;
444 goto done;
445 }
446done:
447 return status;
448}
449
450static u32 devinit_get_pwr_policy_table(struct gk20a *g,
451 struct pmgr_pwr_policy *ppwrpolicyobjs)
452{
453 u32 status = 0;
454 u8 *pwr_policy_table_ptr = NULL;
455 u8 *curr_pwr_policy_table_ptr = NULL;
456 struct boardobj *boardobj;
457 struct pwr_policy_3x_header_struct pwr_policy_table_header = { 0 };
458 struct pwr_policy_3x_entry_struct pwr_policy_table_entry = { 0 };
459 u32 index;
460 u32 obj_index = 0;
461 u16 pwr_policy_size;
462 bool integral_control = false;
463 u32 hw_threshold_policy_index = 0;
464 u32 sw_threshold_policy_index = 0;
465 union pwr_policy_data_union pwr_policy_data;
466
467 gk20a_dbg_info("");
468
469 if (g->ops.bios.get_perf_table_ptrs != NULL) {
470 pwr_policy_table_ptr = (u8 *)g->ops.bios.get_perf_table_ptrs(g,
471 g->bios.perf_token, POWER_CAPPING_TABLE);
472 if (pwr_policy_table_ptr == NULL) {
473 status = -EINVAL;
474 goto done;
475 }
476 }
477
478 memcpy(&pwr_policy_table_header.version,
479 (pwr_policy_table_ptr),
480 14);
481
482 memcpy(&pwr_policy_table_header.d2_limit,
483 (pwr_policy_table_ptr + 14),
484 (VBIOS_POWER_POLICY_3X_ENTRY_SIZE_2E - 14));
485
486 if (pwr_policy_table_header.version !=
487 VBIOS_POWER_POLICY_VERSION_3X) {
488 status = -EINVAL;
489 goto done;
490 }
491
492 if (pwr_policy_table_header.header_size <
493 VBIOS_POWER_POLICY_3X_HEADER_SIZE_25) {
494 status = -EINVAL;
495 goto done;
496 }
497
498 if (pwr_policy_table_header.table_entry_size !=
499 VBIOS_POWER_POLICY_3X_ENTRY_SIZE_2E) {
500 status = -EINVAL;
501 goto done;
502 }
503
504 curr_pwr_policy_table_ptr = (pwr_policy_table_ptr +
505 VBIOS_POWER_POLICY_3X_HEADER_SIZE_25);
506
507 for (index = 0; index < pwr_policy_table_header.num_table_entries;
508 index++) {
509 u8 class_type;
510
511 		curr_pwr_policy_table_ptr = pwr_policy_table_ptr + VBIOS_POWER_POLICY_3X_HEADER_SIZE_25 + (pwr_policy_table_header.table_entry_size * index);
512
513 pwr_policy_table_entry.flags0 = *curr_pwr_policy_table_ptr;
514 pwr_policy_table_entry.ch_idx = *(curr_pwr_policy_table_ptr + 1);
515
516 memcpy(&pwr_policy_table_entry.limit_min,
517 (curr_pwr_policy_table_ptr + 2),
518 35);
519
520 memcpy(&pwr_policy_table_entry.ratio_min,
521 (curr_pwr_policy_table_ptr + 2 + 35),
522 4);
523
524 pwr_policy_table_entry.sample_mult =
525 *(curr_pwr_policy_table_ptr + 2 + 35 + 4);
526
527 memcpy(&pwr_policy_table_entry.filter_param,
528 (curr_pwr_policy_table_ptr + 2 + 35 + 4 + 1),
529 4);
530
531 class_type = (u8)BIOS_GET_FIELD(
532 pwr_policy_table_entry.flags0,
533 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS);
534
535 if (class_type == NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_CLASS_HW_THRESHOLD) {
536 ppwrpolicyobjs->version = CTRL_PMGR_PWR_POLICY_TABLE_VERSION_3X;
537 ppwrpolicyobjs->base_sample_period = (u16)
538 pwr_policy_table_header.base_sample_period;
539 ppwrpolicyobjs->min_client_sample_period = (u16)
540 pwr_policy_table_header.min_client_sample_period;
541 ppwrpolicyobjs->low_sampling_mult =
542 pwr_policy_table_header.low_sampling_mult;
543
544 ppwrpolicyobjs->policy_idxs[1] =
545 (u8)pwr_policy_table_header.tgp_policy_idx;
546 ppwrpolicyobjs->policy_idxs[0] =
547 (u8)pwr_policy_table_header.rtp_policy_idx;
548 ppwrpolicyobjs->policy_idxs[2] =
549 pwr_policy_table_header.mxm_policy_idx;
550 ppwrpolicyobjs->policy_idxs[3] =
551 pwr_policy_table_header.dnotifier_policy_idx;
552 ppwrpolicyobjs->ext_limits[0].limit =
553 pwr_policy_table_header.d2_limit;
554 ppwrpolicyobjs->ext_limits[1].limit =
555 pwr_policy_table_header.d3_limit;
556 ppwrpolicyobjs->ext_limits[2].limit =
557 pwr_policy_table_header.d4_limit;
558 ppwrpolicyobjs->ext_limits[3].limit =
559 pwr_policy_table_header.d5_limit;
560 ppwrpolicyobjs->policy_idxs[4] =
561 pwr_policy_table_header.pwr_tgt_policy_idx;
562 ppwrpolicyobjs->policy_idxs[5] =
563 pwr_policy_table_header.pwr_tgt_floor_policy_idx;
564 ppwrpolicyobjs->policy_idxs[6] =
565 pwr_policy_table_header.sm_bus_policy_idx;
566
567 integral_control = (bool)BIOS_GET_FIELD(
568 pwr_policy_table_entry.flags1,
569 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_INTEGRAL_CONTROL);
570
571 if (integral_control == 0x01) {
572 pwr_policy_data.pwrpolicy.integral.past_sample_count = (u8)
573 pwr_policy_table_entry.past_length;
574 pwr_policy_data.pwrpolicy.integral.next_sample_count = (u8)
575 pwr_policy_table_entry.next_length;
576 pwr_policy_data.pwrpolicy.integral.ratio_limit_max = (u16)
577 pwr_policy_table_entry.ratio_max;
578 pwr_policy_data.pwrpolicy.integral.ratio_limit_min = (u16)
579 pwr_policy_table_entry.ratio_min;
580 } else {
581 memset(&(pwr_policy_data.pwrpolicy.integral), 0x0,
582 sizeof(struct ctrl_pmgr_pwr_policy_info_integral));
583 }
584 pwr_policy_data.hw_threshold.threshold_idx = (u8)
585 BIOS_GET_FIELD(
586 pwr_policy_table_entry.param0,
587 NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_THRES_IDX);
588
589 pwr_policy_data.hw_threshold.b_use_low_threshold =
590 BIOS_GET_FIELD(
591 pwr_policy_table_entry.param0,
592 NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_USE);
593
594 if (pwr_policy_data.hw_threshold.b_use_low_threshold) {
595 pwr_policy_data.hw_threshold.low_threshold_idx = (u8)
596 BIOS_GET_FIELD(
597 pwr_policy_table_entry.param0,
598 NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM0_HW_THRESHOLD_LOW_THRESHOLD_IDX);
599
600 pwr_policy_data.hw_threshold.low_threshold_value = (u16)
601 BIOS_GET_FIELD(
602 pwr_policy_table_entry.param1,
603 NV_VBIOS_POWER_POLICY_3X_ENTRY_PARAM1_HW_THRESHOLD_LOW_THRESHOLD_VAL);
604 }
605
606 pwr_policy_size = sizeof(struct pwr_policy_hw_threshold);
607 } else
608 continue;
609
610 /* Initialize data for the parent class */
611 pwr_policy_data.boardobj.type = CTRL_PMGR_PWR_POLICY_TYPE_HW_THRESHOLD;
612 pwr_policy_data.pwrpolicy.ch_idx = (u8)pwr_policy_table_entry.ch_idx;
613 pwr_policy_data.pwrpolicy.limit_unit = (u8)
614 BIOS_GET_FIELD(
615 pwr_policy_table_entry.flags0,
616 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS0_LIMIT_UNIT);
617 pwr_policy_data.pwrpolicy.filter_type = (u8)
618 BIOS_GET_FIELD(
619 pwr_policy_table_entry.flags1,
620 NV_VBIOS_POWER_POLICY_3X_ENTRY_FLAGS1_FILTER_TYPE);
621 pwr_policy_data.pwrpolicy.limit_min = pwr_policy_table_entry.limit_min;
622 pwr_policy_data.pwrpolicy.limit_rated = pwr_policy_table_entry.limit_rated;
623 pwr_policy_data.pwrpolicy.limit_max = pwr_policy_table_entry.limit_max;
624 pwr_policy_data.pwrpolicy.limit_batt = pwr_policy_table_entry.limit_batt;
625
626 pwr_policy_data.pwrpolicy.sample_mult = (u8)pwr_policy_table_entry.sample_mult;
627
628 		/* Clear all filter_param fields before constructing the policy */
629 pwr_policy_data.pwrpolicy.filter_param.block.block_size = 0;
630 pwr_policy_data.pwrpolicy.filter_param.moving_avg.window_size = 0;
631 pwr_policy_data.pwrpolicy.filter_param.iir.divisor = 0;
632
633 hw_threshold_policy_index |=
634 BIT(pwr_policy_data.hw_threshold.threshold_idx);
635
636 boardobj = construct_pwr_policy(g, &pwr_policy_data,
637 pwr_policy_size, pwr_policy_data.boardobj.type);
638
639 if (!boardobj) {
640 gk20a_err(dev_from_gk20a(g),
641 "unable to create pwr policy for %d type %d", index, pwr_policy_data.boardobj.type);
642 status = -EINVAL;
643 goto done;
644 }
645
646 status = boardobjgrp_objinsert(&ppwrpolicyobjs->pwr_policies.super,
647 boardobj, obj_index);
648
649 if (status) {
650 gk20a_err(dev_from_gk20a(g),
651 "unable to insert pwr policy boardobj for %d", index);
652 status = -EINVAL;
653 goto done;
654 }
655
656 ++obj_index;
657 }
658
659 if (hw_threshold_policy_index &&
660 (hw_threshold_policy_index < 0x3)) {
661 status = _pwr_policy_construct_WAR_policy(g,
662 ppwrpolicyobjs,
663 &pwr_policy_data,
664 pwr_policy_size,
665 hw_threshold_policy_index,
666 obj_index);
667 if (status) {
668 gk20a_err(dev_from_gk20a(g),
669 "unable to construct_WAR_policy");
670 status = -EINVAL;
671 goto done;
672 }
673 ++obj_index;
674 }
675
676 if (!sw_threshold_policy_index) {
677 status = _pwr_policy_construct_WAR_SW_Threshold_policy(g,
678 ppwrpolicyobjs,
679 &pwr_policy_data,
680 sizeof(struct pwr_policy_sw_threshold),
681 obj_index);
682 if (status) {
683 gk20a_err(dev_from_gk20a(g),
684 				"unable to construct_WAR_SW_Threshold_policy");
685 status = -EINVAL;
686 goto done;
687 }
688 ++obj_index;
689 }
690
691done:
692 gk20a_dbg_info(" done status %x", status);
693 return status;
694}
695
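Two details of the parsing loop above are worth spelling out. First, the 3x power capping table is laid out as a fixed-size header followed by num_table_entries fixed-size entries, so entry i always sits at a constant offset from the table base; a minimal sketch of that addressing:

/* Sketch only: entry addressing for a VBIOS table laid out as
 * [header][entry 0][entry 1]...[entry N-1] with fixed-size entries. */
static inline u8 *pwr_policy_entry_ptr_sketch(u8 *table_ptr,
		u32 header_size, u32 entry_size, u32 index)
{
	return table_ptr + header_size + (entry_size * index);
}

Second, hw_threshold_policy_index accumulates BIT(threshold_idx) for every HW_THRESHOLD entry found. If it ends up non-zero but below 0x3, exactly one of the two expected slots is missing (bit 0 for the CRIT policy, bit 1 for the WARN policy), and _pwr_policy_construct_WAR_policy() builds the missing one with the hard-coded limits shown earlier.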
696u32 pmgr_policy_sw_setup(struct gk20a *g)
697{
698 u32 status;
699 struct boardobjgrp *pboardobjgrp = NULL;
700 struct pwr_policy *ppolicy;
701 struct pmgr_pwr_policy *ppwrpolicyobjs;
702 u8 indx = 0;
703
704 /* Construct the Super Class and override the Interfaces */
705 status = boardobjgrpconstruct_e32(
706 &g->pmgr_pmu.pmgr_policyobjs.pwr_policies);
707 if (status) {
708 gk20a_err(dev_from_gk20a(g),
709 "error creating boardobjgrp for pmgr policy, status - 0x%x",
710 status);
711 goto done;
712 }
713
714 status = boardobjgrpconstruct_e32(
715 &g->pmgr_pmu.pmgr_policyobjs.pwr_policy_rels);
716 if (status) {
717 gk20a_err(dev_from_gk20a(g),
718 "error creating boardobjgrp for pmgr policy rels, status - 0x%x",
719 status);
720 goto done;
721 }
722
723 status = boardobjgrpconstruct_e32(
724 &g->pmgr_pmu.pmgr_policyobjs.pwr_violations);
725 if (status) {
726 gk20a_err(dev_from_gk20a(g),
727 "error creating boardobjgrp for pmgr violations, status - 0x%x",
728 status);
729 goto done;
730 }
731
732 memset(g->pmgr_pmu.pmgr_policyobjs.policy_idxs, CTRL_PMGR_PWR_POLICY_INDEX_INVALID,
733 sizeof(u8) * CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES);
734
735 /* Initialize external power limit policy indexes to _INVALID/0xFF */
736 for (indx = 0; indx < PWR_POLICY_EXT_POWER_STATE_ID_COUNT; indx++) {
737 g->pmgr_pmu.pmgr_policyobjs.ext_limits[indx].policy_table_idx =
738 CTRL_PMGR_PWR_POLICY_INDEX_INVALID;
739 }
740
741 /* Initialize external power state to _D1 */
742 g->pmgr_pmu.pmgr_policyobjs.ext_power_state = 0xFFFFFFFF;
743
744 ppwrpolicyobjs = &(g->pmgr_pmu.pmgr_policyobjs);
745 pboardobjgrp = &(g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super);
746
747 status = devinit_get_pwr_policy_table(g, ppwrpolicyobjs);
748 if (status)
749 goto done;
750
751 g->pmgr_pmu.pmgr_policyobjs.b_enabled = true;
752
753 BOARDOBJGRP_FOR_EACH(pboardobjgrp, struct pwr_policy *, ppolicy, indx) {
754 PMGR_PWR_POLICY_INCREMENT_LIMIT_INPUT_COUNT(ppolicy);
755 }
756
757 g->pmgr_pmu.pmgr_policyobjs.global_ceiling.values[0] =
758 0xFF;
759
760 g->pmgr_pmu.pmgr_policyobjs.client_work_item.b_pending = false;
761
762done:
763 gk20a_dbg_info(" done status %x", status);
764 return status;
765}
diff --git a/drivers/gpu/nvgpu/pmgr/pwrpolicy.h b/drivers/gpu/nvgpu/pmgr/pwrpolicy.h
new file mode 100644
index 00000000..008282d3
--- /dev/null
+++ b/drivers/gpu/nvgpu/pmgr/pwrpolicy.h
@@ -0,0 +1,127 @@
1/*
2 * general power channel structures & definitions
3 *
4 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15#ifndef _PWRPOLICY_H_
16#define _PWRPOLICY_H_
17
18#include "boardobj/boardobjgrp.h"
19#include "boardobj/boardobj.h"
20#include "pmuif/gpmuifpmgr.h"
21#include "ctrl/ctrlpmgr.h"
22
23#define PWR_POLICY_EXT_POWER_STATE_ID_COUNT 0x4
24
25enum pwr_policy_limit_id {
26 PWR_POLICY_LIMIT_ID_MIN = 0x00000000,
27 PWR_POLICY_LIMIT_ID_RATED,
28 PWR_POLICY_LIMIT_ID_MAX,
29 PWR_POLICY_LIMIT_ID_CURR,
30 PWR_POLICY_LIMIT_ID_BATT,
31};
32
33struct pwr_policy {
34 struct boardobj super;
35 u8 ch_idx;
36 u8 num_limit_inputs;
37 u8 limit_unit;
38 s32 limit_delta;
39 u32 limit_min;
40 u32 limit_rated;
41 u32 limit_max;
42 u32 limit_batt;
43 struct ctrl_pmgr_pwr_policy_info_integral integral;
44 struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_min;
45 struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_rated;
46 struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_max;
47 struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_batt;
48 struct ctrl_pmgr_pwr_policy_limit_arbitration limit_arb_curr;
49 u8 sample_mult;
50 enum ctrl_pmgr_pwr_policy_filter_type filter_type;
51 union ctrl_pmgr_pwr_policy_filter_param filter_param;
52};
53
54struct pwr_policy_ext_limit {
55 u8 policy_table_idx;
56 u32 limit;
57};
58
59struct pwr_policy_batt_workitem {
60 u32 power_state;
61 bool b_full_deflection;
62};
63
64struct pwr_policy_client_workitem {
65 u32 limit;
66 bool b_pending;
67};
68
69struct pwr_policy_relationship {
70 struct boardobj super;
71 u8 policy_idx;
72};
73
74struct pmgr_pwr_policy {
75 u8 version;
76 bool b_enabled;
77 struct nv_pmu_perf_domain_group_limits global_ceiling;
78 u8 policy_idxs[CTRL_PMGR_PWR_POLICY_IDX_NUM_INDEXES];
79 struct pwr_policy_ext_limit ext_limits[PWR_POLICY_EXT_POWER_STATE_ID_COUNT];
80 s32 ext_power_state;
81 u16 base_sample_period;
82 u16 min_client_sample_period;
83 u8 low_sampling_mult;
84 struct boardobjgrp_e32 pwr_policies;
85 struct boardobjgrp_e32 pwr_policy_rels;
86 struct boardobjgrp_e32 pwr_violations;
87 struct pwr_policy_client_workitem client_work_item;
88};
89
90struct pwr_policy_limit {
91 struct pwr_policy super;
92};
93
94struct pwr_policy_hw_threshold {
95 struct pwr_policy_limit super;
96 u8 threshold_idx;
97 u8 low_threshold_idx;
98 bool b_use_low_threshold;
99 u16 low_threshold_value;
100};
101
102struct pwr_policy_sw_threshold {
103 struct pwr_policy_limit super;
104 u8 threshold_idx;
105 u8 low_threshold_idx;
106 bool b_use_low_threshold;
107 u16 low_threshold_value;
108 u8 event_id;
109};
110
111union pwr_policy_data_union {
112 struct boardobj boardobj;
113 struct pwr_policy pwrpolicy;
114 struct pwr_policy_hw_threshold hw_threshold;
115 struct pwr_policy_sw_threshold sw_threshold;
116};
117
118#define PMGR_GET_PWR_POLICY(g, policy_idx) \
119 ((struct pwr_policy *)BOARDOBJGRP_OBJ_GET_BY_IDX( \
120 &(g->pmgr_pmu.pmgr_policyobjs.pwr_policies.super), (policy_idx)))
121
122#define PMGR_PWR_POLICY_INCREMENT_LIMIT_INPUT_COUNT(ppolicy) \
123 ((ppolicy)->num_limit_inputs++)
124
125u32 pmgr_policy_sw_setup(struct gk20a *g);
126
127#endif
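A minimal usage sketch of the two macros above, assuming the caller already holds a valid struct gk20a and a table index obtained from policy_idxs[] (the function name is illustrative):

/* Sketch only: look up a power policy by table index and account one
 * additional limit input against it. */
static void pwr_policy_usage_sketch(struct gk20a *g, u8 policy_idx)
{
	struct pwr_policy *ppolicy;

	if (policy_idx == CTRL_PMGR_PWR_POLICY_INDEX_INVALID)
		return;

	ppolicy = PMGR_GET_PWR_POLICY(g, policy_idx);
	if (ppolicy == NULL)
		return;

	PMGR_PWR_POLICY_INCREMENT_LIMIT_INPUT_COUNT(ppolicy);
}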