path: root/drivers/gpu/nvgpu/common
author	Mahantesh Kumbar <mkumbar@nvidia.com>	2017-06-07 12:56:00 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-13 16:19:47 -0400
commit	c18364d0c4b3fb6581f937c018cd01fc329601bb (patch)
tree	923ab682435379dc8bad7852c49725bf7f0f5286 /drivers/gpu/nvgpu/common
parent	45355f00e7de9068f403682044f550026fa7e86e (diff)
gpu: nvgpu: moved pg out from pmu_gk20a.c/h

- Moved PG-related code to pmu_pg.c under the common/pmu folder:
  PG state machine support methods, PG ACK handlers, AELPG methods,
  and PG enable/disable methods.
- Prepended nvgpu_ to the elpg/aelpg global methods, replacing the
  gk20a_ prefix.

JIRA NVGPU-97

Change-Id: I2148a69ff86b5c5d43c521ff6e241db84afafd82
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1498363
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
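For callers the change is a prefix rename only; arguments are unchanged, as the pmu.c hunks below show. A minimal before/after sketch using the call sites visible in this diff:

	/* old names, previously in pmu_gk20a.c/h */
	gk20a_pmu_enable_elpg(g);
	gk20a_pmu_disable_elpg(g);
	gk20a_aelpg_init(g);
	gk20a_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);

	/* new names, now implemented in common/pmu/pmu_pg.c */
	nvgpu_pmu_enable_elpg(g);
	nvgpu_pmu_disable_elpg(g);
	nvgpu_aelpg_init(g);
	nvgpu_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);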
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu.c	8
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu_pg.c	719
2 files changed, 723 insertions, 4 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index ca532049..fc72d1fc 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -276,15 +276,15 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g)
 		/* Init reg with prod values*/
 		if (g->ops.pmu.pmu_setup_elpg)
 			g->ops.pmu.pmu_setup_elpg(g);
-		gk20a_pmu_enable_elpg(g);
+		nvgpu_pmu_enable_elpg(g);
 	}
 
 	nvgpu_udelay(50);
 
 	/* Enable AELPG */
 	if (g->aelpg_enabled) {
-		gk20a_aelpg_init(g);
-		gk20a_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
+		nvgpu_aelpg_init(g);
+		nvgpu_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
 	}
 }
 
@@ -398,7 +398,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
 	nvgpu_pmu_get_pg_stats(g,
 		PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
 
-	gk20a_pmu_disable_elpg(g);
+	nvgpu_pmu_disable_elpg(g);
 	pmu->initialized = false;
 
 	/* update the s/w ELPG residency counters */
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
new file mode 100644
index 00000000..046f4d59
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -0,0 +1,719 @@
/*
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <nvgpu/pmu.h>
#include <nvgpu/log.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>

#include "gk20a/gk20a.h"

/* state transition :
 * OFF => [OFF_ON_PENDING optional] => ON_PENDING => ON => OFF
 * ON => OFF is always synchronized
 */
/* elpg is off */
#define PMU_ELPG_STAT_OFF		0
/* elpg is on */
#define PMU_ELPG_STAT_ON		1
/* elpg is off, ALLOW cmd has been sent, wait for ack */
#define PMU_ELPG_STAT_ON_PENDING	2
/* elpg is on, DISALLOW cmd has been sent, wait for ack */
#define PMU_ELPG_STAT_OFF_PENDING	3
/* elpg is off, caller has requested on, but ALLOW
 * cmd hasn't been sent due to ENABLE_ALLOW delay
 */
#define PMU_ELPG_STAT_OFF_ON_PENDING	4

#define PMU_PGENG_GR_BUFFER_IDX_INIT	(0)
#define PMU_PGENG_GR_BUFFER_IDX_ZBC	(1)
#define PMU_PGENG_GR_BUFFER_IDX_FECS	(2)

static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 handle, u32 status)
{
	struct nvgpu_pmu *pmu = param;
	struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg;

	nvgpu_log_fn(g, " ");

	if (status != 0) {
		nvgpu_err(g, "ELPG cmd aborted");
		/* TBD: disable ELPG */
		return;
	}

	switch (elpg_msg->msg) {
	case PMU_PG_ELPG_MSG_INIT_ACK:
		nvgpu_pmu_dbg(g, "INIT_PG is ack from PMU, eng - %d",
			elpg_msg->engine_id);
		break;
	case PMU_PG_ELPG_MSG_ALLOW_ACK:
		nvgpu_pmu_dbg(g, "ALLOW is ack from PMU, eng - %d",
			elpg_msg->engine_id);
		if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
			pmu->elpg_stat = PMU_ELPG_STAT_ON;
		else if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
			pmu->mscg_transition_state = PMU_ELPG_STAT_ON;
		break;
	case PMU_PG_ELPG_MSG_DISALLOW_ACK:
		nvgpu_pmu_dbg(g, "DISALLOW is ack from PMU, eng - %d",
			elpg_msg->engine_id);

		if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
			pmu->elpg_stat = PMU_ELPG_STAT_OFF;
		else if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
			pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;

		if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) {
			if (g->ops.pmu.pmu_pg_engines_feature_list &&
				g->ops.pmu.pmu_pg_engines_feature_list(g,
				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
				PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
				pmu->initialized = true;
				nvgpu_pmu_state_change(g, PMU_STATE_STARTED,
					false);
				WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
				/* make status visible */
				smp_mb();
			} else
				nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTED,
					true);
		}
		break;
	default:
		nvgpu_err(g,
			"unsupported ELPG message : 0x%04x", elpg_msg->msg);
	}
}

/* PG enable/disable */
int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
{
	u32 status = 0;

	if (enable_pg == true) {
		if (g->ops.pmu.pmu_pg_engines_feature_list &&
			g->ops.pmu.pmu_pg_engines_feature_list(g,
			PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
			if (g->ops.pmu.pmu_lpwr_enable_pg)
				status = g->ops.pmu.pmu_lpwr_enable_pg(g,
					true);
		} else if (g->support_pmu && g->can_elpg)
			status = nvgpu_pmu_enable_elpg(g);
	} else if (enable_pg == false) {
		if (g->ops.pmu.pmu_pg_engines_feature_list &&
			g->ops.pmu.pmu_pg_engines_feature_list(g,
			PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
			if (g->ops.pmu.pmu_lpwr_disable_pg)
				status = g->ops.pmu.pmu_lpwr_disable_pg(g,
					true);
		} else if (g->support_pmu && g->can_elpg)
			status = nvgpu_pmu_disable_elpg(g);
	}

	return status;
}

static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq, status;

	nvgpu_log_fn(g, " ");

	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE +
		sizeof(struct pmu_pg_cmd_elpg_cmd);
	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
	cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_ALLOW;

	/* no need to wait ack for ELPG enable but set
	 * pending to sync with follow up ELPG disable
	 */
	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
		pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
	else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
		pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_ALLOW");
	status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
		PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
		pmu, &seq, ~0);
	WARN_ON(status != 0);

	nvgpu_log_fn(g, "done");
	return 0;
}

int nvgpu_pmu_enable_elpg(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct gr_gk20a *gr = &g->gr;
	u32 pg_engine_id;
	u32 pg_engine_id_list = 0;

	int ret = 0;

	nvgpu_log_fn(g, " ");

	if (!g->support_pmu)
		return ret;

	nvgpu_mutex_acquire(&pmu->elpg_mutex);

	pmu->elpg_refcnt++;
	if (pmu->elpg_refcnt <= 0)
		goto exit_unlock;

	/* something is not right if we end up in following code path */
	if (unlikely(pmu->elpg_refcnt > 1)) {
		nvgpu_warn(g,
			"%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
			__func__, pmu->elpg_refcnt);
		WARN_ON(1);
	}

	/* do NOT enable elpg until golden ctx is created,
	 * which is related with the ctx that ELPG save and restore.
	 */
	if (unlikely(!gr->ctx_vars.golden_image_initialized))
		goto exit_unlock;

	/* return if ELPG is already on or on_pending or off_on_pending */
	if (pmu->elpg_stat != PMU_ELPG_STAT_OFF)
		goto exit_unlock;

	if (g->ops.pmu.pmu_pg_supported_engines_list)
		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);

	for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
		pg_engine_id++) {

		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
			ACCESS_ONCE(pmu->mscg_stat) == PMU_MSCG_DISABLED)
			continue;

		if (BIT(pg_engine_id) & pg_engine_id_list)
			ret = pmu_enable_elpg_locked(g, pg_engine_id);
	}

exit_unlock:
	nvgpu_mutex_release(&pmu->elpg_mutex);
	nvgpu_log_fn(g, "done");
	return ret;
}

int nvgpu_pmu_disable_elpg(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;
	int ret = 0;
	u32 pg_engine_id;
	u32 pg_engine_id_list = 0;
	u32 *ptr = NULL;

	gk20a_dbg_fn("");

	if (g->ops.pmu.pmu_pg_supported_engines_list)
		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);

	if (!g->support_pmu)
		return ret;

	nvgpu_mutex_acquire(&pmu->elpg_mutex);

	pmu->elpg_refcnt--;
	if (pmu->elpg_refcnt > 0) {
		nvgpu_warn(g,
			"%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
			__func__, pmu->elpg_refcnt);
		WARN_ON(1);
		ret = 0;
		goto exit_unlock;
	}

	/* cancel off_on_pending and return */
	if (pmu->elpg_stat == PMU_ELPG_STAT_OFF_ON_PENDING) {
		pmu->elpg_stat = PMU_ELPG_STAT_OFF;
		ret = 0;
		goto exit_reschedule;
	}
	/* wait if on_pending */
	else if (pmu->elpg_stat == PMU_ELPG_STAT_ON_PENDING) {

		pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
			&pmu->elpg_stat, PMU_ELPG_STAT_ON);

		if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
			nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d",
				pmu->elpg_stat);
			pmu_dump_elpg_stats(pmu);
			pmu_dump_falcon_stats(pmu);
			ret = -EBUSY;
			goto exit_unlock;
		}
	}
	/* return if ELPG is already off */
	else if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
		ret = 0;
		goto exit_reschedule;
	}

	for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
		pg_engine_id++) {

		if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
			ACCESS_ONCE(pmu->mscg_stat) == PMU_MSCG_DISABLED)
			continue;

		if (BIT(pg_engine_id) & pg_engine_id_list) {
			memset(&cmd, 0, sizeof(struct pmu_cmd));
			cmd.hdr.unit_id = PMU_UNIT_PG;
			cmd.hdr.size = PMU_CMD_HDR_SIZE +
				sizeof(struct pmu_pg_cmd_elpg_cmd);
			cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
			cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
			cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;

			if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
				pmu->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
			else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
				pmu->mscg_transition_state =
					PMU_ELPG_STAT_OFF_PENDING;

			if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
				ptr = &pmu->elpg_stat;
			else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
				ptr = &pmu->mscg_transition_state;

			nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
			gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
				PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
				pmu, &seq, ~0);

			pmu_wait_message_cond(pmu,
				gk20a_get_gr_idle_timeout(g),
				ptr, PMU_ELPG_STAT_OFF);
			if (*ptr != PMU_ELPG_STAT_OFF) {
				nvgpu_err(g, "ELPG_DISALLOW_ACK failed");
				pmu_dump_elpg_stats(pmu);
				pmu_dump_falcon_stats(pmu);
				ret = -EBUSY;
				goto exit_unlock;
			}
		}
	}

exit_reschedule:
exit_unlock:
	nvgpu_mutex_release(&pmu->elpg_mutex);
	nvgpu_log_fn(g, "done");
	return ret;
}

/* PG init */
static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 handle, u32 status)
{
	struct nvgpu_pmu *pmu = param;

	nvgpu_log_fn(g, " ");

	if (status != 0) {
		nvgpu_err(g, "ELPG cmd aborted");
		/* TBD: disable ELPG */
		return;
	}

	switch (msg->msg.pg.stat.sub_msg_id) {
	case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET:
		nvgpu_pmu_dbg(g, "ALLOC_DMEM_OFFSET is acknowledged from PMU");
		pmu->stat_dmem_offset[msg->msg.pg.stat.engine_id] =
			msg->msg.pg.stat.data;
		break;
	default:
		break;
	}
}

static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;

	nvgpu_log_fn(g, " ");

	gk20a_pmu_pg_idle_counter_config(g, pg_engine_id);

	if (g->ops.pmu.pmu_pg_init_param)
		g->ops.pmu.pmu_pg_init_param(g, pg_engine_id);

	/* init ELPG */
	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
	cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_INIT;

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_INIT");
	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
		pmu_handle_pg_elpg_msg, pmu, &seq, ~0);

	/* alloc dmem for powergating state log */
	pmu->stat_dmem_offset[pg_engine_id] = 0;
	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_stat);
	cmd.cmd.pg.stat.cmd_type = PMU_PG_CMD_ID_PG_STAT;
	cmd.cmd.pg.stat.engine_id = pg_engine_id;
	cmd.cmd.pg.stat.sub_cmd_id = PMU_PG_STAT_CMD_ALLOC_DMEM;
	cmd.cmd.pg.stat.data = 0;

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_STAT_CMD_ALLOC_DMEM");
	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
		pmu_handle_pg_stat_msg, pmu, &seq, ~0);

	/* disallow ELPG initially
	 * PMU ucode requires a disallow cmd before allow cmd
	 */
	/* set for wait_event PMU_ELPG_STAT_OFF */
	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
		pmu->elpg_stat = PMU_ELPG_STAT_OFF;
	else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
		pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
	cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
	cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
	cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;

	nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
		pmu_handle_pg_elpg_msg, pmu, &seq, ~0);

	return 0;
}

int nvgpu_pmu_init_powergating(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	u32 pg_engine_id;
	u32 pg_engine_id_list = 0;

	nvgpu_log_fn(g, " ");

	if (g->ops.pmu.pmu_pg_supported_engines_list)
		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);

	gk20a_gr_wait_initialized(g);

	for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
		pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
		pg_engine_id++) {

		if (BIT(pg_engine_id) & pg_engine_id_list) {
			pmu_pg_init_send(g, pg_engine_id);
			if (pmu->pmu_state == PMU_STATE_INIT_RECEIVED)
				nvgpu_pmu_state_change(g,
					PMU_STATE_ELPG_BOOTING, false);
		}
	}

	if (g->ops.pmu.pmu_pg_param_post_init)
		g->ops.pmu.pmu_pg_param_post_init(g);

	return 0;
}

static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 handle, u32 status)
{
	struct nvgpu_pmu *pmu = param;
	struct pmu_pg_msg_eng_buf_stat *eng_buf_stat =
		&msg->msg.pg.eng_buf_stat;

	nvgpu_log_fn(g, " ");

	nvgpu_pmu_dbg(g,
		"reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
	if (status != 0) {
		nvgpu_err(g, "PGENG cmd aborted");
		/* TBD: disable ELPG */
		return;
	}

	pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
	if ((!pmu->buf_loaded) &&
		(pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
		nvgpu_err(g, "failed to load PGENG buffer");
	else {
		nvgpu_pmu_state_change(g, pmu->pmu_state, true);
	}
}

int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 desc;
	int err = 0;
	u32 gr_engine_id;

	gk20a_dbg_fn("");

	gr_engine_id = gk20a_fifo_get_gr_engine_id(g);

	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE +
		g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
		PMU_PG_CMD_ID_ENG_BUF_LOAD);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
		gr_engine_id);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
		PMU_PGENG_GR_BUFFER_IDX_FECS);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
		pmu->pg_buf.size);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
		u64_lo32(pmu->pg_buf.gpu_va));
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
		(u8)(pmu->pg_buf.gpu_va & 0xFF));
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
		PMU_DMAIDX_VIRT);

	pmu->buf_loaded = false;
	nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
		pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
	nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false);
	return err;
}

void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 desc;
	u32 gr_engine_id;

	gr_engine_id = gk20a_fifo_get_gr_engine_id(g);

	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE +
		g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
		PMU_PG_CMD_ID_ENG_BUF_LOAD);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
		gr_engine_id);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
		PMU_PGENG_GR_BUFFER_IDX_ZBC);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
		pmu->seq_buf.size);
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
		u64_lo32(pmu->seq_buf.gpu_va));
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
		(u8)(pmu->seq_buf.gpu_va & 0xFF));
	g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
		PMU_DMAIDX_VIRT);

	pmu->buf_loaded = false;
	nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
	gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
		pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
	nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false);
}

/* stats */
int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
		struct pmu_pg_stats_data *pg_stat_data)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	u32 pg_engine_id_list = 0;

	if (!pmu->initialized) {
		pg_stat_data->ingating_time = 0;
		pg_stat_data->ungating_time = 0;
		pg_stat_data->gating_cnt = 0;
		return 0;
	}

	if (g->ops.pmu.pmu_pg_supported_engines_list)
		pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);

	if (BIT(pg_engine_id) & pg_engine_id_list)
		g->ops.pmu.pmu_elpg_statistics(g, pg_engine_id,
			pg_stat_data);

	return 0;
}

/* AELPG */
static void ap_callback_init_and_enable_ctrl(
		struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 seq_desc, u32 status)
{
	/* Define p_ap (i.e pointer to pmu_ap structure) */
	WARN_ON(!msg);

	if (!status) {
		switch (msg->msg.pg.ap_msg.cmn.msg_id) {
		case PMU_AP_MSG_ID_INIT_ACK:
			nvgpu_pmu_dbg(g, "reply PMU_AP_CMD_ID_INIT");
			break;

		default:
			nvgpu_pmu_dbg(g,
				"%s: Invalid Adaptive Power Message: %x\n",
				__func__, msg->msg.pg.ap_msg.cmn.msg_id);
			break;
		}
	}
}

/* Send an Adaptive Power (AP) related command to PMU */
int nvgpu_pmu_ap_send_command(struct gk20a *g,
		union pmu_ap_cmd *p_ap_cmd, bool b_block)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	/* FIXME: where is the PG structure defined?? */
	u32 status = 0;
	struct pmu_cmd cmd;
	u32 seq;
	pmu_callback p_callback = NULL;

	memset(&cmd, 0, sizeof(struct pmu_cmd));

	/* Copy common members */
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(union pmu_ap_cmd);

	cmd.cmd.pg.ap_cmd.cmn.cmd_type = PMU_PG_CMD_ID_AP;
	cmd.cmd.pg.ap_cmd.cmn.cmd_id = p_ap_cmd->cmn.cmd_id;

	/* Copy other members of command */
	switch (p_ap_cmd->cmn.cmd_id) {
	case PMU_AP_CMD_ID_INIT:
		nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_INIT");
		cmd.cmd.pg.ap_cmd.init.pg_sampling_period_us =
			p_ap_cmd->init.pg_sampling_period_us;
		break;

	case PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL:
		nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL");
		cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.ctrl_id =
			p_ap_cmd->init_and_enable_ctrl.ctrl_id;
		memcpy(
			(void *)&(cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.params),
			(void *)&(p_ap_cmd->init_and_enable_ctrl.params),
			sizeof(struct pmu_ap_ctrl_init_params));

		p_callback = ap_callback_init_and_enable_ctrl;
		break;

	case PMU_AP_CMD_ID_ENABLE_CTRL:
		nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_ENABLE_CTRL");
		cmd.cmd.pg.ap_cmd.enable_ctrl.ctrl_id =
			p_ap_cmd->enable_ctrl.ctrl_id;
		break;

	case PMU_AP_CMD_ID_DISABLE_CTRL:
		nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_DISABLE_CTRL");
		cmd.cmd.pg.ap_cmd.disable_ctrl.ctrl_id =
			p_ap_cmd->disable_ctrl.ctrl_id;
		break;

	case PMU_AP_CMD_ID_KICK_CTRL:
		nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_KICK_CTRL");
		cmd.cmd.pg.ap_cmd.kick_ctrl.ctrl_id =
			p_ap_cmd->kick_ctrl.ctrl_id;
		cmd.cmd.pg.ap_cmd.kick_ctrl.skip_count =
			p_ap_cmd->kick_ctrl.skip_count;
		break;

	default:
		nvgpu_pmu_dbg(g, "%s: Invalid Adaptive Power command %d\n",
			__func__, p_ap_cmd->cmn.cmd_id);
		return 0x2f;
	}

	status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
			p_callback, pmu, &seq, ~0);

	if (status) {
		nvgpu_pmu_dbg(g,
			"%s: Unable to submit Adaptive Power Command %d\n",
			__func__, p_ap_cmd->cmn.cmd_id);
		goto err_return;
	}

	/* TODO: Implement blocking calls (b_block) */

err_return:
	return status;
}

int nvgpu_aelpg_init(struct gk20a *g)
{
	int status = 0;

	/* Remove reliance on app_ctrl field. */
	union pmu_ap_cmd ap_cmd;

	/* TODO: Check for elpg being ready? */
	ap_cmd.init.cmd_id = PMU_AP_CMD_ID_INIT;
	ap_cmd.init.pg_sampling_period_us = g->pmu.aelpg_param[0];

	status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
	return status;
}

int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id)
{
	int status = 0;
	union pmu_ap_cmd ap_cmd;

	/* TODO: Probably check if ELPG is ready? */
	ap_cmd.init_and_enable_ctrl.cmd_id = PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL;
	ap_cmd.init_and_enable_ctrl.ctrl_id = ctrl_id;
	ap_cmd.init_and_enable_ctrl.params.min_idle_filter_us =
		g->pmu.aelpg_param[1];
	ap_cmd.init_and_enable_ctrl.params.min_target_saving_us =
		g->pmu.aelpg_param[2];
	ap_cmd.init_and_enable_ctrl.params.power_break_even_us =
		g->pmu.aelpg_param[3];
	ap_cmd.init_and_enable_ctrl.params.cycles_per_sample_max =
		g->pmu.aelpg_param[4];

	switch (ctrl_id) {
	case PMU_AP_CTRL_ID_GRAPHICS:
		break;
	default:
		break;
	}

	status = nvgpu_pmu_ap_send_command(g, &ap_cmd, true);
	return status;
}
719}