path: root/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
Diffstat (limited to 'drivers/gpu/nvgpu/common/pmu/pmu_pg.c')
-rw-r--r--  drivers/gpu/nvgpu/common/pmu/pmu_pg.c  748
1 file changed, 748 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
new file mode 100644
index 00000000..bf39ce19
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -0,0 +1,748 @@
/*
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/pmu.h>
#include <nvgpu/log.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/barrier.h>
#include <nvgpu/bug.h>

#include "gk20a/gk20a.h"

/* State transition:
 * OFF => [OFF_ON_PENDING optional] => ON_PENDING => ON => OFF
 * ON => OFF is always synchronized
 */
/* elpg is off */
#define PMU_ELPG_STAT_OFF 0
/* elpg is on */
#define PMU_ELPG_STAT_ON 1
/* elpg is off, ALLOW cmd has been sent, wait for ack */
#define PMU_ELPG_STAT_ON_PENDING 2
/* elpg is on, DISALLOW cmd has been sent, wait for ack */
#define PMU_ELPG_STAT_OFF_PENDING 3
/* elpg is off, caller has requested on, but ALLOW
 * cmd hasn't been sent due to ENABLE_ALLOW delay
 */
#define PMU_ELPG_STAT_OFF_ON_PENDING 4

#define PMU_PGENG_GR_BUFFER_IDX_INIT (0)
#define PMU_PGENG_GR_BUFFER_IDX_ZBC (1)
#define PMU_PGENG_GR_BUFFER_IDX_FECS (2)

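/*
 * Handler for PMU_UNIT_PG ELPG messages: records ALLOW/DISALLOW acks by
 * updating elpg_stat (GR) or mscg_transition_state (MS), and advances the
 * PMU state machine when the initial DISALLOW ack arrives during ELPG boot.
 */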
static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
        void *param, u32 handle, u32 status)
{
        struct nvgpu_pmu *pmu = param;
        struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg;

        nvgpu_log_fn(g, " ");

        if (status != 0) {
                nvgpu_err(g, "ELPG cmd aborted");
                /* TBD: disable ELPG */
                return;
        }

        switch (elpg_msg->msg) {
        case PMU_PG_ELPG_MSG_INIT_ACK:
                nvgpu_pmu_dbg(g, "INIT_PG is ack from PMU, eng - %d",
                        elpg_msg->engine_id);
                break;
        case PMU_PG_ELPG_MSG_ALLOW_ACK:
                nvgpu_pmu_dbg(g, "ALLOW is ack from PMU, eng - %d",
                        elpg_msg->engine_id);
                if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
                        pmu->mscg_transition_state = PMU_ELPG_STAT_ON;
                else
                        pmu->elpg_stat = PMU_ELPG_STAT_ON;
                break;
        case PMU_PG_ELPG_MSG_DISALLOW_ACK:
                nvgpu_pmu_dbg(g, "DISALLOW is ack from PMU, eng - %d",
                        elpg_msg->engine_id);

                if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
                        pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
                else
                        pmu->elpg_stat = PMU_ELPG_STAT_OFF;

                if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) {
                        if (g->ops.pmu.pmu_pg_engines_feature_list &&
                                g->ops.pmu.pmu_pg_engines_feature_list(g,
                                        PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
                                PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
                                pmu->initialized = true;
                                nvgpu_pmu_state_change(g, PMU_STATE_STARTED,
                                        true);
                                WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
                                /* make status visible */
                                nvgpu_smp_mb();
                        } else
                                nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTED,
                                        true);
                }
                break;
        default:
                nvgpu_err(g,
                        "unsupported ELPG message : 0x%04x", elpg_msg->msg);
        }
}

/* PG enable/disable */
int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
{
        u32 status = 0;

        if (enable_pg == true) {
                if (g->ops.pmu.pmu_pg_engines_feature_list &&
                        g->ops.pmu.pmu_pg_engines_feature_list(g,
                                PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
                        PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
                        if (g->ops.pmu.pmu_lpwr_enable_pg)
                                status = g->ops.pmu.pmu_lpwr_enable_pg(g,
                                        true);
                } else if (g->support_pmu && g->can_elpg)
                        status = nvgpu_pmu_enable_elpg(g);
        } else if (enable_pg == false) {
                if (g->ops.pmu.pmu_pg_engines_feature_list &&
                        g->ops.pmu.pmu_pg_engines_feature_list(g,
                                PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
                        PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
                        if (g->ops.pmu.pmu_lpwr_disable_pg)
                                status = g->ops.pmu.pmu_lpwr_disable_pg(g,
                                        true);
                } else if (g->support_pmu && g->can_elpg)
                        status = nvgpu_pmu_disable_elpg(g);
        }

        return status;
}

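/*
 * Post PMU_PG_ELPG_CMD_ALLOW for a single PG engine. The ack is not waited
 * for here; the *_ON_PENDING state is recorded so that a follow-up disable
 * can synchronize against the in-flight ALLOW.
 */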
static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
{
        struct nvgpu_pmu *pmu = &g->pmu;
        struct pmu_cmd cmd;
        u32 seq, status;

        nvgpu_log_fn(g, " ");

        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
        cmd.hdr.size = PMU_CMD_HDR_SIZE +
                sizeof(struct pmu_pg_cmd_elpg_cmd);
        cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
        cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
        cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_ALLOW;

        /* no need to wait for the ack on ELPG enable, but mark it pending
         * to synchronize with a follow-up ELPG disable
         */
        if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
                pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
        else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
                pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;

        nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_ALLOW");
        status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
                PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
                pmu, &seq, ~0);
        WARN_ON(status != 0);

        nvgpu_log_fn(g, "done");
        return 0;
}

int nvgpu_pmu_enable_elpg(struct gk20a *g)
{
        struct nvgpu_pmu *pmu = &g->pmu;
        struct gr_gk20a *gr = &g->gr;
        u32 pg_engine_id;
        u32 pg_engine_id_list = 0;

        int ret = 0;

        nvgpu_log_fn(g, " ");

        if (!g->support_pmu)
                return ret;

        nvgpu_mutex_acquire(&pmu->elpg_mutex);

        pmu->elpg_refcnt++;
        if (pmu->elpg_refcnt <= 0)
                goto exit_unlock;

        /* something is not right if we end up in the following code path */
        if (unlikely(pmu->elpg_refcnt > 1)) {
                nvgpu_warn(g,
                        "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
                        __func__, pmu->elpg_refcnt);
                WARN_ON(1);
        }

        /* do NOT enable ELPG until the golden ctx is created,
         * since that is the ctx that ELPG saves and restores.
         */
        if (unlikely(!gr->ctx_vars.golden_image_initialized))
                goto exit_unlock;

        /* return if ELPG is already on or on_pending or off_on_pending */
        if (pmu->elpg_stat != PMU_ELPG_STAT_OFF)
                goto exit_unlock;

        if (g->ops.pmu.pmu_pg_supported_engines_list)
                pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);

        for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
                pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
                pg_engine_id++) {

                if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
                        pmu->mscg_stat == PMU_MSCG_DISABLED)
                        continue;

                if (BIT(pg_engine_id) & pg_engine_id_list)
                        ret = pmu_enable_elpg_locked(g, pg_engine_id);
        }

exit_unlock:
        nvgpu_mutex_release(&pmu->elpg_mutex);
        nvgpu_log_fn(g, "done");
        return ret;
}

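/*
 * Disabling ELPG is synchronous: wait out any in-flight ALLOW ack first,
 * then post PMU_PG_ELPG_CMD_DISALLOW per supported engine and wait for each
 * DISALLOW ack before returning.
 */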
int nvgpu_pmu_disable_elpg(struct gk20a *g)
{
        struct nvgpu_pmu *pmu = &g->pmu;
        struct pmu_cmd cmd;
        u32 seq;
        int ret = 0;
        u32 pg_engine_id;
        u32 pg_engine_id_list = 0;
        u32 *ptr = NULL;

        nvgpu_log_fn(g, " ");

        if (g->ops.pmu.pmu_pg_supported_engines_list)
                pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);

        if (!g->support_pmu)
                return ret;

        nvgpu_mutex_acquire(&pmu->elpg_mutex);

        pmu->elpg_refcnt--;
        if (pmu->elpg_refcnt > 0) {
                nvgpu_warn(g,
                        "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
                        __func__, pmu->elpg_refcnt);
                WARN_ON(1);
                ret = 0;
                goto exit_unlock;
        }

        /* cancel off_on_pending and return */
        if (pmu->elpg_stat == PMU_ELPG_STAT_OFF_ON_PENDING) {
                pmu->elpg_stat = PMU_ELPG_STAT_OFF;
                ret = 0;
                goto exit_reschedule;
        }
        /* wait if on_pending */
        else if (pmu->elpg_stat == PMU_ELPG_STAT_ON_PENDING) {

                pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
                        &pmu->elpg_stat, PMU_ELPG_STAT_ON);

                if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
                        nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d",
                                pmu->elpg_stat);
                        nvgpu_pmu_dump_elpg_stats(pmu);
                        nvgpu_pmu_dump_falcon_stats(pmu);
                        ret = -EBUSY;
                        goto exit_unlock;
                }
        }
        /* return if ELPG is already off */
        else if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
                ret = 0;
                goto exit_reschedule;
        }

        for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
                pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
                pg_engine_id++) {

                if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
                        pmu->mscg_stat == PMU_MSCG_DISABLED)
                        continue;

                if (BIT(pg_engine_id) & pg_engine_id_list) {
                        memset(&cmd, 0, sizeof(struct pmu_cmd));
                        cmd.hdr.unit_id = PMU_UNIT_PG;
                        cmd.hdr.size = PMU_CMD_HDR_SIZE +
                                sizeof(struct pmu_pg_cmd_elpg_cmd);
                        cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
                        cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
                        cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;

                        if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
                                pmu->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
                        else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
                                pmu->mscg_transition_state =
                                        PMU_ELPG_STAT_OFF_PENDING;

                        if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
                                ptr = &pmu->elpg_stat;
                        else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
                                ptr = &pmu->mscg_transition_state;

                        nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
                        nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
                                PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
                                pmu, &seq, ~0);

                        pmu_wait_message_cond(pmu,
                                gk20a_get_gr_idle_timeout(g),
                                ptr, PMU_ELPG_STAT_OFF);
                        if (*ptr != PMU_ELPG_STAT_OFF) {
                                nvgpu_err(g, "ELPG_DISALLOW_ACK failed");
                                nvgpu_pmu_dump_elpg_stats(pmu);
                                nvgpu_pmu_dump_falcon_stats(pmu);
                                ret = -EBUSY;
                                goto exit_unlock;
                        }
                }
        }

exit_reschedule:
exit_unlock:
        nvgpu_mutex_release(&pmu->elpg_mutex);
        nvgpu_log_fn(g, "done");
        return ret;
}

/* PG init */
static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
        void *param, u32 handle, u32 status)
{
        struct nvgpu_pmu *pmu = param;

        nvgpu_log_fn(g, " ");

        if (status != 0) {
                nvgpu_err(g, "ELPG cmd aborted");
                /* TBD: disable ELPG */
                return;
        }

        switch (msg->msg.pg.stat.sub_msg_id) {
        case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET:
                nvgpu_pmu_dbg(g, "ALLOC_DMEM_OFFSET is acknowledged from PMU");
                pmu->stat_dmem_offset[msg->msg.pg.stat.engine_id] =
                        msg->msg.pg.stat.data;
                break;
        default:
                break;
        }
}

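/*
 * Per-engine PG init sequence: post ELPG INIT, allocate DMEM for the PG
 * statistics log, then send an initial DISALLOW (PMU ucode requires a
 * DISALLOW before the first ALLOW) and apply any sub-feature mask.
 */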
static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
{
        struct nvgpu_pmu *pmu = &g->pmu;
        struct pmu_cmd cmd;
        u32 seq;
        int err = 0;

        nvgpu_log_fn(g, " ");

        if (pmu->pmu_state == PMU_STATE_INIT_RECEIVED)
                nvgpu_pmu_state_change(g,
                        PMU_STATE_ELPG_BOOTING, false);
        else
                nvgpu_err(g, "PMU INIT not received\n");

        gk20a_pmu_pg_idle_counter_config(g, pg_engine_id);

        if (g->ops.pmu.pmu_pg_init_param)
                g->ops.pmu.pmu_pg_init_param(g, pg_engine_id);

        /* init ELPG */
        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
        cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
        cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
        cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
        cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_INIT;

        nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_INIT");
        err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
                pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
        if (err)
                nvgpu_err(g, "PMU_PG_ELPG_CMD_INIT cmd failed\n");

        /* alloc dmem for powergating state log */
        pmu->stat_dmem_offset[pg_engine_id] = 0;
        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
        cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_stat);
        cmd.cmd.pg.stat.cmd_type = PMU_PG_CMD_ID_PG_STAT;
        cmd.cmd.pg.stat.engine_id = pg_engine_id;
        cmd.cmd.pg.stat.sub_cmd_id = PMU_PG_STAT_CMD_ALLOC_DMEM;
        cmd.cmd.pg.stat.data = 0;

        nvgpu_pmu_dbg(g, "cmd post PMU_PG_STAT_CMD_ALLOC_DMEM");
        err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
                pmu_handle_pg_stat_msg, pmu, &seq, ~0);
        if (err)
                nvgpu_err(g, "PMU_PG_STAT_CMD_ALLOC_DMEM cmd failed\n");

        /* disallow ELPG initially;
         * PMU ucode requires a disallow cmd before the first allow cmd
         */
        /* set for wait_event PMU_ELPG_STAT_OFF */
        if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
                pmu->elpg_stat = PMU_ELPG_STAT_OFF;
        else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
                pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
        cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
        cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
        cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
        cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;

        nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
        err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
                pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
        if (err)
                nvgpu_err(g, "PMU_PG_ELPG_CMD_DISALLOW cmd failed\n");

        if (g->ops.pmu.pmu_pg_set_sub_feature_mask)
                g->ops.pmu.pmu_pg_set_sub_feature_mask(g, pg_engine_id);

        return 0;
}

int nvgpu_pmu_init_powergating(struct gk20a *g)
{
        u32 pg_engine_id;
        u32 pg_engine_id_list = 0;

        nvgpu_log_fn(g, " ");

        if (g->ops.pmu.pmu_pg_supported_engines_list)
                pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);

        gk20a_gr_wait_initialized(g);

        for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
                pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
                pg_engine_id++) {

                if (BIT(pg_engine_id) & pg_engine_id_list) {
                        pmu_pg_init_send(g, pg_engine_id);
                }
        }

        if (g->ops.pmu.pmu_pg_param_post_init)
                g->ops.pmu.pmu_pg_param_post_init(g);

        return 0;
}

static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
        void *param, u32 handle, u32 status)
{
        struct nvgpu_pmu *pmu = param;
        struct pmu_pg_msg_eng_buf_stat *eng_buf_stat =
                &msg->msg.pg.eng_buf_stat;

        nvgpu_log_fn(g, " ");

        nvgpu_pmu_dbg(g,
                "reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
        if (status != 0) {
                nvgpu_err(g, "PGENG cmd aborted");
                /* TBD: disable ELPG */
                return;
        }

        pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
        if ((!pmu->buf_loaded) &&
                (pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
                nvgpu_err(g, "failed to load PGENG buffer");
        else {
                nvgpu_pmu_state_change(g, pmu->pmu_state, true);
        }
}

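/*
 * Bind the FECS PG engine buffer to the PMU by posting an ENG_BUF_LOAD
 * command; the buffer's GPU VA and size come from pmu->pg_buf and the ack
 * is handled in pmu_handle_pg_buf_config_msg().
 */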
int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
{
        struct nvgpu_pmu *pmu = &g->pmu;
        struct pmu_cmd cmd;
        u32 desc;
        int err = 0;
        u32 gr_engine_id;

        nvgpu_log_fn(g, " ");

        gr_engine_id = gk20a_fifo_get_gr_engine_id(g);

        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
        cmd.hdr.size = PMU_CMD_HDR_SIZE +
                g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
                PMU_PG_CMD_ID_ENG_BUF_LOAD);
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
                gr_engine_id);
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
                PMU_PGENG_GR_BUFFER_IDX_FECS);
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
                pmu->pg_buf.size);
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
                u64_lo32(pmu->pg_buf.gpu_va));
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
                (u8)(pmu->pg_buf.gpu_va & 0xFF));
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
                PMU_DMAIDX_VIRT);

        pmu->buf_loaded = false;
        nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
        nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false);
        err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
                pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
        if (err)
                nvgpu_err(g, "cmd LOAD PMU_PGENG_GR_BUFFER_IDX_FECS failed\n");

        return err;
}

void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
{
        struct nvgpu_pmu *pmu = &g->pmu;
        struct pmu_cmd cmd;
        u32 desc;
        u32 gr_engine_id;
        int err = 0;

        gr_engine_id = gk20a_fifo_get_gr_engine_id(g);

        memset(&cmd, 0, sizeof(struct pmu_cmd));
        cmd.hdr.unit_id = PMU_UNIT_PG;
        cmd.hdr.size = PMU_CMD_HDR_SIZE +
                g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
                PMU_PG_CMD_ID_ENG_BUF_LOAD);
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
                gr_engine_id);
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
                PMU_PGENG_GR_BUFFER_IDX_ZBC);
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
                pmu->seq_buf.size);
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
                u64_lo32(pmu->seq_buf.gpu_va));
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
                (u8)(pmu->seq_buf.gpu_va & 0xFF));
        g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
                PMU_DMAIDX_VIRT);

        pmu->buf_loaded = false;
        nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
        nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false);
        err = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
                pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
        if (err)
                nvgpu_err(g, "CMD LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC failed\n");
}

/* stats */
int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
        struct pmu_pg_stats_data *pg_stat_data)
{
        struct nvgpu_pmu *pmu = &g->pmu;
        u32 pg_engine_id_list = 0;

        if (!pmu->initialized) {
                pg_stat_data->ingating_time = 0;
                pg_stat_data->ungating_time = 0;
                pg_stat_data->gating_cnt = 0;
                return 0;
        }

        if (g->ops.pmu.pmu_pg_supported_engines_list)
                pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);

        if (BIT(pg_engine_id) & pg_engine_id_list)
                g->ops.pmu.pmu_elpg_statistics(g, pg_engine_id,
                        pg_stat_data);

        return 0;
}

/* AELPG */
static void ap_callback_init_and_enable_ctrl(
        struct gk20a *g, struct pmu_msg *msg,
        void *param, u32 seq_desc, u32 status)
{
        /* Define p_ap (i.e. pointer to pmu_ap structure) */
        WARN_ON(!msg);

        if (!status) {
                switch (msg->msg.pg.ap_msg.cmn.msg_id) {
                case PMU_AP_MSG_ID_INIT_ACK:
                        nvgpu_pmu_dbg(g, "reply PMU_AP_CMD_ID_INIT");
                        break;

                default:
                        nvgpu_pmu_dbg(g,
                                "%s: Invalid Adaptive Power Message: %x\n",
                                __func__, msg->msg.pg.ap_msg.cmn.msg_id);
                        break;
                }
        }
}

/* Send an Adaptive Power (AP) related command to PMU */
int nvgpu_pmu_ap_send_command(struct gk20a *g,
        union pmu_ap_cmd *p_ap_cmd, bool b_block)
{
        struct nvgpu_pmu *pmu = &g->pmu;
        /* FIXME: where is the PG structure defined?? */
        u32 status = 0;
        struct pmu_cmd cmd;
        u32 seq;
        pmu_callback p_callback = NULL;

        memset(&cmd, 0, sizeof(struct pmu_cmd));

        /* Copy common members */
        cmd.hdr.unit_id = PMU_UNIT_PG;
        cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(union pmu_ap_cmd);

        cmd.cmd.pg.ap_cmd.cmn.cmd_type = PMU_PG_CMD_ID_AP;
        cmd.cmd.pg.ap_cmd.cmn.cmd_id = p_ap_cmd->cmn.cmd_id;

        /* Copy other members of command */
        switch (p_ap_cmd->cmn.cmd_id) {
        case PMU_AP_CMD_ID_INIT:
                nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_INIT");
                cmd.cmd.pg.ap_cmd.init.pg_sampling_period_us =
                        p_ap_cmd->init.pg_sampling_period_us;
                break;

        case PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL:
                nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL");
                cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.ctrl_id =
                        p_ap_cmd->init_and_enable_ctrl.ctrl_id;
                memcpy(
                        (void *)&(cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.params),
                        (void *)&(p_ap_cmd->init_and_enable_ctrl.params),
                        sizeof(struct pmu_ap_ctrl_init_params));

                p_callback = ap_callback_init_and_enable_ctrl;
                break;

        case PMU_AP_CMD_ID_ENABLE_CTRL:
                nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_ENABLE_CTRL");
                cmd.cmd.pg.ap_cmd.enable_ctrl.ctrl_id =
                        p_ap_cmd->enable_ctrl.ctrl_id;
                break;

        case PMU_AP_CMD_ID_DISABLE_CTRL:
                nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_DISABLE_CTRL");
                cmd.cmd.pg.ap_cmd.disable_ctrl.ctrl_id =
                        p_ap_cmd->disable_ctrl.ctrl_id;
                break;

        case PMU_AP_CMD_ID_KICK_CTRL:
                nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_KICK_CTRL");
                cmd.cmd.pg.ap_cmd.kick_ctrl.ctrl_id =
                        p_ap_cmd->kick_ctrl.ctrl_id;
                cmd.cmd.pg.ap_cmd.kick_ctrl.skip_count =
                        p_ap_cmd->kick_ctrl.skip_count;
                break;

        default:
                nvgpu_pmu_dbg(g, "%s: Invalid Adaptive Power command %d\n",
                        __func__, p_ap_cmd->cmn.cmd_id);
                return 0x2f;
        }

        status = nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
                p_callback, pmu, &seq, ~0);

        if (status) {
                nvgpu_pmu_dbg(g,
                        "%s: Unable to submit Adaptive Power Command %d\n",
                        __func__, p_ap_cmd->cmn.cmd_id);
                goto err_return;
        }

        /* TODO: Implement blocking calls (b_block) */

err_return:
        return status;
}

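/*
 * AELPG setup helpers. The aelpg_param[] array supplies the tuning values
 * used below: [0] sampling period (us), [1] min idle filter (us),
 * [2] min target saving (us), [3] power break-even (us),
 * [4] max cycles per sample.
 */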
int nvgpu_aelpg_init(struct gk20a *g)
{
        int status = 0;

        /* Remove reliance on app_ctrl field. */
        union pmu_ap_cmd ap_cmd;

        /* TODO: Check for elpg being ready? */
        ap_cmd.init.cmd_id = PMU_AP_CMD_ID_INIT;
        ap_cmd.init.pg_sampling_period_us = g->pmu.aelpg_param[0];

        status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
        return status;
}

int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id)
{
        int status = 0;
        union pmu_ap_cmd ap_cmd;

        /* TODO: Probably check if ELPG is ready? */
        ap_cmd.init_and_enable_ctrl.cmd_id = PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL;
        ap_cmd.init_and_enable_ctrl.ctrl_id = ctrl_id;
        ap_cmd.init_and_enable_ctrl.params.min_idle_filter_us =
                g->pmu.aelpg_param[1];
        ap_cmd.init_and_enable_ctrl.params.min_target_saving_us =
                g->pmu.aelpg_param[2];
        ap_cmd.init_and_enable_ctrl.params.power_break_even_us =
                g->pmu.aelpg_param[3];
        ap_cmd.init_and_enable_ctrl.params.cycles_per_sample_max =
                g->pmu.aelpg_param[4];

        switch (ctrl_id) {
        case PMU_AP_CTRL_ID_GRAPHICS:
                break;
        default:
                break;
        }

        status = nvgpu_pmu_ap_send_command(g, &ap_cmd, true);
        return status;
}