author		Mahantesh Kumbar <mkumbar@nvidia.com>	2017-06-07 12:56:00 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-06-13 16:19:47 -0400
commit		c18364d0c4b3fb6581f937c018cd01fc329601bb (patch)
tree		923ab682435379dc8bad7852c49725bf7f0f5286
parent		45355f00e7de9068f403682044f550026fa7e86e (diff)
gpu: nvgpu: moved pg out from pmu_gk20a.c/h
- moved pg related code to pmu_pg.c under common/pmu folder
  PG state machine support methods
  PG ACK handlers
  AELPG methods
  PG enable/disable methods
- prepended with nvgpu_ for elpg/aelpg global methods by replacing gk20a_

JIRA NVGPU-97

Change-Id: I2148a69ff86b5c5d43c521ff6e241db84afafd82
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1498363
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r--	drivers/gpu/nvgpu/Makefile.nvgpu	1
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu.c	8
-rw-r--r--	drivers/gpu/nvgpu/common/pmu/pmu_pg.c	719
-rw-r--r--	drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/fifo_gk20a.c	8
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c	20
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	4
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.h	6
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.c	692
-rw-r--r--	drivers/gpu/nvgpu/gk20a/pmu_gk20a.h	50
-rw-r--r--	drivers/gpu/nvgpu/include/nvgpu/pmu.h	49
-rw-r--r--	drivers/gpu/nvgpu/lpwr/lpwr.c	4
12 files changed, 801 insertions, 764 deletions
diff --git a/drivers/gpu/nvgpu/Makefile.nvgpu b/drivers/gpu/nvgpu/Makefile.nvgpu
index 77d7be4a..72c2f8d5 100644
--- a/drivers/gpu/nvgpu/Makefile.nvgpu
+++ b/drivers/gpu/nvgpu/Makefile.nvgpu
@@ -61,6 +61,7 @@ nvgpu-y := \
 	common/pmu/pmu.o \
 	common/pmu/pmu_ipc.o \
 	common/pmu/pmu_fw.o \
+	common/pmu/pmu_pg.o \
 	gk20a/gk20a.o \
 	gk20a/bus_gk20a.o \
 	gk20a/pramin_gk20a.o \
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu.c b/drivers/gpu/nvgpu/common/pmu/pmu.c
index ca532049..fc72d1fc 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu.c
@@ -276,15 +276,15 @@ static void pmu_setup_hw_enable_elpg(struct gk20a *g)
 		/* Init reg with prod values*/
 		if (g->ops.pmu.pmu_setup_elpg)
 			g->ops.pmu.pmu_setup_elpg(g);
-		gk20a_pmu_enable_elpg(g);
+		nvgpu_pmu_enable_elpg(g);
 	}
 
 	nvgpu_udelay(50);
 
 	/* Enable AELPG */
 	if (g->aelpg_enabled) {
-		gk20a_aelpg_init(g);
-		gk20a_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
+		nvgpu_aelpg_init(g);
+		nvgpu_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
 	}
 }
 
@@ -398,7 +398,7 @@ int nvgpu_pmu_destroy(struct gk20a *g)
 	nvgpu_pmu_get_pg_stats(g,
 		PMU_PG_ELPG_ENGINE_ID_GRAPHICS, &pg_stat_data);
 
-	gk20a_pmu_disable_elpg(g);
+	nvgpu_pmu_disable_elpg(g);
 	pmu->initialized = false;
 
 	/* update the s/w ELPG residency counters */
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_pg.c b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
new file mode 100644
index 00000000..046f4d59
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_pg.c
@@ -0,0 +1,719 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include <nvgpu/pmu.h>
15#include <nvgpu/log.h>
16#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
17
18#include "gk20a/gk20a.h"
19
20/* state transition :
21 * OFF => [OFF_ON_PENDING optional] => ON_PENDING => ON => OFF
22 * ON => OFF is always synchronized
23 */
24/* elpg is off */
25#define PMU_ELPG_STAT_OFF 0
26/* elpg is on */
27#define PMU_ELPG_STAT_ON 1
28/* elpg is off, ALLOW cmd has been sent, wait for ack */
29#define PMU_ELPG_STAT_ON_PENDING 2
30/* elpg is on, DISALLOW cmd has been sent, wait for ack */
31#define PMU_ELPG_STAT_OFF_PENDING 3
32/* elpg is off, caller has requested on, but ALLOW
33 * cmd hasn't been sent due to ENABLE_ALLOW delay
34 */
35#define PMU_ELPG_STAT_OFF_ON_PENDING 4
36
37#define PMU_PGENG_GR_BUFFER_IDX_INIT (0)
38#define PMU_PGENG_GR_BUFFER_IDX_ZBC (1)
39#define PMU_PGENG_GR_BUFFER_IDX_FECS (2)
40
41static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
42 void *param, u32 handle, u32 status)
43{
44 struct nvgpu_pmu *pmu = param;
45 struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg;
46
47 nvgpu_log_fn(g, " ");
48
49 if (status != 0) {
50 nvgpu_err(g, "ELPG cmd aborted");
51 /* TBD: disable ELPG */
52 return;
53 }
54
55 switch (elpg_msg->msg) {
56 case PMU_PG_ELPG_MSG_INIT_ACK:
57 nvgpu_pmu_dbg(g, "INIT_PG is ack from PMU, eng - %d",
58 elpg_msg->engine_id);
59 break;
60 case PMU_PG_ELPG_MSG_ALLOW_ACK:
61 nvgpu_pmu_dbg(g, "ALLOW is ack from PMU, eng - %d",
62 elpg_msg->engine_id);
63 if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
64 pmu->elpg_stat = PMU_ELPG_STAT_ON;
65 else if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
66 pmu->mscg_transition_state = PMU_ELPG_STAT_ON;
67 break;
68 case PMU_PG_ELPG_MSG_DISALLOW_ACK:
69 nvgpu_pmu_dbg(g, "DISALLOW is ack from PMU, eng - %d",
70 elpg_msg->engine_id);
71
72 if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
73 pmu->elpg_stat = PMU_ELPG_STAT_OFF;
74 else if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
75 pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
76
77 if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) {
78 if (g->ops.pmu.pmu_pg_engines_feature_list &&
79 g->ops.pmu.pmu_pg_engines_feature_list(g,
80 PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
81 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
82 pmu->initialized = true;
83 nvgpu_pmu_state_change(g, PMU_STATE_STARTED,
84 false);
85 WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
86 /* make status visible */
87 smp_mb();
88 } else
89 nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTED,
90 true);
91 }
92 break;
93 default:
94 nvgpu_err(g,
95 "unsupported ELPG message : 0x%04x", elpg_msg->msg);
96 }
97}
98
99/* PG enable/disable */
100int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
101{
102 u32 status = 0;
103
104 if (enable_pg == true) {
105 if (g->ops.pmu.pmu_pg_engines_feature_list &&
106 g->ops.pmu.pmu_pg_engines_feature_list(g,
107 PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
108 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
109 if (g->ops.pmu.pmu_lpwr_enable_pg)
110 status = g->ops.pmu.pmu_lpwr_enable_pg(g,
111 true);
112 } else if (g->support_pmu && g->can_elpg)
113 status = nvgpu_pmu_enable_elpg(g);
114 } else if (enable_pg == false) {
115 if (g->ops.pmu.pmu_pg_engines_feature_list &&
116 g->ops.pmu.pmu_pg_engines_feature_list(g,
117 PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
118 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
119 if (g->ops.pmu.pmu_lpwr_disable_pg)
120 status = g->ops.pmu.pmu_lpwr_disable_pg(g,
121 true);
122 } else if (g->support_pmu && g->can_elpg)
123 status = nvgpu_pmu_disable_elpg(g);
124 }
125
126 return status;
127}
128
129static int pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
130{
131 struct nvgpu_pmu *pmu = &g->pmu;
132 struct pmu_cmd cmd;
133 u32 seq, status;
134
135 nvgpu_log_fn(g, " ");
136
137 memset(&cmd, 0, sizeof(struct pmu_cmd));
138 cmd.hdr.unit_id = PMU_UNIT_PG;
139 cmd.hdr.size = PMU_CMD_HDR_SIZE +
140 sizeof(struct pmu_pg_cmd_elpg_cmd);
141 cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
142 cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
143 cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_ALLOW;
144
145 /* no need to wait ack for ELPG enable but set
146 * pending to sync with follow up ELPG disable
147 */
148 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
149 pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
150 else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
151 pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;
152
153 nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_ALLOW");
154 status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
155 PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
156 pmu, &seq, ~0);
157 WARN_ON(status != 0);
158
159 nvgpu_log_fn(g, "done");
160 return 0;
161}
162
163int nvgpu_pmu_enable_elpg(struct gk20a *g)
164{
165 struct nvgpu_pmu *pmu = &g->pmu;
166 struct gr_gk20a *gr = &g->gr;
167 u32 pg_engine_id;
168 u32 pg_engine_id_list = 0;
169
170 int ret = 0;
171
172 nvgpu_log_fn(g, " ");
173
174 if (!g->support_pmu)
175 return ret;
176
177 nvgpu_mutex_acquire(&pmu->elpg_mutex);
178
179 pmu->elpg_refcnt++;
180 if (pmu->elpg_refcnt <= 0)
181 goto exit_unlock;
182
183 /* something is not right if we end up in following code path */
184 if (unlikely(pmu->elpg_refcnt > 1)) {
185 nvgpu_warn(g,
186 "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
187 __func__, pmu->elpg_refcnt);
188 WARN_ON(1);
189 }
190
191 /* do NOT enable elpg until golden ctx is created,
192 * which is related with the ctx that ELPG save and restore.
193 */
194 if (unlikely(!gr->ctx_vars.golden_image_initialized))
195 goto exit_unlock;
196
197 /* return if ELPG is already on or on_pending or off_on_pending */
198 if (pmu->elpg_stat != PMU_ELPG_STAT_OFF)
199 goto exit_unlock;
200
201 if (g->ops.pmu.pmu_pg_supported_engines_list)
202 pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
203
204 for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
205 pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
206 pg_engine_id++) {
207
208 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
209 ACCESS_ONCE(pmu->mscg_stat) == PMU_MSCG_DISABLED)
210 continue;
211
212 if (BIT(pg_engine_id) & pg_engine_id_list)
213 ret = pmu_enable_elpg_locked(g, pg_engine_id);
214 }
215
216exit_unlock:
217 nvgpu_mutex_release(&pmu->elpg_mutex);
218 nvgpu_log_fn(g, "done");
219 return ret;
220}
221
222int nvgpu_pmu_disable_elpg(struct gk20a *g)
223{
224 struct nvgpu_pmu *pmu = &g->pmu;
225 struct pmu_cmd cmd;
226 u32 seq;
227 int ret = 0;
228 u32 pg_engine_id;
229 u32 pg_engine_id_list = 0;
230 u32 *ptr = NULL;
231
232 gk20a_dbg_fn("");
233
234 if (g->ops.pmu.pmu_pg_supported_engines_list)
235 pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
236
237 if (!g->support_pmu)
238 return ret;
239
240 nvgpu_mutex_acquire(&pmu->elpg_mutex);
241
242 pmu->elpg_refcnt--;
243 if (pmu->elpg_refcnt > 0) {
244 nvgpu_warn(g,
245 "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
246 __func__, pmu->elpg_refcnt);
247 WARN_ON(1);
248 ret = 0;
249 goto exit_unlock;
250 }
251
252 /* cancel off_on_pending and return */
253 if (pmu->elpg_stat == PMU_ELPG_STAT_OFF_ON_PENDING) {
254 pmu->elpg_stat = PMU_ELPG_STAT_OFF;
255 ret = 0;
256 goto exit_reschedule;
257 }
258 /* wait if on_pending */
259 else if (pmu->elpg_stat == PMU_ELPG_STAT_ON_PENDING) {
260
261 pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
262 &pmu->elpg_stat, PMU_ELPG_STAT_ON);
263
264 if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
265 nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d",
266 pmu->elpg_stat);
267 pmu_dump_elpg_stats(pmu);
268 pmu_dump_falcon_stats(pmu);
269 ret = -EBUSY;
270 goto exit_unlock;
271 }
272 }
273 /* return if ELPG is already off */
274 else if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
275 ret = 0;
276 goto exit_reschedule;
277 }
278
279 for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
280 pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
281 pg_engine_id++) {
282
283 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
284 ACCESS_ONCE(pmu->mscg_stat) == PMU_MSCG_DISABLED)
285 continue;
286
287 if (BIT(pg_engine_id) & pg_engine_id_list) {
288 memset(&cmd, 0, sizeof(struct pmu_cmd));
289 cmd.hdr.unit_id = PMU_UNIT_PG;
290 cmd.hdr.size = PMU_CMD_HDR_SIZE +
291 sizeof(struct pmu_pg_cmd_elpg_cmd);
292 cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
293 cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
294 cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;
295
296 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
297 pmu->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
298 else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
299 pmu->mscg_transition_state =
300 PMU_ELPG_STAT_OFF_PENDING;
301
302 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
303 ptr = &pmu->elpg_stat;
304 else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
305 ptr = &pmu->mscg_transition_state;
306
307 nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
308 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
309 PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
310 pmu, &seq, ~0);
311
312 pmu_wait_message_cond(pmu,
313 gk20a_get_gr_idle_timeout(g),
314 ptr, PMU_ELPG_STAT_OFF);
315 if (*ptr != PMU_ELPG_STAT_OFF) {
316 nvgpu_err(g, "ELPG_DISALLOW_ACK failed");
317 pmu_dump_elpg_stats(pmu);
318 pmu_dump_falcon_stats(pmu);
319 ret = -EBUSY;
320 goto exit_unlock;
321 }
322 }
323 }
324
325exit_reschedule:
326exit_unlock:
327 nvgpu_mutex_release(&pmu->elpg_mutex);
328 nvgpu_log_fn(g, "done");
329 return ret;
330}
331
332/* PG init */
333static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
334 void *param, u32 handle, u32 status)
335{
336 struct nvgpu_pmu *pmu = param;
337
338 nvgpu_log_fn(g, " ");
339
340 if (status != 0) {
341 nvgpu_err(g, "ELPG cmd aborted");
342 /* TBD: disable ELPG */
343 return;
344 }
345
346 switch (msg->msg.pg.stat.sub_msg_id) {
347 case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET:
348 nvgpu_pmu_dbg(g, "ALLOC_DMEM_OFFSET is acknowledged from PMU");
349 pmu->stat_dmem_offset[msg->msg.pg.stat.engine_id] =
350 msg->msg.pg.stat.data;
351 break;
352 default:
353 break;
354 }
355}
356
357static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
358{
359 struct nvgpu_pmu *pmu = &g->pmu;
360 struct pmu_cmd cmd;
361 u32 seq;
362
363 nvgpu_log_fn(g, " ");
364
365 gk20a_pmu_pg_idle_counter_config(g, pg_engine_id);
366
367 if (g->ops.pmu.pmu_pg_init_param)
368 g->ops.pmu.pmu_pg_init_param(g, pg_engine_id);
369
370 /* init ELPG */
371 memset(&cmd, 0, sizeof(struct pmu_cmd));
372 cmd.hdr.unit_id = PMU_UNIT_PG;
373 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
374 cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
375 cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
376 cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_INIT;
377
378 nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_INIT");
379 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
380 pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
381
382 /* alloc dmem for powergating state log */
383 pmu->stat_dmem_offset[pg_engine_id] = 0;
384 memset(&cmd, 0, sizeof(struct pmu_cmd));
385 cmd.hdr.unit_id = PMU_UNIT_PG;
386 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_stat);
387 cmd.cmd.pg.stat.cmd_type = PMU_PG_CMD_ID_PG_STAT;
388 cmd.cmd.pg.stat.engine_id = pg_engine_id;
389 cmd.cmd.pg.stat.sub_cmd_id = PMU_PG_STAT_CMD_ALLOC_DMEM;
390 cmd.cmd.pg.stat.data = 0;
391
392 nvgpu_pmu_dbg(g, "cmd post PMU_PG_STAT_CMD_ALLOC_DMEM");
393 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
394 pmu_handle_pg_stat_msg, pmu, &seq, ~0);
395
396 /* disallow ELPG initially
397 * PMU ucode requires a disallow cmd before allow cmd
398 */
399 /* set for wait_event PMU_ELPG_STAT_OFF */
400 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
401 pmu->elpg_stat = PMU_ELPG_STAT_OFF;
402 else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
403 pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
404 memset(&cmd, 0, sizeof(struct pmu_cmd));
405 cmd.hdr.unit_id = PMU_UNIT_PG;
406 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
407 cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
408 cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
409 cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;
410
411 nvgpu_pmu_dbg(g, "cmd post PMU_PG_ELPG_CMD_DISALLOW");
412 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
413 pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
414
415 return 0;
416}
417
418int nvgpu_pmu_init_powergating(struct gk20a *g)
419{
420 struct nvgpu_pmu *pmu = &g->pmu;
421 u32 pg_engine_id;
422 u32 pg_engine_id_list = 0;
423
424 nvgpu_log_fn(g, " ");
425
426 if (g->ops.pmu.pmu_pg_supported_engines_list)
427 pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
428
429 gk20a_gr_wait_initialized(g);
430
431 for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
432 pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
433 pg_engine_id++) {
434
435 if (BIT(pg_engine_id) & pg_engine_id_list) {
436 pmu_pg_init_send(g, pg_engine_id);
437 if (pmu->pmu_state == PMU_STATE_INIT_RECEIVED)
438 nvgpu_pmu_state_change(g,
439 PMU_STATE_ELPG_BOOTING, false);
440 }
441 }
442
443 if (g->ops.pmu.pmu_pg_param_post_init)
444 g->ops.pmu.pmu_pg_param_post_init(g);
445
446 return 0;
447}
448
449static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
450 void *param, u32 handle, u32 status)
451{
452 struct nvgpu_pmu *pmu = param;
453 struct pmu_pg_msg_eng_buf_stat *eng_buf_stat =
454 &msg->msg.pg.eng_buf_stat;
455
456 nvgpu_log_fn(g, " ");
457
458 nvgpu_pmu_dbg(g,
459 "reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
460 if (status != 0) {
461 nvgpu_err(g, "PGENG cmd aborted");
462 /* TBD: disable ELPG */
463 return;
464 }
465
466 pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
467 if ((!pmu->buf_loaded) &&
468 (pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
469 nvgpu_err(g, "failed to load PGENG buffer");
470 else {
471 nvgpu_pmu_state_change(g, pmu->pmu_state, true);
472 }
473}
474
475int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
476{
477 struct nvgpu_pmu *pmu = &g->pmu;
478 struct pmu_cmd cmd;
479 u32 desc;
480 int err = 0;
481 u32 gr_engine_id;
482
483 gk20a_dbg_fn("");
484
485 gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
486
487 memset(&cmd, 0, sizeof(struct pmu_cmd));
488 cmd.hdr.unit_id = PMU_UNIT_PG;
489 cmd.hdr.size = PMU_CMD_HDR_SIZE +
490 g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
491 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
492 PMU_PG_CMD_ID_ENG_BUF_LOAD);
493 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
494 gr_engine_id);
495 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
496 PMU_PGENG_GR_BUFFER_IDX_FECS);
497 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
498 pmu->pg_buf.size);
499 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
500 u64_lo32(pmu->pg_buf.gpu_va));
501 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
502 (u8)(pmu->pg_buf.gpu_va & 0xFF));
503 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
504 PMU_DMAIDX_VIRT);
505
506 pmu->buf_loaded = false;
507 nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
508 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
509 pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
510 nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false);
511 return err;
512}
513
514void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
515{
516 struct nvgpu_pmu *pmu = &g->pmu;
517 struct pmu_cmd cmd;
518 u32 desc;
519 u32 gr_engine_id;
520
521 gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
522
523 memset(&cmd, 0, sizeof(struct pmu_cmd));
524 cmd.hdr.unit_id = PMU_UNIT_PG;
525 cmd.hdr.size = PMU_CMD_HDR_SIZE +
526 g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
527 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
528 PMU_PG_CMD_ID_ENG_BUF_LOAD);
529 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
530 gr_engine_id);
531 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
532 PMU_PGENG_GR_BUFFER_IDX_ZBC);
533 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
534 pmu->seq_buf.size);
535 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
536 u64_lo32(pmu->seq_buf.gpu_va));
537 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
538 (u8)(pmu->seq_buf.gpu_va & 0xFF));
539 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
540 PMU_DMAIDX_VIRT);
541
542 pmu->buf_loaded = false;
543 nvgpu_pmu_dbg(g, "cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
544 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
545 pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
546 nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false);
547}
548
549/* stats */
550int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
551 struct pmu_pg_stats_data *pg_stat_data)
552{
553 struct nvgpu_pmu *pmu = &g->pmu;
554 u32 pg_engine_id_list = 0;
555
556 if (!pmu->initialized) {
557 pg_stat_data->ingating_time = 0;
558 pg_stat_data->ungating_time = 0;
559 pg_stat_data->gating_cnt = 0;
560 return 0;
561 }
562
563 if (g->ops.pmu.pmu_pg_supported_engines_list)
564 pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
565
566 if (BIT(pg_engine_id) & pg_engine_id_list)
567 g->ops.pmu.pmu_elpg_statistics(g, pg_engine_id,
568 pg_stat_data);
569
570 return 0;
571}
572
573/* AELPG */
574static void ap_callback_init_and_enable_ctrl(
575 struct gk20a *g, struct pmu_msg *msg,
576 void *param, u32 seq_desc, u32 status)
577{
578 /* Define p_ap (i.e pointer to pmu_ap structure) */
579 WARN_ON(!msg);
580
581 if (!status) {
582 switch (msg->msg.pg.ap_msg.cmn.msg_id) {
583 case PMU_AP_MSG_ID_INIT_ACK:
584 nvgpu_pmu_dbg(g, "reply PMU_AP_CMD_ID_INIT");
585 break;
586
587 default:
588 nvgpu_pmu_dbg(g,
589 "%s: Invalid Adaptive Power Message: %x\n",
590 __func__, msg->msg.pg.ap_msg.cmn.msg_id);
591 break;
592 }
593 }
594}
595
596/* Send an Adaptive Power (AP) related command to PMU */
597int nvgpu_pmu_ap_send_command(struct gk20a *g,
598 union pmu_ap_cmd *p_ap_cmd, bool b_block)
599{
600 struct nvgpu_pmu *pmu = &g->pmu;
601 /* FIXME: where is the PG structure defined?? */
602 u32 status = 0;
603 struct pmu_cmd cmd;
604 u32 seq;
605 pmu_callback p_callback = NULL;
606
607 memset(&cmd, 0, sizeof(struct pmu_cmd));
608
609 /* Copy common members */
610 cmd.hdr.unit_id = PMU_UNIT_PG;
611 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(union pmu_ap_cmd);
612
613 cmd.cmd.pg.ap_cmd.cmn.cmd_type = PMU_PG_CMD_ID_AP;
614 cmd.cmd.pg.ap_cmd.cmn.cmd_id = p_ap_cmd->cmn.cmd_id;
615
616 /* Copy other members of command */
617 switch (p_ap_cmd->cmn.cmd_id) {
618 case PMU_AP_CMD_ID_INIT:
619 nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_INIT");
620 cmd.cmd.pg.ap_cmd.init.pg_sampling_period_us =
621 p_ap_cmd->init.pg_sampling_period_us;
622 break;
623
624 case PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL:
625 nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL");
626 cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.ctrl_id =
627 p_ap_cmd->init_and_enable_ctrl.ctrl_id;
628 memcpy(
629 (void *)&(cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.params),
630 (void *)&(p_ap_cmd->init_and_enable_ctrl.params),
631 sizeof(struct pmu_ap_ctrl_init_params));
632
633 p_callback = ap_callback_init_and_enable_ctrl;
634 break;
635
636 case PMU_AP_CMD_ID_ENABLE_CTRL:
637 nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_ENABLE_CTRL");
638 cmd.cmd.pg.ap_cmd.enable_ctrl.ctrl_id =
639 p_ap_cmd->enable_ctrl.ctrl_id;
640 break;
641
642 case PMU_AP_CMD_ID_DISABLE_CTRL:
643 nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_DISABLE_CTRL");
644 cmd.cmd.pg.ap_cmd.disable_ctrl.ctrl_id =
645 p_ap_cmd->disable_ctrl.ctrl_id;
646 break;
647
648 case PMU_AP_CMD_ID_KICK_CTRL:
649 nvgpu_pmu_dbg(g, "cmd post PMU_AP_CMD_ID_KICK_CTRL");
650 cmd.cmd.pg.ap_cmd.kick_ctrl.ctrl_id =
651 p_ap_cmd->kick_ctrl.ctrl_id;
652 cmd.cmd.pg.ap_cmd.kick_ctrl.skip_count =
653 p_ap_cmd->kick_ctrl.skip_count;
654 break;
655
656 default:
657 nvgpu_pmu_dbg(g, "%s: Invalid Adaptive Power command %d\n",
658 __func__, p_ap_cmd->cmn.cmd_id);
659 return 0x2f;
660 }
661
662 status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
663 p_callback, pmu, &seq, ~0);
664
665 if (status) {
666 nvgpu_pmu_dbg(g,
667 "%s: Unable to submit Adaptive Power Command %d\n",
668 __func__, p_ap_cmd->cmn.cmd_id);
669 goto err_return;
670 }
671
672 /* TODO: Implement blocking calls (b_block) */
673
674err_return:
675 return status;
676}
677
678int nvgpu_aelpg_init(struct gk20a *g)
679{
680 int status = 0;
681
682 /* Remove reliance on app_ctrl field. */
683 union pmu_ap_cmd ap_cmd;
684
685 /* TODO: Check for elpg being ready? */
686 ap_cmd.init.cmd_id = PMU_AP_CMD_ID_INIT;
687 ap_cmd.init.pg_sampling_period_us = g->pmu.aelpg_param[0];
688
689 status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
690 return status;
691}
692
693int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id)
694{
695 int status = 0;
696 union pmu_ap_cmd ap_cmd;
697
698 /* TODO: Probably check if ELPG is ready? */
699 ap_cmd.init_and_enable_ctrl.cmd_id = PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL;
700 ap_cmd.init_and_enable_ctrl.ctrl_id = ctrl_id;
701 ap_cmd.init_and_enable_ctrl.params.min_idle_filter_us =
702 g->pmu.aelpg_param[1];
703 ap_cmd.init_and_enable_ctrl.params.min_target_saving_us =
704 g->pmu.aelpg_param[2];
705 ap_cmd.init_and_enable_ctrl.params.power_break_even_us =
706 g->pmu.aelpg_param[3];
707 ap_cmd.init_and_enable_ctrl.params.cycles_per_sample_max =
708 g->pmu.aelpg_param[4];
709
710 switch (ctrl_id) {
711 case PMU_AP_CTRL_ID_GRAPHICS:
712 break;
713 default:
714 break;
715 }
716
717 status = nvgpu_pmu_ap_send_command(g, &ap_cmd, true);
718 return status;
719}
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index f018ef89..77890da8 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -1308,7 +1308,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
 			return err;
 
 		/*do elpg disable before clock gating */
-		gk20a_pmu_pg_global_enable(g, false);
+		nvgpu_pmu_pg_global_enable(g, false);
 
 		if (g->ops.clock_gating.slcg_gr_load_gating_prod)
 			g->ops.clock_gating.slcg_gr_load_gating_prod(g,
@@ -1355,7 +1355,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
 			g->ops.clock_gating.slcg_gr_load_gating_prod(g,
 				g->slcg_enabled);
 
-		gk20a_pmu_pg_global_enable(g, true);
+		nvgpu_pmu_pg_global_enable(g, true);
 
 		gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "module idle");
 		gk20a_idle(g);
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index 00b26cf4..5a571dc8 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -1248,7 +1248,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 
 	if (engine_enum == ENGINE_GR_GK20A) {
 		if (g->support_pmu && g->can_elpg)
-			gk20a_pmu_disable_elpg(g);
+			nvgpu_pmu_disable_elpg(g);
 		/* resetting engine will alter read/write index.
 		 * need to flush circular buffer before re-enabling FECS.
 		 */
@@ -1261,7 +1261,7 @@ void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 		   enough, we do full init sequence */
 		gk20a_gr_reset(g);
 		if (g->support_pmu && g->can_elpg)
-			gk20a_pmu_enable_elpg(g);
+			nvgpu_pmu_enable_elpg(g);
 	}
 	if ((engine_enum == ENGINE_GRCE_GK20A) ||
 		(engine_enum == ENGINE_ASYNC_CE_GK20A)) {
@@ -1496,7 +1496,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 
 	/* Disable power management */
 	if (g->support_pmu && g->can_elpg)
-		gk20a_pmu_disable_elpg(g);
+		nvgpu_pmu_disable_elpg(g);
 	if (g->ops.clock_gating.slcg_gr_load_gating_prod)
 		g->ops.clock_gating.slcg_gr_load_gating_prod(g,
 				false);
@@ -1699,7 +1699,7 @@ static bool gk20a_fifo_handle_mmu_fault(
 
 	/* It is safe to enable ELPG again. */
 	if (g->support_pmu && g->can_elpg)
-		gk20a_pmu_enable_elpg(g);
+		nvgpu_pmu_enable_elpg(g);
 
 	return verbose;
 }
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
index 4a79a142..275b663f 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
@@ -456,18 +456,18 @@ static ssize_t elpg_enable_store(struct device *dev,
 	 */
 	if (val && !g->elpg_enabled) {
 		g->elpg_enabled = true;
-		gk20a_pmu_pg_global_enable(g, true);
+		nvgpu_pmu_pg_global_enable(g, true);
 
 	} else if (!val && g->elpg_enabled) {
 		if (g->ops.pmu.pmu_pg_engines_feature_list &&
 			g->ops.pmu.pmu_pg_engines_feature_list(g,
 				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
 			PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
-			gk20a_pmu_pg_global_enable(g, false);
+			nvgpu_pmu_pg_global_enable(g, false);
 			g->elpg_enabled = false;
 		} else {
 			g->elpg_enabled = false;
-			gk20a_pmu_pg_global_enable(g, false);
+			nvgpu_pmu_pg_global_enable(g, false);
 		}
 	}
 	gk20a_idle(g);
@@ -524,13 +524,13 @@ static ssize_t mscg_enable_store(struct device *dev,
 	} else if (!val && g->mscg_enabled) {
 		if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
 				PMU_PG_LPWR_FEATURE_MSCG)) {
-			gk20a_pmu_pg_global_enable(g, false);
+			nvgpu_pmu_pg_global_enable(g, false);
 			WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
 			/* make status visible */
 			smp_mb();
 			g->mscg_enabled = false;
 			if (g->elpg_enabled)
-				gk20a_pmu_pg_global_enable(g, true);
+				nvgpu_pmu_pg_global_enable(g, true);
 		}
 		g->mscg_enabled = false;
 	}
@@ -584,11 +584,11 @@ static ssize_t aelpg_param_store(struct device *dev,
 		/* Disable AELPG */
 		ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
 		ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
-		status = gk20a_pmu_ap_send_command(g, &ap_cmd, false);
+		status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
 
 		/* Enable AELPG */
-		gk20a_aelpg_init(g);
-		gk20a_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
+		nvgpu_aelpg_init(g);
+		nvgpu_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
 	}
 
 	return count;
@@ -630,13 +630,13 @@ static ssize_t aelpg_enable_store(struct device *dev,
 			/* Enable AELPG */
 			ap_cmd.enable_ctrl.cmd_id = PMU_AP_CMD_ID_ENABLE_CTRL;
 			ap_cmd.enable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
-			status = gk20a_pmu_ap_send_command(g, &ap_cmd, false);
+			status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
 		} else if (!val && g->aelpg_enabled) {
 			g->aelpg_enabled = false;
 			/* Disable AELPG */
 			ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
 			ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
-			status = gk20a_pmu_ap_send_command(g, &ap_cmd, false);
+			status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
 		}
 	} else {
 		dev_info(dev, "PMU is not ready, AELPG request failed\n");
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index f56702dc..7631decf 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -3235,7 +3235,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 		u32 lockboost;
 
 		if (g->support_pmu) {
-			err = gk20a_pmu_disable_elpg(g);
+			err = nvgpu_pmu_disable_elpg(g);
 			if (err) {
 				nvgpu_err(g,
 					"failed to set disable elpg");
@@ -3285,7 +3285,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c,
 			args->flags |= NVGPU_ALLOC_OBJ_FLAGS_LOCKBOOST_ZERO;
 
 		if (g->support_pmu && g->can_elpg)
-			gk20a_pmu_enable_elpg(g);
+			nvgpu_pmu_enable_elpg(g);
 	}
 
 	/* init golden image, ELPG enabled after this is done */
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index deb8ea9c..de80c5e3 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -547,14 +547,14 @@ void gk20a_gr_clear_sm_hww(struct gk20a *g,
 	({ \
 		int err = 0; \
 		if (g->support_pmu && g->elpg_enabled) {\
-			err = gk20a_pmu_disable_elpg(g); \
+			err = nvgpu_pmu_disable_elpg(g); \
 			if (err) \
-				gk20a_pmu_enable_elpg(g); \
+				nvgpu_pmu_enable_elpg(g); \
 		} \
 		if (!err) { \
 			err = func; \
 			if (g->support_pmu && g->elpg_enabled) \
-				gk20a_pmu_enable_elpg(g); \
+				nvgpu_pmu_enable_elpg(g); \
 		} \
 		err; \
 	})
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 247b38a5..32303c6e 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -41,10 +41,6 @@
 #define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000
 #define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10
 
-static void ap_callback_init_and_enable_ctrl(
-		struct gk20a *g, struct pmu_msg *msg,
-		void *param, u32 seq_desc, u32 status);
-
 bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
 {
 	u32 i = 0, j = strlen(strings);
@@ -488,6 +484,14 @@ int pmu_bootstrap(struct nvgpu_pmu *pmu)
 	return 0;
 }
 
+void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id)
+{
+	gk20a_writel(g, pwr_pmu_pg_idlefilth_r(pg_engine_id),
+		PMU_PG_IDLE_THRESHOLD);
+	gk20a_writel(g, pwr_pmu_pg_ppuidlefilth_r(pg_engine_id),
+		PMU_PG_POST_POWERUP_IDLE_THRESHOLD);
+}
+
 int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
@@ -692,30 +696,6 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set)
 		pwr_pmu_msgq_tail_val_f(*tail));
 }
 
-static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg,
-			void *param, u32 handle, u32 status)
-{
-	struct nvgpu_pmu *pmu = param;
-	struct pmu_pg_msg_eng_buf_stat *eng_buf_stat = &msg->msg.pg.eng_buf_stat;
-
-	gk20a_dbg_fn("");
-
-	gk20a_dbg_pmu("reply PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
-	if (status != 0) {
-		nvgpu_err(g, "PGENG cmd aborted");
-		/* TBD: disable ELPG */
-		return;
-	}
-
-	pmu->buf_loaded = (eng_buf_stat->status == PMU_PG_MSG_ENG_BUF_LOADED);
-	if ((!pmu->buf_loaded) &&
-		(pmu->pmu_state == PMU_STATE_LOADING_PG_BUF))
-		nvgpu_err(g, "failed to load PGENG buffer");
-	else {
-		nvgpu_pmu_state_change(g, pmu->pmu_state, true);
-	}
-}
-
 static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
 {
 	struct nvgpu_pmu *pmu = &g->pmu;
@@ -750,80 +730,6 @@ static int gk20a_init_pmu_setup_hw1(struct gk20a *g)
750 730
751} 731}
752 732
753int nvgpu_pmu_init_bind_fecs(struct gk20a *g)
754{
755 struct nvgpu_pmu *pmu = &g->pmu;
756 struct pmu_cmd cmd;
757 u32 desc;
758 int err = 0;
759 u32 gr_engine_id;
760
761 gk20a_dbg_fn("");
762
763 gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
764
765 memset(&cmd, 0, sizeof(struct pmu_cmd));
766 cmd.hdr.unit_id = PMU_UNIT_PG;
767 cmd.hdr.size = PMU_CMD_HDR_SIZE +
768 g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
769 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
770 PMU_PG_CMD_ID_ENG_BUF_LOAD);
771 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
772 gr_engine_id);
773 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
774 PMU_PGENG_GR_BUFFER_IDX_FECS);
775 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
776 pmu->pg_buf.size);
777 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
778 u64_lo32(pmu->pg_buf.gpu_va));
779 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
780 (u8)(pmu->pg_buf.gpu_va & 0xFF));
781 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
782 PMU_DMAIDX_VIRT);
783
784 pmu->buf_loaded = false;
785 gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_FECS");
786 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
787 pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
788 nvgpu_pmu_state_change(g, PMU_STATE_LOADING_PG_BUF, false);
789 return err;
790}
791
792void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g)
793{
794 struct nvgpu_pmu *pmu = &g->pmu;
795 struct pmu_cmd cmd;
796 u32 desc;
797 u32 gr_engine_id;
798
799 gr_engine_id = gk20a_fifo_get_gr_engine_id(g);
800
801 memset(&cmd, 0, sizeof(struct pmu_cmd));
802 cmd.hdr.unit_id = PMU_UNIT_PG;
803 cmd.hdr.size = PMU_CMD_HDR_SIZE +
804 g->ops.pmu_ver.pg_cmd_eng_buf_load_size(&cmd.cmd.pg);
805 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type(&cmd.cmd.pg,
806 PMU_PG_CMD_ID_ENG_BUF_LOAD);
807 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id(&cmd.cmd.pg,
808 gr_engine_id);
809 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx(&cmd.cmd.pg,
810 PMU_PGENG_GR_BUFFER_IDX_ZBC);
811 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size(&cmd.cmd.pg,
812 pmu->seq_buf.size);
813 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base(&cmd.cmd.pg,
814 u64_lo32(pmu->seq_buf.gpu_va));
815 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset(&cmd.cmd.pg,
816 (u8)(pmu->seq_buf.gpu_va & 0xFF));
817 g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx(&cmd.cmd.pg,
818 PMU_DMAIDX_VIRT);
819
820 pmu->buf_loaded = false;
821 gk20a_dbg_pmu("cmd post PMU_PG_CMD_ID_ENG_BUF_LOAD PMU_PGENG_GR_BUFFER_IDX_ZBC");
822 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
823 pmu_handle_pg_buf_config_msg, pmu, &desc, ~0);
824 nvgpu_pmu_state_change(g, PMU_STATE_LOADING_ZBC, false);
825}
826
827static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr) 733static void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
828{ 734{
829 gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr); 735 gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr);
@@ -896,184 +802,6 @@ void gk20a_init_pmu_ops(struct gpu_ops *gops)
896 gops->pmu.reset = gk20a_pmu_reset; 802 gops->pmu.reset = gk20a_pmu_reset;
897} 803}
898 804
899static void pmu_handle_pg_elpg_msg(struct gk20a *g, struct pmu_msg *msg,
900 void *param, u32 handle, u32 status)
901{
902 struct nvgpu_pmu *pmu = param;
903 struct pmu_pg_msg_elpg_msg *elpg_msg = &msg->msg.pg.elpg_msg;
904
905 gk20a_dbg_fn("");
906
907 if (status != 0) {
908 nvgpu_err(g, "ELPG cmd aborted");
909 /* TBD: disable ELPG */
910 return;
911 }
912
913 switch (elpg_msg->msg) {
914 case PMU_PG_ELPG_MSG_INIT_ACK:
915 gk20a_dbg_pmu("INIT_PG is ack from PMU, eng - %d",
916 elpg_msg->engine_id);
917 break;
918 case PMU_PG_ELPG_MSG_ALLOW_ACK:
919 gk20a_dbg_pmu("ALLOW is ack from PMU, eng - %d",
920 elpg_msg->engine_id);
921 if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
922 pmu->elpg_stat = PMU_ELPG_STAT_ON;
923 else if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
924 pmu->mscg_transition_state = PMU_ELPG_STAT_ON;
925 break;
926 case PMU_PG_ELPG_MSG_DISALLOW_ACK:
927 gk20a_dbg_pmu("DISALLOW is ack from PMU, eng - %d",
928 elpg_msg->engine_id);
929
930 if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
931 pmu->elpg_stat = PMU_ELPG_STAT_OFF;
932 else if (elpg_msg->engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
933 pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
934
935 if (pmu->pmu_state == PMU_STATE_ELPG_BOOTING) {
936 if (g->ops.pmu.pmu_pg_engines_feature_list &&
937 g->ops.pmu.pmu_pg_engines_feature_list(g,
938 PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
939 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
940 pmu->initialized = true;
941 nvgpu_pmu_state_change(g, PMU_STATE_STARTED,
942 false);
943 WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
944 /* make status visible */
945 smp_mb();
946 } else
947 nvgpu_pmu_state_change(g, PMU_STATE_ELPG_BOOTED,
948 true);
949 }
950 break;
951 default:
952 nvgpu_err(g,
953 "unsupported ELPG message : 0x%04x", elpg_msg->msg);
954 }
955
956 return;
957}
958
959static void pmu_handle_pg_stat_msg(struct gk20a *g, struct pmu_msg *msg,
960 void *param, u32 handle, u32 status)
961{
962 struct nvgpu_pmu *pmu = param;
963
964 gk20a_dbg_fn("");
965
966 if (status != 0) {
967 nvgpu_err(g, "ELPG cmd aborted");
968 /* TBD: disable ELPG */
969 return;
970 }
971
972 switch (msg->msg.pg.stat.sub_msg_id) {
973 case PMU_PG_STAT_MSG_RESP_DMEM_OFFSET:
974 gk20a_dbg_pmu("ALLOC_DMEM_OFFSET is acknowledged from PMU");
975 pmu->stat_dmem_offset[msg->msg.pg.stat.engine_id] =
976 msg->msg.pg.stat.data;
977 break;
978 default:
979 break;
980 }
981}
982
983static int pmu_pg_init_send(struct gk20a *g, u32 pg_engine_id)
984{
985 struct nvgpu_pmu *pmu = &g->pmu;
986 struct pmu_cmd cmd;
987 u32 seq;
988
989 gk20a_dbg_fn("");
990
991 gk20a_writel(g, pwr_pmu_pg_idlefilth_r(pg_engine_id),
992 PMU_PG_IDLE_THRESHOLD);
993 gk20a_writel(g, pwr_pmu_pg_ppuidlefilth_r(pg_engine_id),
994 PMU_PG_POST_POWERUP_IDLE_THRESHOLD);
995
996 if (g->ops.pmu.pmu_pg_init_param)
997 g->ops.pmu.pmu_pg_init_param(g, pg_engine_id);
998
999 /* init ELPG */
1000 memset(&cmd, 0, sizeof(struct pmu_cmd));
1001 cmd.hdr.unit_id = PMU_UNIT_PG;
1002 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
1003 cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
1004 cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
1005 cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_INIT;
1006
1007 gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_INIT");
1008 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
1009 pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
1010
1011 /* alloc dmem for powergating state log */
1012 pmu->stat_dmem_offset[pg_engine_id] = 0;
1013 memset(&cmd, 0, sizeof(struct pmu_cmd));
1014 cmd.hdr.unit_id = PMU_UNIT_PG;
1015 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_stat);
1016 cmd.cmd.pg.stat.cmd_type = PMU_PG_CMD_ID_PG_STAT;
1017 cmd.cmd.pg.stat.engine_id = pg_engine_id;
1018 cmd.cmd.pg.stat.sub_cmd_id = PMU_PG_STAT_CMD_ALLOC_DMEM;
1019 cmd.cmd.pg.stat.data = 0;
1020
1021 gk20a_dbg_pmu("cmd post PMU_PG_STAT_CMD_ALLOC_DMEM");
1022 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_LPQ,
1023 pmu_handle_pg_stat_msg, pmu, &seq, ~0);
1024
1025 /* disallow ELPG initially
1026 PMU ucode requires a disallow cmd before allow cmd */
1027 /* set for wait_event PMU_ELPG_STAT_OFF */
1028 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
1029 pmu->elpg_stat = PMU_ELPG_STAT_OFF;
1030 else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
1031 pmu->mscg_transition_state = PMU_ELPG_STAT_OFF;
1032 memset(&cmd, 0, sizeof(struct pmu_cmd));
1033 cmd.hdr.unit_id = PMU_UNIT_PG;
1034 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_pg_cmd_elpg_cmd);
1035 cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
1036 cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
1037 cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;
1038
1039 gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_DISALLOW");
1040 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
1041 pmu_handle_pg_elpg_msg, pmu, &seq, ~0);
1042
1043 return 0;
1044}
1045
1046int nvgpu_pmu_init_powergating(struct gk20a *g)
1047{
1048 struct nvgpu_pmu *pmu = &g->pmu;
1049 u32 pg_engine_id;
1050 u32 pg_engine_id_list = 0;
1051
1052 gk20a_dbg_fn("");
1053
1054 if (g->ops.pmu.pmu_pg_supported_engines_list)
1055 pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
1056
1057 gk20a_gr_wait_initialized(g);
1058
1059 for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
1060 pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
1061 pg_engine_id++) {
1062
1063 if (BIT(pg_engine_id) & pg_engine_id_list) {
1064 pmu_pg_init_send(g, pg_engine_id);
1065 if (pmu->pmu_state == PMU_STATE_INIT_RECEIVED)
1066 nvgpu_pmu_state_change(g,
1067 PMU_STATE_ELPG_BOOTING, false);
1068 }
1069 }
1070
1071 if (g->ops.pmu.pmu_pg_param_post_init)
1072 g->ops.pmu.pmu_pg_param_post_init(g);
1073
1074 return 0;
1075}
1076
1077static u8 get_perfmon_id(struct nvgpu_pmu *pmu) 805static u8 get_perfmon_id(struct nvgpu_pmu *pmu)
1078{ 806{
1079 struct gk20a *g = gk20a_from_pmu(pmu); 807 struct gk20a *g = gk20a_from_pmu(pmu);
@@ -1355,7 +1083,7 @@ int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
 	return 0;
 }
 
-static void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
+void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
 {
 	struct gk20a *g = gk20a_from_pmu(pmu);
 	struct pmu_pg_stats stats;
@@ -1631,238 +1359,6 @@ void gk20a_pmu_isr(struct gk20a *g)
1631 nvgpu_mutex_release(&pmu->isr_mutex); 1359 nvgpu_mutex_release(&pmu->isr_mutex);
1632} 1360}
1633 1361
1634int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg)
1635{
1636 u32 status = 0;
1637
1638 if (enable_pg == true) {
1639 if (g->ops.pmu.pmu_pg_engines_feature_list &&
1640 g->ops.pmu.pmu_pg_engines_feature_list(g,
1641 PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
1642 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
1643 if (g->ops.pmu.pmu_lpwr_enable_pg)
1644 status = g->ops.pmu.pmu_lpwr_enable_pg(g,
1645 true);
1646 } else if (g->support_pmu && g->can_elpg)
1647 status = gk20a_pmu_enable_elpg(g);
1648 } else if (enable_pg == false) {
1649 if (g->ops.pmu.pmu_pg_engines_feature_list &&
1650 g->ops.pmu.pmu_pg_engines_feature_list(g,
1651 PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
1652 PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
1653 if (g->ops.pmu.pmu_lpwr_disable_pg)
1654 status = g->ops.pmu.pmu_lpwr_disable_pg(g,
1655 true);
1656 } else if (g->support_pmu && g->can_elpg)
1657 status = gk20a_pmu_disable_elpg(g);
1658 }
1659
1660 return status;
1661}
1662
1663static int gk20a_pmu_enable_elpg_locked(struct gk20a *g, u32 pg_engine_id)
1664{
1665 struct nvgpu_pmu *pmu = &g->pmu;
1666 struct pmu_cmd cmd;
1667 u32 seq, status;
1668
1669 gk20a_dbg_fn("");
1670
1671 memset(&cmd, 0, sizeof(struct pmu_cmd));
1672 cmd.hdr.unit_id = PMU_UNIT_PG;
1673 cmd.hdr.size = PMU_CMD_HDR_SIZE +
1674 sizeof(struct pmu_pg_cmd_elpg_cmd);
1675 cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
1676 cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
1677 cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_ALLOW;
1678
1679 /* no need to wait ack for ELPG enable but set
1680 * pending to sync with follow up ELPG disable
1681 */
1682 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
1683 pmu->elpg_stat = PMU_ELPG_STAT_ON_PENDING;
1684
1685 else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
1686 pmu->mscg_transition_state = PMU_ELPG_STAT_ON_PENDING;
1687
1688 gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_ALLOW");
1689 status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
1690 PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
1691 pmu, &seq, ~0);
1692 WARN_ON(status != 0);
1693
1694 gk20a_dbg_fn("done");
1695 return 0;
1696}
1697
1698int gk20a_pmu_enable_elpg(struct gk20a *g)
1699{
1700 struct nvgpu_pmu *pmu = &g->pmu;
1701 struct gr_gk20a *gr = &g->gr;
1702 u32 pg_engine_id;
1703 u32 pg_engine_id_list = 0;
1704
1705 int ret = 0;
1706
1707 gk20a_dbg_fn("");
1708
1709 if (!g->support_pmu)
1710 return ret;
1711
1712 nvgpu_mutex_acquire(&pmu->elpg_mutex);
1713
1714 pmu->elpg_refcnt++;
1715 if (pmu->elpg_refcnt <= 0)
1716 goto exit_unlock;
1717
1718 /* something is not right if we end up in following code path */
1719 if (unlikely(pmu->elpg_refcnt > 1)) {
1720 nvgpu_warn(g,
1721 "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
1722 __func__, pmu->elpg_refcnt);
1723 WARN_ON(1);
1724 }
1725
1726 /* do NOT enable elpg until golden ctx is created,
1727 which is related with the ctx that ELPG save and restore. */
1728 if (unlikely(!gr->ctx_vars.golden_image_initialized))
1729 goto exit_unlock;
1730
1731 /* return if ELPG is already on or on_pending or off_on_pending */
1732 if (pmu->elpg_stat != PMU_ELPG_STAT_OFF)
1733 goto exit_unlock;
1734
1735 if (g->ops.pmu.pmu_pg_supported_engines_list)
1736 pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
1737
1738 for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
1739 pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
1740 pg_engine_id++) {
1741
1742 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
1743 ACCESS_ONCE(pmu->mscg_stat) == PMU_MSCG_DISABLED)
1744 continue;
1745
1746 if (BIT(pg_engine_id) & pg_engine_id_list)
1747 ret = gk20a_pmu_enable_elpg_locked(g, pg_engine_id);
1748 }
1749
1750exit_unlock:
1751 nvgpu_mutex_release(&pmu->elpg_mutex);
1752 gk20a_dbg_fn("done");
1753 return ret;
1754}
1755
1756int gk20a_pmu_disable_elpg(struct gk20a *g)
1757{
1758 struct nvgpu_pmu *pmu = &g->pmu;
1759 struct pmu_cmd cmd;
1760 u32 seq;
1761 int ret = 0;
1762 u32 pg_engine_id;
1763 u32 pg_engine_id_list = 0;
1764 u32 *ptr = NULL;
1765
1766 gk20a_dbg_fn("");
1767
1768 if (g->ops.pmu.pmu_pg_supported_engines_list)
1769 pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
1770
1771 if (!g->support_pmu)
1772 return ret;
1773
1774 nvgpu_mutex_acquire(&pmu->elpg_mutex);
1775
1776 pmu->elpg_refcnt--;
1777 if (pmu->elpg_refcnt > 0) {
1778 nvgpu_warn(g,
1779 "%s(): possible elpg refcnt mismatch. elpg refcnt=%d",
1780 __func__, pmu->elpg_refcnt);
1781 WARN_ON(1);
1782 ret = 0;
1783 goto exit_unlock;
1784 }
1785
1786 /* cancel off_on_pending and return */
1787 if (pmu->elpg_stat == PMU_ELPG_STAT_OFF_ON_PENDING) {
1788 pmu->elpg_stat = PMU_ELPG_STAT_OFF;
1789 ret = 0;
1790 goto exit_reschedule;
1791 }
1792 /* wait if on_pending */
1793 else if (pmu->elpg_stat == PMU_ELPG_STAT_ON_PENDING) {
1794
1795 pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
1796 &pmu->elpg_stat, PMU_ELPG_STAT_ON);
1797
1798 if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
1799 nvgpu_err(g, "ELPG_ALLOW_ACK failed, elpg_stat=%d",
1800 pmu->elpg_stat);
1801 pmu_dump_elpg_stats(pmu);
1802 pmu_dump_falcon_stats(pmu);
1803 ret = -EBUSY;
1804 goto exit_unlock;
1805 }
1806 }
1807 /* return if ELPG is already off */
1808 else if (pmu->elpg_stat != PMU_ELPG_STAT_ON) {
1809 ret = 0;
1810 goto exit_reschedule;
1811 }
1812
1813 for (pg_engine_id = PMU_PG_ELPG_ENGINE_ID_GRAPHICS;
1814 pg_engine_id < PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE;
1815 pg_engine_id++) {
1816
1817 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS &&
1818 ACCESS_ONCE(pmu->mscg_stat) == PMU_MSCG_DISABLED)
1819 continue;
1820
1821 if (BIT(pg_engine_id) & pg_engine_id_list) {
1822 memset(&cmd, 0, sizeof(struct pmu_cmd));
1823 cmd.hdr.unit_id = PMU_UNIT_PG;
1824 cmd.hdr.size = PMU_CMD_HDR_SIZE +
1825 sizeof(struct pmu_pg_cmd_elpg_cmd);
1826 cmd.cmd.pg.elpg_cmd.cmd_type = PMU_PG_CMD_ID_ELPG_CMD;
1827 cmd.cmd.pg.elpg_cmd.engine_id = pg_engine_id;
1828 cmd.cmd.pg.elpg_cmd.cmd = PMU_PG_ELPG_CMD_DISALLOW;
1829
1830 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
1831 pmu->elpg_stat = PMU_ELPG_STAT_OFF_PENDING;
1832 else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
1833 pmu->mscg_transition_state =
1834 PMU_ELPG_STAT_OFF_PENDING;
1835
1836 if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS)
1837 ptr = &pmu->elpg_stat;
1838 else if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_MS)
1839 ptr = &pmu->mscg_transition_state;
1840
1841 gk20a_dbg_pmu("cmd post PMU_PG_ELPG_CMD_DISALLOW");
1842 gk20a_pmu_cmd_post(g, &cmd, NULL, NULL,
1843 PMU_COMMAND_QUEUE_HPQ, pmu_handle_pg_elpg_msg,
1844 pmu, &seq, ~0);
1845
1846 pmu_wait_message_cond(pmu,
1847 gk20a_get_gr_idle_timeout(g),
1848 ptr, PMU_ELPG_STAT_OFF);
1849 if (*ptr != PMU_ELPG_STAT_OFF) {
1850 nvgpu_err(g, "ELPG_DISALLOW_ACK failed");
1851 pmu_dump_elpg_stats(pmu);
1852 pmu_dump_falcon_stats(pmu);
1853 ret = -EBUSY;
1854 goto exit_unlock;
1855 }
1856 }
1857 }
1858
1859exit_reschedule:
1860exit_unlock:
1861 nvgpu_mutex_release(&pmu->elpg_mutex);
1862 gk20a_dbg_fn("done");
1863 return ret;
1864}
1865
1866int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable) 1362int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable)
1867{ 1363{
1868 struct nvgpu_pmu *pmu = &g->pmu; 1364 struct nvgpu_pmu *pmu = &g->pmu;
@@ -1947,173 +1443,3 @@ void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
1947 pg_stat_data->avg_entry_latency_us = stats.pg_avg_entry_time_us; 1443 pg_stat_data->avg_entry_latency_us = stats.pg_avg_entry_time_us;
1948 pg_stat_data->avg_exit_latency_us = stats.pg_avg_exit_time_us; 1444 pg_stat_data->avg_exit_latency_us = stats.pg_avg_exit_time_us;
1949} 1445}
1950
1951int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
1952 struct pmu_pg_stats_data *pg_stat_data)
1953{
1954 struct nvgpu_pmu *pmu = &g->pmu;
1955 u32 pg_engine_id_list = 0;
1956
1957 if (!pmu->initialized) {
1958 pg_stat_data->ingating_time = 0;
1959 pg_stat_data->ungating_time = 0;
1960 pg_stat_data->gating_cnt = 0;
1961 return 0;
1962 }
1963
1964 if (g->ops.pmu.pmu_pg_supported_engines_list)
1965 pg_engine_id_list = g->ops.pmu.pmu_pg_supported_engines_list(g);
1966
1967 if (BIT(pg_engine_id) & pg_engine_id_list)
1968 g->ops.pmu.pmu_elpg_statistics(g, pg_engine_id,
1969 pg_stat_data);
1970
1971 return 0;
1972}
1973
1974/* Send an Adaptive Power (AP) related command to PMU */
1975int gk20a_pmu_ap_send_command(struct gk20a *g,
1976 union pmu_ap_cmd *p_ap_cmd, bool b_block)
1977{
1978 struct nvgpu_pmu *pmu = &g->pmu;
1979 /* FIXME: where is the PG structure defined?? */
1980 u32 status = 0;
1981 struct pmu_cmd cmd;
1982 u32 seq;
1983 pmu_callback p_callback = NULL;
1984
1985 memset(&cmd, 0, sizeof(struct pmu_cmd));
1986
1987 /* Copy common members */
1988 cmd.hdr.unit_id = PMU_UNIT_PG;
1989 cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(union pmu_ap_cmd);
1990
1991 cmd.cmd.pg.ap_cmd.cmn.cmd_type = PMU_PG_CMD_ID_AP;
1992 cmd.cmd.pg.ap_cmd.cmn.cmd_id = p_ap_cmd->cmn.cmd_id;
1993
1994 /* Copy other members of command */
1995 switch (p_ap_cmd->cmn.cmd_id) {
1996 case PMU_AP_CMD_ID_INIT:
1997 gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_INIT");
1998 cmd.cmd.pg.ap_cmd.init.pg_sampling_period_us =
1999 p_ap_cmd->init.pg_sampling_period_us;
2000 break;
2001
2002 case PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL:
2003 gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL");
2004 cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.ctrl_id =
2005 p_ap_cmd->init_and_enable_ctrl.ctrl_id;
2006 memcpy(
2007 (void *)&(cmd.cmd.pg.ap_cmd.init_and_enable_ctrl.params),
2008 (void *)&(p_ap_cmd->init_and_enable_ctrl.params),
2009 sizeof(struct pmu_ap_ctrl_init_params));
2010
2011 p_callback = ap_callback_init_and_enable_ctrl;
2012 break;
2013
2014 case PMU_AP_CMD_ID_ENABLE_CTRL:
2015 gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_ENABLE_CTRL");
2016 cmd.cmd.pg.ap_cmd.enable_ctrl.ctrl_id =
2017 p_ap_cmd->enable_ctrl.ctrl_id;
2018 break;
2019
2020 case PMU_AP_CMD_ID_DISABLE_CTRL:
2021 gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_DISABLE_CTRL");
2022 cmd.cmd.pg.ap_cmd.disable_ctrl.ctrl_id =
2023 p_ap_cmd->disable_ctrl.ctrl_id;
2024 break;
2025
2026 case PMU_AP_CMD_ID_KICK_CTRL:
2027 gk20a_dbg_pmu("cmd post PMU_AP_CMD_ID_KICK_CTRL");
2028 cmd.cmd.pg.ap_cmd.kick_ctrl.ctrl_id =
2029 p_ap_cmd->kick_ctrl.ctrl_id;
2030 cmd.cmd.pg.ap_cmd.kick_ctrl.skip_count =
2031 p_ap_cmd->kick_ctrl.skip_count;
2032 break;
2033
2034 default:
2035 gk20a_dbg_pmu("%s: Invalid Adaptive Power command %d\n",
2036 __func__, p_ap_cmd->cmn.cmd_id);
2037 return 0x2f;
2038 }
2039
2040 status = gk20a_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
2041 p_callback, pmu, &seq, ~0);
2042
2043 if (status) {
2044 gk20a_dbg_pmu(
2045 "%s: Unable to submit Adaptive Power Command %d\n",
2046 __func__, p_ap_cmd->cmn.cmd_id);
2047 goto err_return;
2048 }
2049
2050 /* TODO: Implement blocking calls (b_block) */
2051
2052err_return:
2053 return status;
2054}
2055
2056static void ap_callback_init_and_enable_ctrl(
2057 struct gk20a *g, struct pmu_msg *msg,
2058 void *param, u32 seq_desc, u32 status)
2059{
2060 /* Define p_ap (i.e pointer to pmu_ap structure) */
2061 WARN_ON(!msg);
2062
2063 if (!status) {
2064 switch (msg->msg.pg.ap_msg.cmn.msg_id) {
2065 case PMU_AP_MSG_ID_INIT_ACK:
2066 gk20a_dbg_pmu("reply PMU_AP_CMD_ID_INIT");
2067 break;
2068
2069 default:
2070 gk20a_dbg_pmu(
2071 "%s: Invalid Adaptive Power Message: %x\n",
2072 __func__, msg->msg.pg.ap_msg.cmn.msg_id);
2073 break;
2074 }
2075 }
2076}
2077
2078int gk20a_aelpg_init(struct gk20a *g)
2079{
2080 int status = 0;
2081
2082 /* Remove reliance on app_ctrl field. */
2083 union pmu_ap_cmd ap_cmd;
2084
2085 /* TODO: Check for elpg being ready? */
2086 ap_cmd.init.cmd_id = PMU_AP_CMD_ID_INIT;
2087 ap_cmd.init.pg_sampling_period_us = g->pmu.aelpg_param[0];
2088
2089 status = gk20a_pmu_ap_send_command(g, &ap_cmd, false);
2090 return status;
2091}
2092
2093int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id)
2094{
2095 int status = 0;
2096 union pmu_ap_cmd ap_cmd;
2097
2098 /* TODO: Probably check if ELPG is ready? */
2099 ap_cmd.init_and_enable_ctrl.cmd_id = PMU_AP_CMD_ID_INIT_AND_ENABLE_CTRL;
2100 ap_cmd.init_and_enable_ctrl.ctrl_id = ctrl_id;
2101 ap_cmd.init_and_enable_ctrl.params.min_idle_filter_us =
2102 g->pmu.aelpg_param[1];
2103 ap_cmd.init_and_enable_ctrl.params.min_target_saving_us =
2104 g->pmu.aelpg_param[2];
2105 ap_cmd.init_and_enable_ctrl.params.power_break_even_us =
2106 g->pmu.aelpg_param[3];
2107 ap_cmd.init_and_enable_ctrl.params.cycles_per_sample_max =
2108 g->pmu.aelpg_param[4];
2109
2110 switch (ctrl_id) {
2111 case PMU_AP_CTRL_ID_GRAPHICS:
2112 break;
2113 default:
2114 break;
2115 }
2116
2117 status = gk20a_pmu_ap_send_command(g, &ap_cmd, true);
2118 return status;
2119}
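gk20a_aelpg_init() and gk20a_aelpg_init_and_enable() above consume g->pmu.aelpg_param[0..4] in a fixed order: index 0 is the sampling period, indexes 1-4 feed the APCTRL init parameters. A sketch of how those slots line up with the APCTRL_* defaults declared by this patch is below; the seeding site is an assumption, as it is not part of this diff.

static void example_seed_aelpg_defaults(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;

	/* Index mapping taken from the two functions above; pairing each slot
	 * with its APCTRL_* default is assumed, not shown in this change. */
	pmu->aelpg_param[0] = APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US;	/* init.pg_sampling_period_us */
	pmu->aelpg_param[1] = APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US;	/* params.min_idle_filter_us */
	pmu->aelpg_param[2] = APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US;	/* params.min_target_saving_us */
	pmu->aelpg_param[3] = APCTRL_POWER_BREAKEVEN_DEFAULT_US;	/* params.power_break_even_us */
	pmu->aelpg_param[4] = APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT;	/* params.cycles_per_sample_max */
}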
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
index b5038bd4..55d6f72c 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h
@@ -35,51 +35,9 @@ struct nvgpu_firmware;
35#define FUSE_GCPLEX_CONFIG_FUSE_0 0x2C8 35#define FUSE_GCPLEX_CONFIG_FUSE_0 0x2C8
36#endif 36#endif
37 37
38#define PMU_PGENG_GR_BUFFER_IDX_INIT (0)
39#define PMU_PGENG_GR_BUFFER_IDX_ZBC (1)
40#define PMU_PGENG_GR_BUFFER_IDX_FECS (2)
41
42#define PMU_PG_IDLE_THRESHOLD_SIM 1000
43#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM 4000000
44/* TBD: QT or else ? */
45#define PMU_PG_IDLE_THRESHOLD 15000
46#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD 1000000
47
48#define PMU_PG_LPWR_FEATURE_RPPG 0x0
49#define PMU_PG_LPWR_FEATURE_MSCG 0x1
50
51/* state transition :
52 OFF => [OFF_ON_PENDING optional] => ON_PENDING => ON => OFF
53 ON => OFF is always synchronized */
54#define PMU_ELPG_STAT_OFF 0 /* elpg is off */
55#define PMU_ELPG_STAT_ON 1 /* elpg is on */
56#define PMU_ELPG_STAT_ON_PENDING 2 /* elpg is off, ALLOW cmd has been sent, wait for ack */
57#define PMU_ELPG_STAT_OFF_PENDING 3 /* elpg is on, DISALLOW cmd has been sent, wait for ack */
58#define PMU_ELPG_STAT_OFF_ON_PENDING 4 /* elpg is off, caller has requested on, but ALLOW
59 cmd hasn't been sent due to ENABLE_ALLOW delay */
60
61#define PG_REQUEST_TYPE_GLOBAL 0x0
62#define PG_REQUEST_TYPE_PSTATE 0x1
63
64#define PMU_MSCG_DISABLED 0
65#define PMU_MSCG_ENABLED 1
66
67/* Default Sampling Period of AELPG */
68#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US (1000000)
69
70/* Default values of APCTRL parameters */
71#define APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US (100)
72#define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US (10000)
73#define APCTRL_POWER_BREAKEVEN_DEFAULT_US (2000)
74#define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200)
75
76bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu); 38bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu);
77void gk20a_pmu_isr(struct gk20a *g); 39void gk20a_pmu_isr(struct gk20a *g);
78 40
79int gk20a_pmu_enable_elpg(struct gk20a *g);
80int gk20a_pmu_disable_elpg(struct gk20a *g);
81int gk20a_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg);
82
83u32 gk20a_pmu_pg_engines_list(struct gk20a *g); 41u32 gk20a_pmu_pg_engines_list(struct gk20a *g);
84u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id); 42u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id);
85 43
@@ -87,6 +45,8 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries);
87 45
88int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable); 46int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable);
89 47
48void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id);
49
90int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token); 50int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token);
91int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token); 51int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);
92 52
@@ -109,12 +69,10 @@ void pmu_copy_from_dmem(struct nvgpu_pmu *pmu,
109 u32 src, u8 *dst, u32 size, u8 port); 69 u32 src, u8 *dst, u32 size, u8 port);
110int pmu_reset(struct nvgpu_pmu *pmu); 70int pmu_reset(struct nvgpu_pmu *pmu);
111int pmu_bootstrap(struct nvgpu_pmu *pmu); 71int pmu_bootstrap(struct nvgpu_pmu *pmu);
72
73void pmu_dump_elpg_stats(struct nvgpu_pmu *pmu);
112void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu); 74void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
113 75
114int gk20a_pmu_ap_send_command(struct gk20a *g,
115 union pmu_ap_cmd *p_ap_cmd, bool b_block);
116int gk20a_aelpg_init(struct gk20a *g);
117int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
118void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable); 76void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable);
119int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, 77int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
120 u32 *var, u32 val); 78 u32 *var, u32 val);
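The header hunk above drops the ELPG state-machine defines (and their transition comment) from pmu_gk20a.h along with the gk20a_* PG/AELPG prototypes, keeping only the chip-level helpers. As a debugging aid, a small sketch naming those states is shown below; elpg_stat_name() is hypothetical, and the state meanings are taken verbatim from the removed comment, wherever this patch re-homes the defines.

static const char *elpg_stat_name(u32 stat)
{
	switch (stat) {
	case PMU_ELPG_STAT_OFF:		return "OFF";
	case PMU_ELPG_STAT_ON:		return "ON";
	case PMU_ELPG_STAT_ON_PENDING:	return "ON_PENDING (ALLOW sent, waiting for ack)";
	case PMU_ELPG_STAT_OFF_PENDING:	return "OFF_PENDING (DISALLOW sent, waiting for ack)";
	case PMU_ELPG_STAT_OFF_ON_PENDING: return "OFF_ON_PENDING (ALLOW deferred by ENABLE_ALLOW delay)";
	default:			return "unknown";
	}
}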
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
index 15f37bda..c4972f67 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h
@@ -112,6 +112,28 @@ enum {
112 PMU_SEQ_STATE_CANCELLED 112 PMU_SEQ_STATE_CANCELLED
113}; 113};
114 114
115/*PG defines used by nvgpu-pmu*/
116#define PMU_PG_IDLE_THRESHOLD_SIM 1000
117#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM 4000000
118/* TBD: QT or else ? */
119#define PMU_PG_IDLE_THRESHOLD 15000
120#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD 1000000
121
122#define PMU_PG_LPWR_FEATURE_RPPG 0x0
123#define PMU_PG_LPWR_FEATURE_MSCG 0x1
124
125#define PMU_MSCG_DISABLED 0
126#define PMU_MSCG_ENABLED 1
127
128/* Default Sampling Period of AELPG */
129#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US (1000000)
130
131/* Default values of APCTRL parameters */
132#define APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US (100)
133#define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US (10000)
134#define APCTRL_POWER_BREAKEVEN_DEFAULT_US (2000)
135#define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200)
136
115typedef void (*pmu_callback)(struct gk20a *, struct pmu_msg *, void *, u32, 137typedef void (*pmu_callback)(struct gk20a *, struct pmu_msg *, void *, u32,
116 u32); 138 u32);
117 139
@@ -384,14 +406,6 @@ int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
384void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state, 406void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state,
385 bool post_change_event); 407 bool post_change_event);
386 408
387/* PG */
388int nvgpu_pmu_init_powergating(struct gk20a *g);
389int nvgpu_pmu_init_bind_fecs(struct gk20a *g);
390void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g);
391
392int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
393 struct pmu_pg_stats_data *pg_stat_data);
394
395/* NVGPU-PMU MEM alloc */ 409/* NVGPU-PMU MEM alloc */
396void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem); 410void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem);
397void nvgpu_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem, 411void nvgpu_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
@@ -405,4 +419,23 @@ int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
405int nvgpu_init_pmu_fw_support(struct nvgpu_pmu *pmu); 419int nvgpu_init_pmu_fw_support(struct nvgpu_pmu *pmu);
406int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g); 420int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g);
407 421
422/* PG init*/
423int nvgpu_pmu_init_powergating(struct gk20a *g);
424int nvgpu_pmu_init_bind_fecs(struct gk20a *g);
425void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g);
426
427/* PG enable/disable */
428int nvgpu_pmu_enable_elpg(struct gk20a *g);
429int nvgpu_pmu_disable_elpg(struct gk20a *g);
430int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg);
431
432int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
433 struct pmu_pg_stats_data *pg_stat_data);
434
435/* AELPG */
436int nvgpu_aelpg_init(struct gk20a *g);
437int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
438int nvgpu_pmu_ap_send_command(struct gk20a *g,
439 union pmu_ap_cmd *p_ap_cmd, bool b_block);
440
408#endif /* __NVGPU_PMU_H__ */ 441#endif /* __NVGPU_PMU_H__ */
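With the prototypes above, nvgpu/pmu.h now exposes the renamed PG init, enable/disable, statistics and AELPG entry points under the nvgpu_ prefix. A hedged sketch of the common caller pattern after the rename follows: keep GR powered across an operation by bracketing it with disable/enable, much as the lpwr.c hunks below do. do_work_with_gr_on() is a placeholder, and the guards on g->support_pmu and g->elpg_enabled mirror the existing callers.

static int example_with_elpg_disabled(struct gk20a *g)
{
	int err = 0;

	if (g->support_pmu && g->elpg_enabled) {
		err = nvgpu_pmu_disable_elpg(g);
		if (err)
			return err;
	}

	err = do_work_with_gr_on(g);	/* placeholder for the ELPG-sensitive work */

	if (g->support_pmu && g->elpg_enabled)
		nvgpu_pmu_enable_elpg(g);

	return err;
}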
diff --git a/drivers/gpu/nvgpu/lpwr/lpwr.c b/drivers/gpu/nvgpu/lpwr/lpwr.c
index 85acfd67..95eea2e3 100644
--- a/drivers/gpu/nvgpu/lpwr/lpwr.c
+++ b/drivers/gpu/nvgpu/lpwr/lpwr.c
@@ -363,7 +363,7 @@ int nvgpu_lpwr_enable_pg(struct gk20a *g, bool pstate_lock)
363 present_pstate); 363 present_pstate);
364 if (is_rppg_supported) { 364 if (is_rppg_supported) {
365 if (g->support_pmu && g->can_elpg) 365 if (g->support_pmu && g->can_elpg)
366 status = gk20a_pmu_enable_elpg(g); 366 status = nvgpu_pmu_enable_elpg(g);
367 } 367 }
368 368
369 nvgpu_mutex_release(&pmu->pg_mutex); 369 nvgpu_mutex_release(&pmu->pg_mutex);
@@ -393,7 +393,7 @@ int nvgpu_lpwr_disable_pg(struct gk20a *g, bool pstate_lock)
393 present_pstate); 393 present_pstate);
394 if (is_rppg_supported) { 394 if (is_rppg_supported) {
395 if (g->support_pmu && g->elpg_enabled) { 395 if (g->support_pmu && g->elpg_enabled) {
396 status = gk20a_pmu_disable_elpg(g); 396 status = nvgpu_pmu_disable_elpg(g);
397 if (status) 397 if (status)
398 goto exit_unlock; 398 goto exit_unlock;
399 } 399 }