path: root/include/nvgpu/pmu.h
Diffstat (limited to 'include/nvgpu/pmu.h')
-rw-r--r--  include/nvgpu/pmu.h  545
1 file changed, 0 insertions(+), 545 deletions(-)
diff --git a/include/nvgpu/pmu.h b/include/nvgpu/pmu.h
deleted file mode 100644
index fb1b016..0000000
--- a/include/nvgpu/pmu.h
+++ /dev/null
@@ -1,545 +0,0 @@
/*
 * Copyright (c) 2017-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVGPU_PMU_H
#define NVGPU_PMU_H

#include <nvgpu/kmem.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/allocator.h>
#include <nvgpu/lock.h>
#include <nvgpu/cond.h>
#include <nvgpu/thread.h>
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/flcnif_cmn.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/falcon.h>

#define nvgpu_pmu_dbg(g, fmt, args...) \
        nvgpu_log(g, gpu_dbg_pmu, fmt, ##args)

/* defined by pmu hw spec */
#define GK20A_PMU_VA_SIZE (512 * 1024 * 1024)
#define GK20A_PMU_UCODE_SIZE_MAX (256 * 1024)
#define GK20A_PMU_SEQ_BUF_SIZE 4096
#define GK20A_PMU_TRACE_BUFSIZE 0x4000 /* 16K */
#define GK20A_PMU_DMEM_BLKSIZE2 8
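/*
 * Note: the trailing "2" denotes a log2 value. Assuming this constant is
 * consumed as the block-size order of the PMU DMEM allocator, it yields
 * 1 << GK20A_PMU_DMEM_BLKSIZE2 = 256-byte allocation blocks.
 */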

#define PMU_MODE_MISMATCH_STATUS_MAILBOX_R 6
#define PMU_MODE_MISMATCH_STATUS_VAL 0xDEADDEAD

/* Falcon Register index */
#define PMU_FALCON_REG_R0 (0)
#define PMU_FALCON_REG_R1 (1)
#define PMU_FALCON_REG_R2 (2)
#define PMU_FALCON_REG_R3 (3)
#define PMU_FALCON_REG_R4 (4)
#define PMU_FALCON_REG_R5 (5)
#define PMU_FALCON_REG_R6 (6)
#define PMU_FALCON_REG_R7 (7)
#define PMU_FALCON_REG_R8 (8)
#define PMU_FALCON_REG_R9 (9)
#define PMU_FALCON_REG_R10 (10)
#define PMU_FALCON_REG_R11 (11)
#define PMU_FALCON_REG_R12 (12)
#define PMU_FALCON_REG_R13 (13)
#define PMU_FALCON_REG_R14 (14)
#define PMU_FALCON_REG_R15 (15)
#define PMU_FALCON_REG_IV0 (16)
#define PMU_FALCON_REG_IV1 (17)
#define PMU_FALCON_REG_UNDEFINED (18)
#define PMU_FALCON_REG_EV (19)
#define PMU_FALCON_REG_SP (20)
#define PMU_FALCON_REG_PC (21)
#define PMU_FALCON_REG_IMB (22)
#define PMU_FALCON_REG_DMB (23)
#define PMU_FALCON_REG_CSW (24)
#define PMU_FALCON_REG_CCR (25)
#define PMU_FALCON_REG_SEC (26)
#define PMU_FALCON_REG_CTX (27)
#define PMU_FALCON_REG_EXCI (28)
#define PMU_FALCON_REG_RSVD0 (29)
#define PMU_FALCON_REG_RSVD1 (30)
#define PMU_FALCON_REG_RSVD2 (31)
#define PMU_FALCON_REG_SIZE (32)

/* Choices for pmu_state */
#define PMU_STATE_OFF 0U /* PMU is off */
#define PMU_STATE_STARTING 1U /* PMU is on, but not booted */
#define PMU_STATE_INIT_RECEIVED 2U /* PMU init message received */
#define PMU_STATE_ELPG_BOOTING 3U /* PMU is booting */
#define PMU_STATE_ELPG_BOOTED 4U /* ELPG is initialized */
#define PMU_STATE_LOADING_PG_BUF 5U /* Loading PG buf */
#define PMU_STATE_LOADING_ZBC 6U /* Loading ZBC buf */
#define PMU_STATE_STARTED 7U /* Fully initialized */
#define PMU_STATE_EXIT 8U /* Exit PMU state machine */

/* state transition:
 * OFF => [OFF_ON_PENDING optional] => ON_PENDING => ON => OFF
 * ON => OFF is always synchronized
 */
/* elpg is off */
#define PMU_ELPG_STAT_OFF 0U
/* elpg is on */
#define PMU_ELPG_STAT_ON 1U
/* elpg is off, ALLOW cmd has been sent, wait for ack */
#define PMU_ELPG_STAT_ON_PENDING 2U
/* elpg is on, DISALLOW cmd has been sent, wait for ack */
#define PMU_ELPG_STAT_OFF_PENDING 3U
/* elpg is off, caller has requested on, but ALLOW
 * cmd hasn't been sent due to ENABLE_ALLOW delay
 */
#define PMU_ELPG_STAT_OFF_ON_PENDING 4U

#define GK20A_PMU_UCODE_NB_MAX_OVERLAY 32U
#define GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH 64U

#define PMU_MAX_NUM_SEQUENCES (256U)
#define PMU_SEQ_BIT_SHIFT (5U)
#define PMU_SEQ_TBL_SIZE \
        (PMU_MAX_NUM_SEQUENCES >> PMU_SEQ_BIT_SHIFT)
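/*
 * Worked arithmetic: with PMU_MAX_NUM_SEQUENCES = 256 and a shift of 5
 * (i.e. 32 sequence-status bits tracked per table word), PMU_SEQ_TBL_SIZE
 * evaluates to 256 >> 5 = 8 entries in the pmu_seq_tbl bitmap below.
 */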

#define PMU_INVALID_SEQ_DESC (~0)

enum {
        GK20A_PMU_DMAIDX_UCODE = 0,
        GK20A_PMU_DMAIDX_VIRT = 1,
        GK20A_PMU_DMAIDX_PHYS_VID = 2,
        GK20A_PMU_DMAIDX_PHYS_SYS_COH = 3,
        GK20A_PMU_DMAIDX_PHYS_SYS_NCOH = 4,
        GK20A_PMU_DMAIDX_RSVD = 5,
        GK20A_PMU_DMAIDX_PELPG = 6,
        GK20A_PMU_DMAIDX_END = 7
};

enum {
        PMU_SEQ_STATE_FREE = 0,
        PMU_SEQ_STATE_PENDING,
        PMU_SEQ_STATE_USED,
        PMU_SEQ_STATE_CANCELLED
};

/* PG defines used by nvgpu-pmu */
#define PMU_PG_IDLE_THRESHOLD_SIM 1000
#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD_SIM 4000000
/* TBD: QT or else ? */
#define PMU_PG_IDLE_THRESHOLD 15000
#define PMU_PG_POST_POWERUP_IDLE_THRESHOLD 1000000

#define PMU_PG_LPWR_FEATURE_RPPG 0x0
#define PMU_PG_LPWR_FEATURE_MSCG 0x1

#define PMU_MSCG_DISABLED 0U
#define PMU_MSCG_ENABLED 1U

/* Default Sampling Period of AELPG */
#define APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US (1000000)

/* Default values of APCTRL parameters */
#define APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US (100)
#define APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US (10000)
#define APCTRL_POWER_BREAKEVEN_DEFAULT_US (2000)
#define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200)

/* pmu load const defines */
#define PMU_BUSY_CYCLES_NORM_MAX (1000U)

/* RPC */
#define PMU_RPC_EXECUTE(_stat, _pmu, _unit, _func, _prpc, _size)\
        do { \
                memset(&((_prpc)->hdr), 0, sizeof((_prpc)->hdr));\
                \
                (_prpc)->hdr.unit_id = PMU_UNIT_##_unit; \
                (_prpc)->hdr.function = NV_PMU_RPC_ID_##_unit##_##_func;\
                (_prpc)->hdr.flags = 0x0; \
                \
                _stat = nvgpu_pmu_rpc_execute(_pmu, &((_prpc)->hdr), \
                        (sizeof(*(_prpc)) - sizeof((_prpc)->scratch)),\
                        (_size), NULL, NULL, false); \
        } while (0)

/* RPC blocking call to copy back data from PMU to _prpc */
#define PMU_RPC_EXECUTE_CPB(_stat, _pmu, _unit, _func, _prpc, _size)\
        do { \
                memset(&((_prpc)->hdr), 0, sizeof((_prpc)->hdr));\
                \
                (_prpc)->hdr.unit_id = PMU_UNIT_##_unit; \
                (_prpc)->hdr.function = NV_PMU_RPC_ID_##_unit##_##_func;\
                (_prpc)->hdr.flags = 0x0; \
                \
                _stat = nvgpu_pmu_rpc_execute(_pmu, &((_prpc)->hdr), \
                        (sizeof(*(_prpc)) - sizeof((_prpc)->scratch)),\
                        (_size), NULL, NULL, true); \
        } while (0)

/* RPC non-blocking with call_back handler option */
#define PMU_RPC_EXECUTE_CB(_stat, _pmu, _unit, _func, _prpc, _size, _cb, _cbp)\
        do { \
                memset(&((_prpc)->hdr), 0, sizeof((_prpc)->hdr));\
                \
                (_prpc)->hdr.unit_id = PMU_UNIT_##_unit; \
                (_prpc)->hdr.function = NV_PMU_RPC_ID_##_unit##_##_func;\
                (_prpc)->hdr.flags = 0x0; \
                \
                _stat = nvgpu_pmu_rpc_execute(_pmu, &((_prpc)->hdr), \
                        (sizeof(*(_prpc)) - sizeof((_prpc)->scratch)),\
                        (_size), _cb, _cbp, false); \
        } while (0)
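
/*
 * Usage sketch for the RPC macros (illustrative only; the struct type and
 * the FOO/BAR unit/function tokens below are hypothetical). A caller fills
 * a unit-specific RPC struct whose 'hdr' and trailing 'scratch' members
 * match what the macros expect; the macro expands the tokens into
 * PMU_UNIT_FOO and NV_PMU_RPC_ID_FOO_BAR:
 *
 *	struct nv_pmu_rpc_struct_foo rpc;
 *	int status = 0;
 *
 *	(void) memset(&rpc, 0, sizeof(rpc));
 *	rpc.param = 1U;
 *	PMU_RPC_EXECUTE(status, pmu, FOO, BAR, &rpc, 0);
 *	if (status != 0) {
 *		nvgpu_err(g, "RPC FOO_BAR failed, status=0x%x", status);
 *	}
 */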

typedef void (*pmu_callback)(struct gk20a *, struct pmu_msg *, void *, u32,
        u32);

struct rpc_handler_payload {
        void *rpc_buff;
        bool is_mem_free_set;
        bool complete;
};

struct pmu_rpc_desc {
        void *prpc;
        u16 size_rpc;
        u16 size_scratch;
};

struct pmu_payload {
        struct {
                void *buf;
                u32 offset;
                u32 size;
                u32 fb_size;
        } in, out;
        struct pmu_rpc_desc rpc;
};

struct pmu_ucode_desc {
        u32 descriptor_size;
        u32 image_size;
        u32 tools_version;
        u32 app_version;
        char date[GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH];
        u32 bootloader_start_offset;
        u32 bootloader_size;
        u32 bootloader_imem_offset;
        u32 bootloader_entry_point;
        u32 app_start_offset;
        u32 app_size;
        u32 app_imem_offset;
        u32 app_imem_entry;
        u32 app_dmem_offset;
        /* Offset from appStartOffset */
        u32 app_resident_code_offset;
        /* Exact size of the resident code
         * (potentially contains CRC inside at the end)
         */
        u32 app_resident_code_size;
        /* Offset from appStartOffset */
        u32 app_resident_data_offset;
        /* Exact size of the resident data
         * (potentially contains CRC inside at the end)
         */
        u32 app_resident_data_size;
        u32 nb_overlays;
        struct {u32 start; u32 size; } load_ovl[GK20A_PMU_UCODE_NB_MAX_OVERLAY];
        u32 compressed;
};

struct pmu_ucode_desc_v1 {
        u32 descriptor_size;
        u32 image_size;
        u32 tools_version;
        u32 app_version;
        char date[GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH];
        u32 bootloader_start_offset;
        u32 bootloader_size;
        u32 bootloader_imem_offset;
        u32 bootloader_entry_point;
        u32 app_start_offset;
        u32 app_size;
        u32 app_imem_offset;
        u32 app_imem_entry;
        u32 app_dmem_offset;
        u32 app_resident_code_offset;
        u32 app_resident_code_size;
        u32 app_resident_data_offset;
        u32 app_resident_data_size;
        u32 nb_imem_overlays;
        u32 nb_dmem_overlays;
        struct {u32 start; u32 size; } load_ovl[64];
        u32 compressed;
};

struct pmu_mutex {
        u32 id;
        u32 index;
        u32 ref_cnt;
};

struct pmu_sequence {
        u8 id;
        u32 state;
        u32 desc;
        struct pmu_msg *msg;
        union {
                struct pmu_allocation_v1 in_v1;
                struct pmu_allocation_v2 in_v2;
                struct pmu_allocation_v3 in_v3;
        };
        struct nvgpu_mem *in_mem;
        union {
                struct pmu_allocation_v1 out_v1;
                struct pmu_allocation_v2 out_v2;
                struct pmu_allocation_v3 out_v3;
        };
        struct nvgpu_mem *out_mem;
        u8 *out_payload;
        pmu_callback callback;
        void *cb_params;
};

struct nvgpu_pg_init {
        bool state_change;
        bool state_destroy;
        struct nvgpu_cond wq;
        struct nvgpu_thread state_task;
};

struct nvgpu_pmu {
        struct gk20a *g;
        struct nvgpu_falcon *flcn;

        union {
                struct pmu_ucode_desc *desc;
                struct pmu_ucode_desc_v1 *desc_v1;
        };
        struct nvgpu_mem ucode;

        struct nvgpu_mem pg_buf;

        /* TBD: remove this if ZBC seq is fixed */
        struct nvgpu_mem seq_buf;
        struct nvgpu_mem trace_buf;
        struct nvgpu_mem super_surface_buf;

        bool buf_loaded;

        struct pmu_sha1_gid gid_info;

        struct nvgpu_falcon_queue queue[PMU_QUEUE_COUNT];

        struct pmu_sequence *seq;
        unsigned long pmu_seq_tbl[PMU_SEQ_TBL_SIZE];
        u32 next_seq_desc;

        struct pmu_mutex *mutex;
        u32 mutex_cnt;

        struct nvgpu_mutex pmu_copy_lock;
        struct nvgpu_mutex pmu_seq_lock;

        struct nvgpu_allocator dmem;

        u32 *ucode_image;
        bool pmu_ready;

        u32 perfmon_query;

        u32 zbc_save_done;

        u32 stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE];

        u32 elpg_stat;
        u32 disallow_state;

        u32 mscg_stat;
        u32 mscg_transition_state;

        u32 pmu_state;

#define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC 1 /* msec */
        struct nvgpu_pg_init pg_init;
        struct nvgpu_mutex pg_mutex; /* protect pg-RPPG/MSCG enable/disable */
        struct nvgpu_mutex elpg_mutex; /* protect elpg enable/disable */
        /* disable -1, enable +1, <=0 elpg disabled, > 0 elpg enabled */
        int elpg_refcnt;

        union {
                struct pmu_perfmon_counter_v2 perfmon_counter_v2;
        };
        u32 perfmon_state_id[PMU_DOMAIN_GROUP_NUM];

        bool initialized;

        void (*remove_support)(struct nvgpu_pmu *pmu);
        bool sw_ready;
        bool perfmon_ready;

        u32 sample_buffer;
        u32 load_shadow;
        u32 load_avg;
        u32 load;

        struct nvgpu_mutex isr_mutex;
        bool isr_enabled;

        bool zbc_ready;
        union {
                struct pmu_cmdline_args_v3 args_v3;
                struct pmu_cmdline_args_v4 args_v4;
                struct pmu_cmdline_args_v5 args_v5;
                struct pmu_cmdline_args_v6 args_v6;
        };
        unsigned long perfmon_events_cnt;
        bool perfmon_sampling_enabled;
        u8 pmu_mode; /* Added for GM20B and ACR */
        u32 falcon_id;
        u32 aelpg_param[5];
        u32 override_done;

        struct nvgpu_firmware *fw;
};

struct pmu_surface {
        struct nvgpu_mem vidmem_desc;
        struct nvgpu_mem sysmem_desc;
        struct flcn_mem_desc_v0 params;
};

/* PG defines used by nvgpu-pmu */
struct pmu_pg_stats_data {
        u32 gating_cnt;
        u32 ingating_time;
        u32 ungating_time;
        u32 avg_entry_latency_us;
        u32 avg_exit_latency_us;
};

/*!
 * Structure describing a single register write to be performed during the
 * PG init sequence to set PROD values.
 */
struct pg_init_sequence_list {
        u32 regaddr;
        u32 writeval;
};
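
/*
 * Sketch of how such a table is typically consumed (illustrative; the
 * table name is hypothetical and the register addresses/values below are
 * placeholders, not real PROD settings):
 *
 *	static struct pg_init_sequence_list pg_init_seq[] = {
 *		{ 0x0010e040U, 0x00000000U },
 *		{ 0x0010e050U, 0x00000001U },
 *	};
 *
 *	for (i = 0U; i < ARRAY_SIZE(pg_init_seq); i++) {
 *		gk20a_writel(g, pg_init_seq[i].regaddr,
 *			pg_init_seq[i].writeval);
 *	}
 */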

/* PMU IPC Methods */
void nvgpu_pmu_seq_init(struct nvgpu_pmu *pmu);

int nvgpu_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token);
int nvgpu_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token);
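
/*
 * Usage sketch (illustrative): acquire a PMU HW mutex by id, do the
 * critical work, then release with the same token. PMU_MUTEX_ID_FIFO is
 * one of the mutex ids defined elsewhere in the PMU interface headers;
 * error handling is abbreviated.
 *
 *	u32 token = 0U;
 *
 *	if (nvgpu_pmu_mutex_acquire(pmu, PMU_MUTEX_ID_FIFO, &token) == 0) {
 *		// ... access the resource guarded by the PMU mutex ...
 *		(void) nvgpu_pmu_mutex_release(pmu, PMU_MUTEX_ID_FIFO,
 *			&token);
 *	}
 */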

int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu, u32 id,
        union pmu_init_msg_pmu *init);

/* send a cmd to pmu */
int nvgpu_pmu_cmd_post(struct gk20a *g, struct pmu_cmd *cmd,
        struct pmu_msg *msg, struct pmu_payload *payload,
        u32 queue_id, pmu_callback callback, void *cb_param,
        u32 *seq_desc, unsigned long timeout);
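
/*
 * Usage sketch (illustrative): post a command with a completion callback.
 * The callback matches the pmu_callback typedef above; the command fields,
 * queue id and timeout below are placeholders. Real callers fill a
 * unit-specific pmu_cmd and typically target a high- or low-priority
 * command queue.
 *
 *	static void foo_done(struct gk20a *g, struct pmu_msg *msg,
 *			void *param, u32 handle, u32 status)
 *	{
 *		// inspect msg/status, signal the waiter
 *	}
 *
 *	struct pmu_cmd cmd;
 *	u32 seq;
 *
 *	(void) memset(&cmd, 0, sizeof(cmd));
 *	// fill cmd.hdr and the unit-specific command body here
 *	(void) nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL,
 *		PMU_COMMAND_QUEUE_HPQ, foo_done, NULL, &seq, ~0UL);
 */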

int nvgpu_pmu_process_message(struct nvgpu_pmu *pmu);

/* perfmon */
int nvgpu_pmu_init_perfmon(struct nvgpu_pmu *pmu);
int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu);
int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu);
int nvgpu_pmu_perfmon_start_sampling_rpc(struct nvgpu_pmu *pmu);
int nvgpu_pmu_perfmon_stop_sampling_rpc(struct nvgpu_pmu *pmu);
int nvgpu_pmu_perfmon_get_samples_rpc(struct nvgpu_pmu *pmu);
int nvgpu_pmu_handle_perfmon_event(struct nvgpu_pmu *pmu,
        struct pmu_perfmon_msg *msg);
int nvgpu_pmu_init_perfmon_rpc(struct nvgpu_pmu *pmu);
int nvgpu_pmu_load_norm(struct gk20a *g, u32 *load);
int nvgpu_pmu_load_update(struct gk20a *g);
int nvgpu_pmu_busy_cycles_norm(struct gk20a *g, u32 *norm);
void nvgpu_pmu_reset_load_counters(struct gk20a *g);
void nvgpu_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles,
        u32 *total_cycles);

int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
        struct nv_pmu_therm_msg *msg);

/* PMU init */
int nvgpu_init_pmu_support(struct gk20a *g);
int nvgpu_pmu_destroy(struct gk20a *g);
int nvgpu_pmu_process_init_msg(struct nvgpu_pmu *pmu,
        struct pmu_msg *msg);
int nvgpu_pmu_super_surface_alloc(struct gk20a *g,
        struct nvgpu_mem *mem_surface, u32 size);

void nvgpu_pmu_state_change(struct gk20a *g, u32 pmu_state,
        bool post_change_event);
void nvgpu_kill_task_pg_init(struct gk20a *g);

/* NVGPU-PMU MEM alloc */
void nvgpu_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem);
void nvgpu_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem,
        struct flcn_mem_desc_v0 *fb);
int nvgpu_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
        u32 size);
int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
        u32 size);
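
/*
 * Usage sketch (illustrative): allocate a sysmem surface for PMU DMA,
 * describe it in a falcon memory descriptor, and free it when done. The
 * 4096-byte size is a placeholder and error handling is abbreviated.
 *
 *	struct nvgpu_mem mem;
 *	struct flcn_mem_desc_v0 fb_desc;
 *
 *	if (nvgpu_pmu_sysmem_surface_alloc(g, &mem, 4096U) == 0) {
 *		nvgpu_pmu_surface_describe(g, &mem, &fb_desc);
 *		// ... hand fb_desc to the PMU via a cmd or RPC ...
 *		nvgpu_pmu_surface_free(g, &mem);
 *	}
 */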

/* PMU F/W support */
int nvgpu_init_pmu_fw_support(struct nvgpu_pmu *pmu);
int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g);

/* PG init */
int nvgpu_pmu_init_powergating(struct gk20a *g);
int nvgpu_pmu_init_bind_fecs(struct gk20a *g);
void nvgpu_pmu_setup_hw_load_zbc(struct gk20a *g);

/* PMU reset */
int nvgpu_pmu_reset(struct gk20a *g);

/* PG enable/disable */
int nvgpu_pmu_reenable_elpg(struct gk20a *g);
int nvgpu_pmu_enable_elpg(struct gk20a *g);
int nvgpu_pmu_disable_elpg(struct gk20a *g);
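
/*
 * Usage sketch (illustrative): ELPG enablement is refcounted (see
 * elpg_refcnt in struct nvgpu_pmu: disable decrements, enable increments,
 * and ELPG is engaged only while the count is > 0), so callers bracket
 * work that must not race with power-gating in a disable/enable pair:
 *
 *	if (nvgpu_pmu_disable_elpg(g) != 0) {
 *		return -EBUSY;  // hypothetical error path
 *	}
 *	// ... do work with ELPG held off ...
 *	(void) nvgpu_pmu_enable_elpg(g);
 */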
int nvgpu_pmu_pg_global_enable(struct gk20a *g, u32 enable_pg);

int nvgpu_pmu_get_pg_stats(struct gk20a *g, u32 pg_engine_id,
        struct pmu_pg_stats_data *pg_stat_data);

/* AELPG */
int nvgpu_aelpg_init(struct gk20a *g);
int nvgpu_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id);
int nvgpu_pmu_ap_send_command(struct gk20a *g,
        union pmu_ap_cmd *p_ap_cmd, bool b_block);

/* PMU debug */
void nvgpu_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu);
void nvgpu_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu);
bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos);

/* PMU RPC */
int nvgpu_pmu_rpc_execute(struct nvgpu_pmu *pmu, struct nv_pmu_rpc_header *rpc,
        u16 size_rpc, u16 size_scratch, pmu_callback callback, void *cb_param,
        bool is_copy_back);

/* PMU wait */
int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms,
        void *var, u8 val);
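
/*
 * Usage sketch (illustrative): block until a byte-sized condition variable
 * reaches the expected value, e.g. waiting for the PMU init message to
 * flip pmu_ready. The 2000 ms timeout is a placeholder:
 *
 *	struct gk20a *g = gk20a_from_pmu(pmu);
 *
 *	if (pmu_wait_message_cond(pmu, 2000U, &pmu->pmu_ready,
 *			(u8)true) != 0) {
 *		nvgpu_err(g, "PMU not ready in time");
 *	}
 */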

struct gk20a *gk20a_from_pmu(struct nvgpu_pmu *pmu);
#endif /* NVGPU_PMU_H */