diff options
author | Mahantesh Kumbar <mkumbar@nvidia.com> | 2017-05-10 11:05:24 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-06-05 02:05:18 -0400 |
commit | 673dd971600b26131c0afdb221e13c080da022fd (patch) | |
tree | 7c8416ac2ef61891812773d55c8c8dc61da824aa /drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | |
parent | 7668ccb2a2e4a8c13d82b427c65be79c725afe08 (diff) |
gpu: nvgpu: moved & renamed "struct pmu_gk20a"
- Renamed "struct pmu_gk20a" to "struct nvgpu_pmu" and moved it
to file "pmu.h" under folder "drivers/gpu/nvgpu/include/nvgpu/"
- Included header file "pmu.h" in dependent files &
removed the "pmu_gk20a.h" include where it is no longer used.
- Replaced "struct pmu_gk20a" with "struct nvgpu_pmu" in dependent
source & header files.
JIRA NVGPU-56
Change-Id: Ia3c606616831027093d5c216959c6a40d7c2632e
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1479209
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/pmu_gk20a.h')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | 324 |
1 files changed, 18 insertions, 306 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h index cfcf3947..3941d90f 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | |||
@@ -24,14 +24,10 @@ | |||
24 | #include <linux/version.h> | 24 | #include <linux/version.h> |
25 | #include <nvgpu/flcnif_cmn.h> | 25 | #include <nvgpu/flcnif_cmn.h> |
26 | #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> | 26 | #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> |
27 | #include <nvgpu/pmu.h> | ||
27 | 28 | ||
28 | struct nvgpu_firmware; | 29 | struct nvgpu_firmware; |
29 | 30 | ||
30 | /* defined by pmu hw spec */ | ||
31 | #define GK20A_PMU_VA_SIZE (512 * 1024 * 1024) | ||
32 | #define GK20A_PMU_UCODE_SIZE_MAX (256 * 1024) | ||
33 | #define GK20A_PMU_SEQ_BUF_SIZE 4096 | ||
34 | |||
35 | #define ZBC_MASK(i) (~(~(0) << ((i)+1)) & 0xfffe) | 31 | #define ZBC_MASK(i) (~(~(0) << ((i)+1)) & 0xfffe) |
36 | 32 | ||
37 | #define APP_VERSION_NC_3 21688026 | 33 | #define APP_VERSION_NC_3 21688026 |
@@ -56,127 +52,11 @@ struct nvgpu_firmware; | |||
56 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) | 52 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) |
57 | #define FUSE_GCPLEX_CONFIG_FUSE_0 0x2C8 | 53 | #define FUSE_GCPLEX_CONFIG_FUSE_0 0x2C8 |
58 | #endif | 54 | #endif |
59 | #define PMU_MODE_MISMATCH_STATUS_MAILBOX_R 6 | ||
60 | #define PMU_MODE_MISMATCH_STATUS_VAL 0xDEADDEAD | ||
61 | |||
62 | enum { | ||
63 | GK20A_PMU_DMAIDX_UCODE = 0, | ||
64 | GK20A_PMU_DMAIDX_VIRT = 1, | ||
65 | GK20A_PMU_DMAIDX_PHYS_VID = 2, | ||
66 | GK20A_PMU_DMAIDX_PHYS_SYS_COH = 3, | ||
67 | GK20A_PMU_DMAIDX_PHYS_SYS_NCOH = 4, | ||
68 | GK20A_PMU_DMAIDX_RSVD = 5, | ||
69 | GK20A_PMU_DMAIDX_PELPG = 6, | ||
70 | GK20A_PMU_DMAIDX_END = 7 | ||
71 | }; | ||
72 | |||
73 | #define GK20A_PMU_TRACE_BUFSIZE 0x4000 /* 4K */ | ||
74 | #define GK20A_PMU_DMEM_BLKSIZE2 8 | ||
75 | |||
76 | #define GK20A_PMU_UCODE_NB_MAX_OVERLAY 32 | ||
77 | #define GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH 64 | ||
78 | |||
79 | struct pmu_ucode_desc { | ||
80 | u32 descriptor_size; | ||
81 | u32 image_size; | ||
82 | u32 tools_version; | ||
83 | u32 app_version; | ||
84 | char date[GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH]; | ||
85 | u32 bootloader_start_offset; | ||
86 | u32 bootloader_size; | ||
87 | u32 bootloader_imem_offset; | ||
88 | u32 bootloader_entry_point; | ||
89 | u32 app_start_offset; | ||
90 | u32 app_size; | ||
91 | u32 app_imem_offset; | ||
92 | u32 app_imem_entry; | ||
93 | u32 app_dmem_offset; | ||
94 | u32 app_resident_code_offset; /* Offset from appStartOffset */ | ||
95 | u32 app_resident_code_size; /* Exact size of the resident code ( potentially contains CRC inside at the end ) */ | ||
96 | u32 app_resident_data_offset; /* Offset from appStartOffset */ | ||
97 | u32 app_resident_data_size; /* Exact size of the resident code ( potentially contains CRC inside at the end ) */ | ||
98 | u32 nb_overlays; | ||
99 | struct {u32 start; u32 size;} load_ovl[GK20A_PMU_UCODE_NB_MAX_OVERLAY]; | ||
100 | u32 compressed; | ||
101 | }; | ||
102 | |||
103 | struct pmu_ucode_desc_v1 { | ||
104 | u32 descriptor_size; | ||
105 | u32 image_size; | ||
106 | u32 tools_version; | ||
107 | u32 app_version; | ||
108 | char date[GK20A_PMU_UCODE_NB_MAX_DATE_LENGTH]; | ||
109 | u32 bootloader_start_offset; | ||
110 | u32 bootloader_size; | ||
111 | u32 bootloader_imem_offset; | ||
112 | u32 bootloader_entry_point; | ||
113 | u32 app_start_offset; | ||
114 | u32 app_size; | ||
115 | u32 app_imem_offset; | ||
116 | u32 app_imem_entry; | ||
117 | u32 app_dmem_offset; | ||
118 | u32 app_resident_code_offset; | ||
119 | u32 app_resident_code_size; | ||
120 | u32 app_resident_data_offset; | ||
121 | u32 app_resident_data_size; | ||
122 | u32 nb_imem_overlays; | ||
123 | u32 nb_dmem_overlays; | ||
124 | struct {u32 start; u32 size; } load_ovl[64]; | ||
125 | u32 compressed; | ||
126 | }; | ||
127 | 55 | ||
128 | #define PMU_PGENG_GR_BUFFER_IDX_INIT (0) | 56 | #define PMU_PGENG_GR_BUFFER_IDX_INIT (0) |
129 | #define PMU_PGENG_GR_BUFFER_IDX_ZBC (1) | 57 | #define PMU_PGENG_GR_BUFFER_IDX_ZBC (1) |
130 | #define PMU_PGENG_GR_BUFFER_IDX_FECS (2) | 58 | #define PMU_PGENG_GR_BUFFER_IDX_FECS (2) |
131 | 59 | ||
132 | struct pmu_gk20a; | ||
133 | struct pmu_queue; | ||
134 | |||
135 | struct pmu_queue { | ||
136 | |||
137 | /* used by hw, for BIOS/SMI queue */ | ||
138 | u32 mutex_id; | ||
139 | u32 mutex_lock; | ||
140 | /* used by sw, for LPQ/HPQ queue */ | ||
141 | struct nvgpu_mutex mutex; | ||
142 | |||
143 | /* current write position */ | ||
144 | u32 position; | ||
145 | /* physical dmem offset where this queue begins */ | ||
146 | u32 offset; | ||
147 | /* logical queue identifier */ | ||
148 | u32 id; | ||
149 | /* physical queue index */ | ||
150 | u32 index; | ||
151 | /* in bytes */ | ||
152 | u32 size; | ||
153 | |||
154 | /* open-flag */ | ||
155 | u32 oflag; | ||
156 | bool opened; /* opened implies locked */ | ||
157 | }; | ||
158 | |||
159 | struct pmu_mutex { | ||
160 | u32 id; | ||
161 | u32 index; | ||
162 | u32 ref_cnt; | ||
163 | }; | ||
164 | |||
165 | #define PMU_MAX_NUM_SEQUENCES (256) | ||
166 | #define PMU_SEQ_BIT_SHIFT (5) | ||
167 | #define PMU_SEQ_TBL_SIZE \ | ||
168 | (PMU_MAX_NUM_SEQUENCES >> PMU_SEQ_BIT_SHIFT) | ||
169 | |||
170 | #define PMU_INVALID_SEQ_DESC (~0) | ||
171 | |||
172 | enum | ||
173 | { | ||
174 | PMU_SEQ_STATE_FREE = 0, | ||
175 | PMU_SEQ_STATE_PENDING, | ||
176 | PMU_SEQ_STATE_USED, | ||
177 | PMU_SEQ_STATE_CANCELLED | ||
178 | }; | ||
179 | |||
180 | struct pmu_payload { | 60 | struct pmu_payload { |
181 | struct { | 61 | struct { |
182 | void *buf; | 62 | void *buf; |
@@ -192,33 +72,6 @@ struct pmu_surface { | |||
192 | struct flcn_mem_desc_v0 params; | 72 | struct flcn_mem_desc_v0 params; |
193 | }; | 73 | }; |
194 | 74 | ||
195 | typedef void (*pmu_callback)(struct gk20a *, struct pmu_msg *, void *, u32, | ||
196 | u32); | ||
197 | |||
198 | struct pmu_sequence { | ||
199 | u8 id; | ||
200 | u32 state; | ||
201 | u32 desc; | ||
202 | struct pmu_msg *msg; | ||
203 | union { | ||
204 | struct pmu_allocation_v0 in_v0; | ||
205 | struct pmu_allocation_v1 in_v1; | ||
206 | struct pmu_allocation_v2 in_v2; | ||
207 | struct pmu_allocation_v3 in_v3; | ||
208 | }; | ||
209 | struct nvgpu_mem *in_mem; | ||
210 | union { | ||
211 | struct pmu_allocation_v0 out_v0; | ||
212 | struct pmu_allocation_v1 out_v1; | ||
213 | struct pmu_allocation_v2 out_v2; | ||
214 | struct pmu_allocation_v3 out_v3; | ||
215 | }; | ||
216 | struct nvgpu_mem *out_mem; | ||
217 | u8 *out_payload; | ||
218 | pmu_callback callback; | ||
219 | void* cb_params; | ||
220 | }; | ||
221 | |||
222 | /*PG defines used by nvpgu-pmu*/ | 75 | /*PG defines used by nvpgu-pmu*/ |
223 | struct pmu_pg_stats_data { | 76 | struct pmu_pg_stats_data { |
224 | u32 gating_cnt; | 77 | u32 gating_cnt; |
@@ -263,147 +116,6 @@ struct pmu_pg_stats_data { | |||
263 | #define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200) | 116 | #define APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT (200) |
264 | /*PG defines used by nvpgu-pmu*/ | 117 | /*PG defines used by nvpgu-pmu*/ |
265 | 118 | ||
266 | /* Falcon Register index */ | ||
267 | #define PMU_FALCON_REG_R0 (0) | ||
268 | #define PMU_FALCON_REG_R1 (1) | ||
269 | #define PMU_FALCON_REG_R2 (2) | ||
270 | #define PMU_FALCON_REG_R3 (3) | ||
271 | #define PMU_FALCON_REG_R4 (4) | ||
272 | #define PMU_FALCON_REG_R5 (5) | ||
273 | #define PMU_FALCON_REG_R6 (6) | ||
274 | #define PMU_FALCON_REG_R7 (7) | ||
275 | #define PMU_FALCON_REG_R8 (8) | ||
276 | #define PMU_FALCON_REG_R9 (9) | ||
277 | #define PMU_FALCON_REG_R10 (10) | ||
278 | #define PMU_FALCON_REG_R11 (11) | ||
279 | #define PMU_FALCON_REG_R12 (12) | ||
280 | #define PMU_FALCON_REG_R13 (13) | ||
281 | #define PMU_FALCON_REG_R14 (14) | ||
282 | #define PMU_FALCON_REG_R15 (15) | ||
283 | #define PMU_FALCON_REG_IV0 (16) | ||
284 | #define PMU_FALCON_REG_IV1 (17) | ||
285 | #define PMU_FALCON_REG_UNDEFINED (18) | ||
286 | #define PMU_FALCON_REG_EV (19) | ||
287 | #define PMU_FALCON_REG_SP (20) | ||
288 | #define PMU_FALCON_REG_PC (21) | ||
289 | #define PMU_FALCON_REG_IMB (22) | ||
290 | #define PMU_FALCON_REG_DMB (23) | ||
291 | #define PMU_FALCON_REG_CSW (24) | ||
292 | #define PMU_FALCON_REG_CCR (25) | ||
293 | #define PMU_FALCON_REG_SEC (26) | ||
294 | #define PMU_FALCON_REG_CTX (27) | ||
295 | #define PMU_FALCON_REG_EXCI (28) | ||
296 | #define PMU_FALCON_REG_RSVD0 (29) | ||
297 | #define PMU_FALCON_REG_RSVD1 (30) | ||
298 | #define PMU_FALCON_REG_RSVD2 (31) | ||
299 | #define PMU_FALCON_REG_SIZE (32) | ||
300 | |||
301 | /* Choices for pmu_state */ | ||
302 | #define PMU_STATE_OFF 0 /* PMU is off */ | ||
303 | #define PMU_STATE_STARTING 1 /* PMU is on, but not booted */ | ||
304 | #define PMU_STATE_INIT_RECEIVED 2 /* PMU init message received */ | ||
305 | #define PMU_STATE_ELPG_BOOTING 3 /* PMU is booting */ | ||
306 | #define PMU_STATE_ELPG_BOOTED 4 /* ELPG is initialized */ | ||
307 | #define PMU_STATE_LOADING_PG_BUF 5 /* Loading PG buf */ | ||
308 | #define PMU_STATE_LOADING_ZBC 6 /* Loading ZBC buf */ | ||
309 | #define PMU_STATE_STARTED 7 /* Fully unitialized */ | ||
310 | #define PMU_STATE_EXIT 8 /* Exit PMU state machine */ | ||
311 | |||
312 | struct nvgpu_pg_init { | ||
313 | bool state_change; | ||
314 | struct nvgpu_cond wq; | ||
315 | struct nvgpu_thread state_task; | ||
316 | }; | ||
317 | |||
318 | struct pmu_gk20a { | ||
319 | |||
320 | union { | ||
321 | struct pmu_ucode_desc *desc; | ||
322 | struct pmu_ucode_desc_v1 *desc_v1; | ||
323 | }; | ||
324 | struct nvgpu_mem ucode; | ||
325 | |||
326 | struct nvgpu_mem pg_buf; | ||
327 | /* TBD: remove this if ZBC seq is fixed */ | ||
328 | struct nvgpu_mem seq_buf; | ||
329 | struct nvgpu_mem trace_buf; | ||
330 | struct nvgpu_mem wpr_buf; | ||
331 | bool buf_loaded; | ||
332 | |||
333 | struct pmu_sha1_gid gid_info; | ||
334 | |||
335 | struct pmu_queue queue[PMU_QUEUE_COUNT]; | ||
336 | |||
337 | struct pmu_sequence *seq; | ||
338 | unsigned long pmu_seq_tbl[PMU_SEQ_TBL_SIZE]; | ||
339 | u32 next_seq_desc; | ||
340 | |||
341 | struct pmu_mutex *mutex; | ||
342 | u32 mutex_cnt; | ||
343 | |||
344 | struct nvgpu_mutex pmu_copy_lock; | ||
345 | struct nvgpu_mutex pmu_seq_lock; | ||
346 | |||
347 | struct nvgpu_allocator dmem; | ||
348 | |||
349 | u32 *ucode_image; | ||
350 | bool pmu_ready; | ||
351 | |||
352 | u32 zbc_save_done; | ||
353 | |||
354 | u32 stat_dmem_offset[PMU_PG_ELPG_ENGINE_ID_INVALID_ENGINE]; | ||
355 | |||
356 | u32 elpg_stat; | ||
357 | |||
358 | u32 mscg_stat; | ||
359 | u32 mscg_transition_state; | ||
360 | |||
361 | int pmu_state; | ||
362 | |||
363 | #define PMU_ELPG_ENABLE_ALLOW_DELAY_MSEC 1 /* msec */ | ||
364 | struct nvgpu_pg_init pg_init; | ||
365 | struct nvgpu_mutex pg_mutex; /* protect pg-RPPG/MSCG enable/disable */ | ||
366 | struct nvgpu_mutex elpg_mutex; /* protect elpg enable/disable */ | ||
367 | int elpg_refcnt; /* disable -1, enable +1, <=0 elpg disabled, > 0 elpg enabled */ | ||
368 | |||
369 | union { | ||
370 | struct pmu_perfmon_counter_v2 perfmon_counter_v2; | ||
371 | struct pmu_perfmon_counter_v0 perfmon_counter_v0; | ||
372 | }; | ||
373 | u32 perfmon_state_id[PMU_DOMAIN_GROUP_NUM]; | ||
374 | |||
375 | bool initialized; | ||
376 | |||
377 | void (*remove_support)(struct pmu_gk20a *pmu); | ||
378 | bool sw_ready; | ||
379 | bool perfmon_ready; | ||
380 | |||
381 | u32 sample_buffer; | ||
382 | u32 load_shadow; | ||
383 | u32 load_avg; | ||
384 | |||
385 | struct nvgpu_mutex isr_mutex; | ||
386 | bool isr_enabled; | ||
387 | |||
388 | bool zbc_ready; | ||
389 | union { | ||
390 | struct pmu_cmdline_args_v0 args_v0; | ||
391 | struct pmu_cmdline_args_v1 args_v1; | ||
392 | struct pmu_cmdline_args_v2 args_v2; | ||
393 | struct pmu_cmdline_args_v3 args_v3; | ||
394 | struct pmu_cmdline_args_v4 args_v4; | ||
395 | struct pmu_cmdline_args_v5 args_v5; | ||
396 | }; | ||
397 | unsigned long perfmon_events_cnt; | ||
398 | bool perfmon_sampling_enabled; | ||
399 | u8 pmu_mode; /*Added for GM20b, and ACR*/ | ||
400 | u32 falcon_id; | ||
401 | u32 aelpg_param[5]; | ||
402 | u32 override_done; | ||
403 | |||
404 | struct nvgpu_firmware *fw; | ||
405 | }; | ||
406 | |||
407 | int gk20a_init_pmu_support(struct gk20a *g); | 119 | int gk20a_init_pmu_support(struct gk20a *g); |
408 | int gk20a_init_pmu_bind_fecs(struct gk20a *g); | 120 | int gk20a_init_pmu_bind_fecs(struct gk20a *g); |
409 | 121 | ||
@@ -426,8 +138,8 @@ void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries); | |||
426 | 138 | ||
427 | int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable); | 139 | int gk20a_pmu_perfmon_enable(struct gk20a *g, bool enable); |
428 | 140 | ||
429 | int pmu_mutex_acquire(struct pmu_gk20a *pmu, u32 id, u32 *token); | 141 | int pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token); |
430 | int pmu_mutex_release(struct pmu_gk20a *pmu, u32 id, u32 *token); | 142 | int pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token); |
431 | int gk20a_pmu_destroy(struct gk20a *g); | 143 | int gk20a_pmu_destroy(struct gk20a *g); |
432 | int gk20a_pmu_load_norm(struct gk20a *g, u32 *load); | 144 | int gk20a_pmu_load_norm(struct gk20a *g, u32 *load); |
433 | int gk20a_pmu_load_update(struct gk20a *g); | 145 | int gk20a_pmu_load_update(struct gk20a *g); |
@@ -436,33 +148,33 @@ void gk20a_pmu_get_load_counters(struct gk20a *g, u32 *busy_cycles, | |||
436 | u32 *total_cycles); | 148 | u32 *total_cycles); |
437 | void gk20a_init_pmu_ops(struct gpu_ops *gops); | 149 | void gk20a_init_pmu_ops(struct gpu_ops *gops); |
438 | 150 | ||
439 | void pmu_copy_to_dmem(struct pmu_gk20a *pmu, | 151 | void pmu_copy_to_dmem(struct nvgpu_pmu *pmu, |
440 | u32 dst, u8 *src, u32 size, u8 port); | 152 | u32 dst, u8 *src, u32 size, u8 port); |
441 | void pmu_copy_from_dmem(struct pmu_gk20a *pmu, | 153 | void pmu_copy_from_dmem(struct nvgpu_pmu *pmu, |
442 | u32 src, u8 *dst, u32 size, u8 port); | 154 | u32 src, u8 *dst, u32 size, u8 port); |
443 | int pmu_reset(struct pmu_gk20a *pmu); | 155 | int pmu_reset(struct nvgpu_pmu *pmu); |
444 | int pmu_bootstrap(struct pmu_gk20a *pmu); | 156 | int pmu_bootstrap(struct nvgpu_pmu *pmu); |
445 | int gk20a_init_pmu(struct pmu_gk20a *pmu); | 157 | int gk20a_init_pmu(struct nvgpu_pmu *pmu); |
446 | void pmu_dump_falcon_stats(struct pmu_gk20a *pmu); | 158 | void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu); |
447 | void gk20a_remove_pmu_support(struct pmu_gk20a *pmu); | 159 | void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu); |
448 | void pmu_seq_init(struct pmu_gk20a *pmu); | 160 | void pmu_seq_init(struct nvgpu_pmu *pmu); |
449 | 161 | ||
450 | int gk20a_init_pmu(struct pmu_gk20a *pmu); | 162 | int gk20a_init_pmu(struct nvgpu_pmu *pmu); |
451 | 163 | ||
452 | int gk20a_pmu_ap_send_command(struct gk20a *g, | 164 | int gk20a_pmu_ap_send_command(struct gk20a *g, |
453 | union pmu_ap_cmd *p_ap_cmd, bool b_block); | 165 | union pmu_ap_cmd *p_ap_cmd, bool b_block); |
454 | int gk20a_aelpg_init(struct gk20a *g); | 166 | int gk20a_aelpg_init(struct gk20a *g); |
455 | int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id); | 167 | int gk20a_aelpg_init_and_enable(struct gk20a *g, u8 ctrl_id); |
456 | void pmu_enable_irq(struct pmu_gk20a *pmu, bool enable); | 168 | void pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable); |
457 | int pmu_wait_message_cond(struct pmu_gk20a *pmu, u32 timeout_ms, | 169 | int pmu_wait_message_cond(struct nvgpu_pmu *pmu, u32 timeout_ms, |
458 | u32 *var, u32 val); | 170 | u32 *var, u32 val); |
459 | void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg, | 171 | void pmu_handle_fecs_boot_acr_msg(struct gk20a *g, struct pmu_msg *msg, |
460 | void *param, u32 handle, u32 status); | 172 | void *param, u32 handle, u32 status); |
461 | void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, | 173 | void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id, |
462 | struct pmu_pg_stats_data *pg_stat_data); | 174 | struct pmu_pg_stats_data *pg_stat_data); |
463 | int gk20a_pmu_reset(struct gk20a *g); | 175 | int gk20a_pmu_reset(struct gk20a *g); |
464 | int pmu_idle(struct pmu_gk20a *pmu); | 176 | int pmu_idle(struct nvgpu_pmu *pmu); |
465 | int pmu_enable_hw(struct pmu_gk20a *pmu, bool enable); | 177 | int pmu_enable_hw(struct nvgpu_pmu *pmu, bool enable); |
466 | 178 | ||
467 | void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem); | 179 | void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem); |
468 | void gk20a_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem, | 180 | void gk20a_pmu_surface_describe(struct gk20a *g, struct nvgpu_mem *mem, |
@@ -475,7 +187,7 @@ int gk20a_pmu_get_pg_stats(struct gk20a *g, | |||
475 | u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data); | 187 | u32 pg_engine_id, struct pmu_pg_stats_data *pg_stat_data); |
476 | bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos); | 188 | bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos); |
477 | 189 | ||
478 | int nvgpu_pmu_perfmon_start_sampling(struct pmu_gk20a *pmu); | 190 | int nvgpu_pmu_perfmon_start_sampling(struct nvgpu_pmu *pmu); |
479 | int nvgpu_pmu_perfmon_stop_sampling(struct pmu_gk20a *pmu); | 191 | int nvgpu_pmu_perfmon_stop_sampling(struct nvgpu_pmu *pmu); |
480 | 192 | ||
481 | #endif /*__PMU_GK20A_H__*/ | 193 | #endif /*__PMU_GK20A_H__*/ |