diff options
author | Mahantesh Kumbar <mkumbar@nvidia.com> | 2017-05-12 04:28:43 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-06-13 05:40:16 -0400 |
commit | e9267bb3e5624825114fedc03d511035785f2fd7 (patch) | |
tree | 06b1e72c311f63c15254f5e13e33e363dea62d39 /drivers/gpu/nvgpu/gk20a | |
parent | 8c66aef3bdbfbbeb1d3c3ef3bd6b1bee3ac05411 (diff) |
gpu: nvgpu: reorganize PMU F/W support
- Moved pmu f/w related support from pmu_gk20a.c
to "drivers/gpu/nvgpu/common/pmu/pmu_fw.c" file
- Prepended with nvgpu_ for global functions & replaced
wherever used
- Moved below list related to PMU f/w
init/remove,
PMU version specific ops,
non-secure ucode blob prepare,
JIRA NVGPU-56
Change-Id: Ifdad8c560bd233e98728717d5868119e9d8e8d90
Signed-off-by: Mahantesh Kumbar <mkumbar@nvidia.com>
Reviewed-on: http://git-master/r/1480636
GVS: Gerrit_Virtual_Submit
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 2168 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | 22 |
2 files changed, 5 insertions, 2185 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index a9457330..247b38a5 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | |||
@@ -35,249 +35,20 @@ | |||
35 | #include "nvgpu_gpuid_t19x.h" | 35 | #include "nvgpu_gpuid_t19x.h" |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | #define GK20A_PMU_UCODE_IMAGE "gpmu_ucode.bin" | 38 | #define gk20a_dbg_pmu(fmt, arg...) \ |
39 | gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) | ||
39 | 40 | ||
40 | #define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000 | 41 | #define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000 |
41 | #define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10 | 42 | #define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10 |
42 | 43 | ||
43 | #define gk20a_dbg_pmu(fmt, arg...) \ | ||
44 | gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) | ||
45 | |||
46 | static void ap_callback_init_and_enable_ctrl( | 44 | static void ap_callback_init_and_enable_ctrl( |
47 | struct gk20a *g, struct pmu_msg *msg, | 45 | struct gk20a *g, struct pmu_msg *msg, |
48 | void *param, u32 seq_desc, u32 status); | 46 | void *param, u32 seq_desc, u32 status); |
49 | 47 | ||
50 | static u32 pmu_perfmon_cntr_sz_v0(struct nvgpu_pmu *pmu) | ||
51 | { | ||
52 | return sizeof(struct pmu_perfmon_counter_v0); | ||
53 | } | ||
54 | |||
55 | static u32 pmu_perfmon_cntr_sz_v2(struct nvgpu_pmu *pmu) | ||
56 | { | ||
57 | return sizeof(struct pmu_perfmon_counter_v2); | ||
58 | } | ||
59 | |||
60 | static void *get_perfmon_cntr_ptr_v2(struct nvgpu_pmu *pmu) | ||
61 | { | ||
62 | return (void *)(&pmu->perfmon_counter_v2); | ||
63 | } | ||
64 | |||
65 | static void *get_perfmon_cntr_ptr_v0(struct nvgpu_pmu *pmu) | ||
66 | { | ||
67 | return (void *)(&pmu->perfmon_counter_v0); | ||
68 | } | ||
69 | |||
70 | static void set_perfmon_cntr_ut_v2(struct nvgpu_pmu *pmu, u16 ut) | ||
71 | { | ||
72 | pmu->perfmon_counter_v2.upper_threshold = ut; | ||
73 | } | ||
74 | |||
75 | static void set_perfmon_cntr_ut_v0(struct nvgpu_pmu *pmu, u16 ut) | ||
76 | { | ||
77 | pmu->perfmon_counter_v0.upper_threshold = ut; | ||
78 | } | ||
79 | |||
80 | static void set_perfmon_cntr_lt_v2(struct nvgpu_pmu *pmu, u16 lt) | ||
81 | { | ||
82 | pmu->perfmon_counter_v2.lower_threshold = lt; | ||
83 | } | ||
84 | |||
85 | static void set_perfmon_cntr_lt_v0(struct nvgpu_pmu *pmu, u16 lt) | ||
86 | { | ||
87 | pmu->perfmon_counter_v0.lower_threshold = lt; | ||
88 | } | ||
89 | |||
90 | static void set_perfmon_cntr_valid_v2(struct nvgpu_pmu *pmu, u8 valid) | ||
91 | { | ||
92 | pmu->perfmon_counter_v2.valid = valid; | ||
93 | } | ||
94 | |||
95 | static void set_perfmon_cntr_valid_v0(struct nvgpu_pmu *pmu, u8 valid) | ||
96 | { | ||
97 | pmu->perfmon_counter_v0.valid = valid; | ||
98 | } | ||
99 | |||
100 | static void set_perfmon_cntr_index_v2(struct nvgpu_pmu *pmu, u8 index) | ||
101 | { | ||
102 | pmu->perfmon_counter_v2.index = index; | ||
103 | } | ||
104 | |||
105 | static void set_perfmon_cntr_index_v0(struct nvgpu_pmu *pmu, u8 index) | ||
106 | { | ||
107 | pmu->perfmon_counter_v0.index = index; | ||
108 | } | ||
109 | |||
110 | static void set_perfmon_cntr_group_id_v2(struct nvgpu_pmu *pmu, u8 gid) | ||
111 | { | ||
112 | pmu->perfmon_counter_v2.group_id = gid; | ||
113 | } | ||
114 | |||
115 | static void set_perfmon_cntr_group_id_v0(struct nvgpu_pmu *pmu, u8 gid) | ||
116 | { | ||
117 | pmu->perfmon_counter_v0.group_id = gid; | ||
118 | } | ||
119 | |||
120 | static u32 pmu_cmdline_size_v0(struct nvgpu_pmu *pmu) | ||
121 | { | ||
122 | return sizeof(struct pmu_cmdline_args_v0); | ||
123 | } | ||
124 | |||
125 | static u32 pmu_cmdline_size_v1(struct nvgpu_pmu *pmu) | ||
126 | { | ||
127 | return sizeof(struct pmu_cmdline_args_v1); | ||
128 | } | ||
129 | |||
130 | static u32 pmu_cmdline_size_v2(struct nvgpu_pmu *pmu) | ||
131 | { | ||
132 | return sizeof(struct pmu_cmdline_args_v2); | ||
133 | } | ||
134 | |||
135 | static void set_pmu_cmdline_args_cpufreq_v2(struct nvgpu_pmu *pmu, u32 freq) | ||
136 | { | ||
137 | pmu->args_v2.cpu_freq_hz = freq; | ||
138 | } | ||
139 | static void set_pmu_cmdline_args_secure_mode_v2(struct nvgpu_pmu *pmu, u32 val) | ||
140 | { | ||
141 | pmu->args_v2.secure_mode = val; | ||
142 | } | ||
143 | |||
144 | static void set_pmu_cmdline_args_falctracesize_v2( | ||
145 | struct nvgpu_pmu *pmu, u32 size) | ||
146 | { | ||
147 | pmu->args_v2.falc_trace_size = size; | ||
148 | } | ||
149 | |||
150 | static void set_pmu_cmdline_args_falctracedmabase_v2(struct nvgpu_pmu *pmu) | ||
151 | { | ||
152 | pmu->args_v2.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | ||
153 | } | ||
154 | |||
155 | static void set_pmu_cmdline_args_falctracedmaidx_v2( | ||
156 | struct nvgpu_pmu *pmu, u32 idx) | ||
157 | { | ||
158 | pmu->args_v2.falc_trace_dma_idx = idx; | ||
159 | } | ||
160 | |||
161 | |||
162 | static void set_pmu_cmdline_args_falctracedmabase_v4(struct nvgpu_pmu *pmu) | ||
163 | { | ||
164 | pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | ||
165 | pmu->args_v4.dma_addr.dma_base1 = 0; | ||
166 | pmu->args_v4.dma_addr.dma_offset = 0; | ||
167 | } | ||
168 | |||
169 | static u32 pmu_cmdline_size_v4(struct nvgpu_pmu *pmu) | ||
170 | { | ||
171 | return sizeof(struct pmu_cmdline_args_v4); | ||
172 | } | ||
173 | |||
174 | static void set_pmu_cmdline_args_cpufreq_v4(struct nvgpu_pmu *pmu, u32 freq) | ||
175 | { | ||
176 | pmu->args_v4.cpu_freq_hz = freq; | ||
177 | } | ||
178 | static void set_pmu_cmdline_args_secure_mode_v4(struct nvgpu_pmu *pmu, u32 val) | ||
179 | { | ||
180 | pmu->args_v4.secure_mode = val; | ||
181 | } | ||
182 | |||
183 | static void set_pmu_cmdline_args_falctracesize_v4( | ||
184 | struct nvgpu_pmu *pmu, u32 size) | ||
185 | { | ||
186 | pmu->args_v4.falc_trace_size = size; | ||
187 | } | ||
188 | static void set_pmu_cmdline_args_falctracedmaidx_v4( | ||
189 | struct nvgpu_pmu *pmu, u32 idx) | ||
190 | { | ||
191 | pmu->args_v4.falc_trace_dma_idx = idx; | ||
192 | } | ||
193 | |||
194 | static u32 pmu_cmdline_size_v5(struct nvgpu_pmu *pmu) | ||
195 | { | ||
196 | return sizeof(struct pmu_cmdline_args_v5); | ||
197 | } | ||
198 | |||
199 | static u32 pmu_cmdline_size_v6(struct nvgpu_pmu *pmu) | ||
200 | { | ||
201 | return sizeof(struct pmu_cmdline_args_v6); | ||
202 | } | ||
203 | |||
204 | static void set_pmu_cmdline_args_cpufreq_v5(struct nvgpu_pmu *pmu, u32 freq) | ||
205 | { | ||
206 | pmu->args_v5.cpu_freq_hz = 204000000; | ||
207 | } | ||
208 | static void set_pmu_cmdline_args_secure_mode_v5(struct nvgpu_pmu *pmu, u32 val) | ||
209 | { | ||
210 | pmu->args_v5.secure_mode = val; | ||
211 | } | ||
212 | |||
213 | static void set_pmu_cmdline_args_falctracesize_v5( | ||
214 | struct nvgpu_pmu *pmu, u32 size) | ||
215 | { | ||
216 | /* set by surface describe */ | ||
217 | } | ||
218 | |||
219 | static void set_pmu_cmdline_args_falctracedmabase_v5(struct nvgpu_pmu *pmu) | ||
220 | { | ||
221 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
222 | |||
223 | nvgpu_pmu_surface_describe(g, &pmu->trace_buf, &pmu->args_v5.trace_buf); | ||
224 | } | ||
225 | |||
226 | static void set_pmu_cmdline_args_falctracedmaidx_v5( | ||
227 | struct nvgpu_pmu *pmu, u32 idx) | ||
228 | { | ||
229 | /* set by surface describe */ | ||
230 | } | ||
231 | |||
232 | static u32 pmu_cmdline_size_v3(struct nvgpu_pmu *pmu) | ||
233 | { | ||
234 | return sizeof(struct pmu_cmdline_args_v3); | ||
235 | } | ||
236 | |||
237 | static void set_pmu_cmdline_args_cpufreq_v3(struct nvgpu_pmu *pmu, u32 freq) | ||
238 | { | ||
239 | pmu->args_v3.cpu_freq_hz = freq; | ||
240 | } | ||
241 | static void set_pmu_cmdline_args_secure_mode_v3(struct nvgpu_pmu *pmu, u32 val) | ||
242 | { | ||
243 | pmu->args_v3.secure_mode = val; | ||
244 | } | ||
245 | |||
246 | static void set_pmu_cmdline_args_falctracesize_v3( | ||
247 | struct nvgpu_pmu *pmu, u32 size) | ||
248 | { | ||
249 | pmu->args_v3.falc_trace_size = size; | ||
250 | } | ||
251 | |||
252 | static void set_pmu_cmdline_args_falctracedmabase_v3(struct nvgpu_pmu *pmu) | ||
253 | { | ||
254 | pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | ||
255 | } | ||
256 | |||
257 | static void set_pmu_cmdline_args_falctracedmaidx_v3( | ||
258 | struct nvgpu_pmu *pmu, u32 idx) | ||
259 | { | ||
260 | pmu->args_v3.falc_trace_dma_idx = idx; | ||
261 | } | ||
262 | |||
263 | static void set_pmu_cmdline_args_cpufreq_v1(struct nvgpu_pmu *pmu, u32 freq) | ||
264 | { | ||
265 | pmu->args_v1.cpu_freq_hz = freq; | ||
266 | } | ||
267 | static void set_pmu_cmdline_args_secure_mode_v1(struct nvgpu_pmu *pmu, u32 val) | ||
268 | { | ||
269 | pmu->args_v1.secure_mode = val; | ||
270 | } | ||
271 | |||
272 | static void set_pmu_cmdline_args_falctracesize_v1( | ||
273 | struct nvgpu_pmu *pmu, u32 size) | ||
274 | { | ||
275 | pmu->args_v1.falc_trace_size = size; | ||
276 | } | ||
277 | |||
278 | bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) | 48 | bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) |
279 | { | 49 | { |
280 | u32 i = 0, j = strlen(strings); | 50 | u32 i = 0, j = strlen(strings); |
51 | |||
281 | for (; i < j; i++) { | 52 | for (; i < j; i++) { |
282 | if (strings[i] == '%') | 53 | if (strings[i] == '%') |
283 | if (strings[i + 1] == 'x' || strings[i + 1] == 'X') { | 54 | if (strings[i + 1] == 'x' || strings[i + 1] == 'X') { |
@@ -330,1885 +101,13 @@ static void printtrace(struct nvgpu_pmu *pmu) | |||
330 | l++; | 101 | l++; |
331 | m += k + 2; | 102 | m += k + 2; |
332 | } | 103 | } |
104 | |||
333 | scnprintf((buf + count), 0x40, "%s", (trace+i+20+m)); | 105 | scnprintf((buf + count), 0x40, "%s", (trace+i+20+m)); |
334 | nvgpu_err(g, "%s", buf); | 106 | nvgpu_err(g, "%s", buf); |
335 | } | 107 | } |
336 | nvgpu_kfree(g, tracebuffer); | 108 | nvgpu_kfree(g, tracebuffer); |
337 | } | 109 | } |
338 | 110 | ||
339 | static void set_pmu_cmdline_args_falctracedmabase_v1(struct nvgpu_pmu *pmu) | ||
340 | { | ||
341 | pmu->args_v1.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | ||
342 | } | ||
343 | |||
344 | static void set_pmu_cmdline_args_falctracedmaidx_v1( | ||
345 | struct nvgpu_pmu *pmu, u32 idx) | ||
346 | { | ||
347 | pmu->args_v1.falc_trace_dma_idx = idx; | ||
348 | } | ||
349 | |||
350 | static void set_pmu_cmdline_args_cpufreq_v0(struct nvgpu_pmu *pmu, u32 freq) | ||
351 | { | ||
352 | pmu->args_v0.cpu_freq_hz = freq; | ||
353 | } | ||
354 | |||
355 | static void *get_pmu_cmdline_args_ptr_v4(struct nvgpu_pmu *pmu) | ||
356 | { | ||
357 | return (void *)(&pmu->args_v4); | ||
358 | } | ||
359 | |||
360 | static void *get_pmu_cmdline_args_ptr_v3(struct nvgpu_pmu *pmu) | ||
361 | { | ||
362 | return (void *)(&pmu->args_v3); | ||
363 | } | ||
364 | |||
365 | static void *get_pmu_cmdline_args_ptr_v2(struct nvgpu_pmu *pmu) | ||
366 | { | ||
367 | return (void *)(&pmu->args_v2); | ||
368 | } | ||
369 | |||
370 | static void *get_pmu_cmdline_args_ptr_v5(struct nvgpu_pmu *pmu) | ||
371 | { | ||
372 | return (void *)(&pmu->args_v5); | ||
373 | } | ||
374 | static void *get_pmu_cmdline_args_ptr_v1(struct nvgpu_pmu *pmu) | ||
375 | { | ||
376 | return (void *)(&pmu->args_v1); | ||
377 | } | ||
378 | |||
379 | static void *get_pmu_cmdline_args_ptr_v0(struct nvgpu_pmu *pmu) | ||
380 | { | ||
381 | return (void *)(&pmu->args_v0); | ||
382 | } | ||
383 | |||
384 | static u32 get_pmu_allocation_size_v3(struct nvgpu_pmu *pmu) | ||
385 | { | ||
386 | return sizeof(struct pmu_allocation_v3); | ||
387 | } | ||
388 | |||
389 | static u32 get_pmu_allocation_size_v2(struct nvgpu_pmu *pmu) | ||
390 | { | ||
391 | return sizeof(struct pmu_allocation_v2); | ||
392 | } | ||
393 | |||
394 | static u32 get_pmu_allocation_size_v1(struct nvgpu_pmu *pmu) | ||
395 | { | ||
396 | return sizeof(struct pmu_allocation_v1); | ||
397 | } | ||
398 | |||
399 | static u32 get_pmu_allocation_size_v0(struct nvgpu_pmu *pmu) | ||
400 | { | ||
401 | return sizeof(struct pmu_allocation_v0); | ||
402 | } | ||
403 | |||
404 | static void set_pmu_allocation_ptr_v3(struct nvgpu_pmu *pmu, | ||
405 | void **pmu_alloc_ptr, void *assign_ptr) | ||
406 | { | ||
407 | struct pmu_allocation_v3 **pmu_a_ptr = | ||
408 | (struct pmu_allocation_v3 **)pmu_alloc_ptr; | ||
409 | *pmu_a_ptr = (struct pmu_allocation_v3 *)assign_ptr; | ||
410 | } | ||
411 | |||
412 | static void set_pmu_allocation_ptr_v2(struct nvgpu_pmu *pmu, | ||
413 | void **pmu_alloc_ptr, void *assign_ptr) | ||
414 | { | ||
415 | struct pmu_allocation_v2 **pmu_a_ptr = | ||
416 | (struct pmu_allocation_v2 **)pmu_alloc_ptr; | ||
417 | *pmu_a_ptr = (struct pmu_allocation_v2 *)assign_ptr; | ||
418 | } | ||
419 | |||
420 | static void set_pmu_allocation_ptr_v1(struct nvgpu_pmu *pmu, | ||
421 | void **pmu_alloc_ptr, void *assign_ptr) | ||
422 | { | ||
423 | struct pmu_allocation_v1 **pmu_a_ptr = | ||
424 | (struct pmu_allocation_v1 **)pmu_alloc_ptr; | ||
425 | *pmu_a_ptr = (struct pmu_allocation_v1 *)assign_ptr; | ||
426 | } | ||
427 | |||
428 | static void set_pmu_allocation_ptr_v0(struct nvgpu_pmu *pmu, | ||
429 | void **pmu_alloc_ptr, void *assign_ptr) | ||
430 | { | ||
431 | struct pmu_allocation_v0 **pmu_a_ptr = | ||
432 | (struct pmu_allocation_v0 **)pmu_alloc_ptr; | ||
433 | *pmu_a_ptr = (struct pmu_allocation_v0 *)assign_ptr; | ||
434 | } | ||
435 | |||
436 | static void pmu_allocation_set_dmem_size_v3(struct nvgpu_pmu *pmu, | ||
437 | void *pmu_alloc_ptr, u16 size) | ||
438 | { | ||
439 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
440 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
441 | pmu_a_ptr->alloc.dmem.size = size; | ||
442 | } | ||
443 | |||
444 | static void pmu_allocation_set_dmem_size_v2(struct nvgpu_pmu *pmu, | ||
445 | void *pmu_alloc_ptr, u16 size) | ||
446 | { | ||
447 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
448 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
449 | pmu_a_ptr->alloc.dmem.size = size; | ||
450 | } | ||
451 | |||
452 | static void pmu_allocation_set_dmem_size_v1(struct nvgpu_pmu *pmu, | ||
453 | void *pmu_alloc_ptr, u16 size) | ||
454 | { | ||
455 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
456 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
457 | pmu_a_ptr->alloc.dmem.size = size; | ||
458 | } | ||
459 | |||
460 | static void pmu_allocation_set_dmem_size_v0(struct nvgpu_pmu *pmu, | ||
461 | void *pmu_alloc_ptr, u16 size) | ||
462 | { | ||
463 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
464 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
465 | pmu_a_ptr->alloc.dmem.size = size; | ||
466 | } | ||
467 | |||
468 | static u16 pmu_allocation_get_dmem_size_v3(struct nvgpu_pmu *pmu, | ||
469 | void *pmu_alloc_ptr) | ||
470 | { | ||
471 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
472 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
473 | return pmu_a_ptr->alloc.dmem.size; | ||
474 | } | ||
475 | |||
476 | static u16 pmu_allocation_get_dmem_size_v2(struct nvgpu_pmu *pmu, | ||
477 | void *pmu_alloc_ptr) | ||
478 | { | ||
479 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
480 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
481 | return pmu_a_ptr->alloc.dmem.size; | ||
482 | } | ||
483 | |||
484 | static u16 pmu_allocation_get_dmem_size_v1(struct nvgpu_pmu *pmu, | ||
485 | void *pmu_alloc_ptr) | ||
486 | { | ||
487 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
488 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
489 | return pmu_a_ptr->alloc.dmem.size; | ||
490 | } | ||
491 | |||
492 | static u16 pmu_allocation_get_dmem_size_v0(struct nvgpu_pmu *pmu, | ||
493 | void *pmu_alloc_ptr) | ||
494 | { | ||
495 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
496 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
497 | return pmu_a_ptr->alloc.dmem.size; | ||
498 | } | ||
499 | |||
500 | static u32 pmu_allocation_get_dmem_offset_v3(struct nvgpu_pmu *pmu, | ||
501 | void *pmu_alloc_ptr) | ||
502 | { | ||
503 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
504 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
505 | return pmu_a_ptr->alloc.dmem.offset; | ||
506 | } | ||
507 | |||
508 | static u32 pmu_allocation_get_dmem_offset_v2(struct nvgpu_pmu *pmu, | ||
509 | void *pmu_alloc_ptr) | ||
510 | { | ||
511 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
512 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
513 | return pmu_a_ptr->alloc.dmem.offset; | ||
514 | } | ||
515 | |||
516 | static u32 pmu_allocation_get_dmem_offset_v1(struct nvgpu_pmu *pmu, | ||
517 | void *pmu_alloc_ptr) | ||
518 | { | ||
519 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
520 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
521 | return pmu_a_ptr->alloc.dmem.offset; | ||
522 | } | ||
523 | |||
524 | static u32 pmu_allocation_get_dmem_offset_v0(struct nvgpu_pmu *pmu, | ||
525 | void *pmu_alloc_ptr) | ||
526 | { | ||
527 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
528 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
529 | return pmu_a_ptr->alloc.dmem.offset; | ||
530 | } | ||
531 | |||
532 | static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct nvgpu_pmu *pmu, | ||
533 | void *pmu_alloc_ptr) | ||
534 | { | ||
535 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
536 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
537 | return &pmu_a_ptr->alloc.dmem.offset; | ||
538 | } | ||
539 | |||
540 | static void *pmu_allocation_get_fb_addr_v3( | ||
541 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) | ||
542 | { | ||
543 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
544 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
545 | return (void *)&pmu_a_ptr->alloc.fb; | ||
546 | } | ||
547 | |||
548 | static u32 pmu_allocation_get_fb_size_v3( | ||
549 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) | ||
550 | { | ||
551 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
552 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
553 | return sizeof(pmu_a_ptr->alloc.fb); | ||
554 | } | ||
555 | |||
556 | static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct nvgpu_pmu *pmu, | ||
557 | void *pmu_alloc_ptr) | ||
558 | { | ||
559 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
560 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
561 | return &pmu_a_ptr->alloc.dmem.offset; | ||
562 | } | ||
563 | |||
564 | static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct nvgpu_pmu *pmu, | ||
565 | void *pmu_alloc_ptr) | ||
566 | { | ||
567 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
568 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
569 | return &pmu_a_ptr->alloc.dmem.offset; | ||
570 | } | ||
571 | |||
572 | static u32 *pmu_allocation_get_dmem_offset_addr_v0(struct nvgpu_pmu *pmu, | ||
573 | void *pmu_alloc_ptr) | ||
574 | { | ||
575 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
576 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
577 | return &pmu_a_ptr->alloc.dmem.offset; | ||
578 | } | ||
579 | |||
580 | static void pmu_allocation_set_dmem_offset_v3(struct nvgpu_pmu *pmu, | ||
581 | void *pmu_alloc_ptr, u32 offset) | ||
582 | { | ||
583 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
584 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
585 | pmu_a_ptr->alloc.dmem.offset = offset; | ||
586 | } | ||
587 | |||
588 | static void pmu_allocation_set_dmem_offset_v2(struct nvgpu_pmu *pmu, | ||
589 | void *pmu_alloc_ptr, u32 offset) | ||
590 | { | ||
591 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
592 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
593 | pmu_a_ptr->alloc.dmem.offset = offset; | ||
594 | } | ||
595 | |||
596 | static void pmu_allocation_set_dmem_offset_v1(struct nvgpu_pmu *pmu, | ||
597 | void *pmu_alloc_ptr, u32 offset) | ||
598 | { | ||
599 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
600 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
601 | pmu_a_ptr->alloc.dmem.offset = offset; | ||
602 | } | ||
603 | |||
604 | static void pmu_allocation_set_dmem_offset_v0(struct nvgpu_pmu *pmu, | ||
605 | void *pmu_alloc_ptr, u32 offset) | ||
606 | { | ||
607 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
608 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
609 | pmu_a_ptr->alloc.dmem.offset = offset; | ||
610 | } | ||
611 | |||
612 | static void *get_pmu_msg_pmu_init_msg_ptr_v4(struct pmu_init_msg *init) | ||
613 | { | ||
614 | return (void *)(&(init->pmu_init_v4)); | ||
615 | } | ||
616 | |||
617 | static void *get_pmu_msg_pmu_init_msg_ptr_v3(struct pmu_init_msg *init) | ||
618 | { | ||
619 | return (void *)(&(init->pmu_init_v3)); | ||
620 | } | ||
621 | |||
622 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v4(union pmu_init_msg_pmu *init_msg) | ||
623 | { | ||
624 | struct pmu_init_msg_pmu_v4 *init = | ||
625 | (struct pmu_init_msg_pmu_v4 *)(&init_msg->v4); | ||
626 | |||
627 | return init->sw_managed_area_offset; | ||
628 | } | ||
629 | |||
630 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v3(union pmu_init_msg_pmu *init_msg) | ||
631 | { | ||
632 | struct pmu_init_msg_pmu_v3 *init = | ||
633 | (struct pmu_init_msg_pmu_v3 *)(&init_msg->v3); | ||
634 | |||
635 | return init->sw_managed_area_offset; | ||
636 | } | ||
637 | |||
638 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v4(union pmu_init_msg_pmu *init_msg) | ||
639 | { | ||
640 | struct pmu_init_msg_pmu_v4 *init = | ||
641 | (struct pmu_init_msg_pmu_v4 *)(&init_msg->v4); | ||
642 | |||
643 | return init->sw_managed_area_size; | ||
644 | } | ||
645 | |||
646 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v3(union pmu_init_msg_pmu *init_msg) | ||
647 | { | ||
648 | struct pmu_init_msg_pmu_v3 *init = | ||
649 | (struct pmu_init_msg_pmu_v3 *)(&init_msg->v3); | ||
650 | |||
651 | return init->sw_managed_area_size; | ||
652 | } | ||
653 | |||
654 | static void *get_pmu_msg_pmu_init_msg_ptr_v2(struct pmu_init_msg *init) | ||
655 | { | ||
656 | return (void *)(&(init->pmu_init_v2)); | ||
657 | } | ||
658 | |||
659 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v2(union pmu_init_msg_pmu *init_msg) | ||
660 | { | ||
661 | struct pmu_init_msg_pmu_v2 *init = | ||
662 | (struct pmu_init_msg_pmu_v2 *)(&init_msg->v1); | ||
663 | return init->sw_managed_area_offset; | ||
664 | } | ||
665 | |||
666 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v2(union pmu_init_msg_pmu *init_msg) | ||
667 | { | ||
668 | struct pmu_init_msg_pmu_v2 *init = | ||
669 | (struct pmu_init_msg_pmu_v2 *)(&init_msg->v1); | ||
670 | return init->sw_managed_area_size; | ||
671 | } | ||
672 | |||
673 | static void *get_pmu_msg_pmu_init_msg_ptr_v1(struct pmu_init_msg *init) | ||
674 | { | ||
675 | return (void *)(&(init->pmu_init_v1)); | ||
676 | } | ||
677 | |||
678 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v1(union pmu_init_msg_pmu *init_msg) | ||
679 | { | ||
680 | struct pmu_init_msg_pmu_v1 *init = | ||
681 | (struct pmu_init_msg_pmu_v1 *)(&init_msg->v1); | ||
682 | return init->sw_managed_area_offset; | ||
683 | } | ||
684 | |||
685 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v1(union pmu_init_msg_pmu *init_msg) | ||
686 | { | ||
687 | struct pmu_init_msg_pmu_v1 *init = | ||
688 | (struct pmu_init_msg_pmu_v1 *)(&init_msg->v1); | ||
689 | return init->sw_managed_area_size; | ||
690 | } | ||
691 | |||
692 | static void *get_pmu_msg_pmu_init_msg_ptr_v0(struct pmu_init_msg *init) | ||
693 | { | ||
694 | return (void *)(&(init->pmu_init_v0)); | ||
695 | } | ||
696 | |||
697 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v0(union pmu_init_msg_pmu *init_msg) | ||
698 | { | ||
699 | struct pmu_init_msg_pmu_v0 *init = | ||
700 | (struct pmu_init_msg_pmu_v0 *)(&init_msg->v0); | ||
701 | return init->sw_managed_area_offset; | ||
702 | } | ||
703 | |||
704 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v0(union pmu_init_msg_pmu *init_msg) | ||
705 | { | ||
706 | struct pmu_init_msg_pmu_v0 *init = | ||
707 | (struct pmu_init_msg_pmu_v0 *)(&init_msg->v0); | ||
708 | return init->sw_managed_area_size; | ||
709 | } | ||
710 | |||
711 | static u32 get_pmu_perfmon_cmd_start_size_v3(void) | ||
712 | { | ||
713 | return sizeof(struct pmu_perfmon_cmd_start_v3); | ||
714 | } | ||
715 | |||
716 | static u32 get_pmu_perfmon_cmd_start_size_v2(void) | ||
717 | { | ||
718 | return sizeof(struct pmu_perfmon_cmd_start_v2); | ||
719 | } | ||
720 | |||
721 | static u32 get_pmu_perfmon_cmd_start_size_v1(void) | ||
722 | { | ||
723 | return sizeof(struct pmu_perfmon_cmd_start_v1); | ||
724 | } | ||
725 | |||
726 | static u32 get_pmu_perfmon_cmd_start_size_v0(void) | ||
727 | { | ||
728 | return sizeof(struct pmu_perfmon_cmd_start_v0); | ||
729 | } | ||
730 | |||
731 | static int get_perfmon_cmd_start_offsetofvar_v3( | ||
732 | enum pmu_perfmon_cmd_start_fields field) | ||
733 | { | ||
734 | switch (field) { | ||
735 | case COUNTER_ALLOC: | ||
736 | return offsetof(struct pmu_perfmon_cmd_start_v3, | ||
737 | counter_alloc); | ||
738 | default: | ||
739 | return -EINVAL; | ||
740 | } | ||
741 | |||
742 | return 0; | ||
743 | } | ||
744 | |||
745 | static int get_perfmon_cmd_start_offsetofvar_v2( | ||
746 | enum pmu_perfmon_cmd_start_fields field) | ||
747 | { | ||
748 | switch (field) { | ||
749 | case COUNTER_ALLOC: | ||
750 | return offsetof(struct pmu_perfmon_cmd_start_v2, | ||
751 | counter_alloc); | ||
752 | default: | ||
753 | return -EINVAL; | ||
754 | } | ||
755 | |||
756 | return 0; | ||
757 | } | ||
758 | |||
759 | static int get_perfmon_cmd_start_offsetofvar_v1( | ||
760 | enum pmu_perfmon_cmd_start_fields field) | ||
761 | { | ||
762 | switch (field) { | ||
763 | case COUNTER_ALLOC: | ||
764 | return offsetof(struct pmu_perfmon_cmd_start_v1, | ||
765 | counter_alloc); | ||
766 | default: | ||
767 | return -EINVAL; | ||
768 | } | ||
769 | |||
770 | return 0; | ||
771 | } | ||
772 | |||
773 | static int get_perfmon_cmd_start_offsetofvar_v0( | ||
774 | enum pmu_perfmon_cmd_start_fields field) | ||
775 | { | ||
776 | switch (field) { | ||
777 | case COUNTER_ALLOC: | ||
778 | return offsetof(struct pmu_perfmon_cmd_start_v0, | ||
779 | counter_alloc); | ||
780 | default: | ||
781 | return -EINVAL; | ||
782 | break; | ||
783 | } | ||
784 | return 0; | ||
785 | } | ||
786 | |||
787 | static u32 get_pmu_perfmon_cmd_init_size_v3(void) | ||
788 | { | ||
789 | return sizeof(struct pmu_perfmon_cmd_init_v3); | ||
790 | } | ||
791 | |||
792 | static u32 get_pmu_perfmon_cmd_init_size_v2(void) | ||
793 | { | ||
794 | return sizeof(struct pmu_perfmon_cmd_init_v2); | ||
795 | } | ||
796 | |||
797 | static u32 get_pmu_perfmon_cmd_init_size_v1(void) | ||
798 | { | ||
799 | return sizeof(struct pmu_perfmon_cmd_init_v1); | ||
800 | } | ||
801 | |||
802 | static u32 get_pmu_perfmon_cmd_init_size_v0(void) | ||
803 | { | ||
804 | return sizeof(struct pmu_perfmon_cmd_init_v0); | ||
805 | } | ||
806 | |||
807 | static int get_perfmon_cmd_init_offsetofvar_v3( | ||
808 | enum pmu_perfmon_cmd_start_fields field) | ||
809 | { | ||
810 | switch (field) { | ||
811 | case COUNTER_ALLOC: | ||
812 | return offsetof(struct pmu_perfmon_cmd_init_v3, | ||
813 | counter_alloc); | ||
814 | default: | ||
815 | return -EINVAL; | ||
816 | } | ||
817 | return 0; | ||
818 | } | ||
819 | |||
820 | static int get_perfmon_cmd_init_offsetofvar_v2( | ||
821 | enum pmu_perfmon_cmd_start_fields field) | ||
822 | { | ||
823 | switch (field) { | ||
824 | case COUNTER_ALLOC: | ||
825 | return offsetof(struct pmu_perfmon_cmd_init_v2, | ||
826 | counter_alloc); | ||
827 | default: | ||
828 | return -EINVAL; | ||
829 | break; | ||
830 | } | ||
831 | return 0; | ||
832 | } | ||
833 | |||
834 | static int get_perfmon_cmd_init_offsetofvar_v1( | ||
835 | enum pmu_perfmon_cmd_start_fields field) | ||
836 | { | ||
837 | switch (field) { | ||
838 | case COUNTER_ALLOC: | ||
839 | return offsetof(struct pmu_perfmon_cmd_init_v1, | ||
840 | counter_alloc); | ||
841 | default: | ||
842 | return -EINVAL; | ||
843 | break; | ||
844 | } | ||
845 | return 0; | ||
846 | } | ||
847 | |||
848 | static int get_perfmon_cmd_init_offsetofvar_v0( | ||
849 | enum pmu_perfmon_cmd_start_fields field) | ||
850 | { | ||
851 | switch (field) { | ||
852 | case COUNTER_ALLOC: | ||
853 | return offsetof(struct pmu_perfmon_cmd_init_v0, | ||
854 | counter_alloc); | ||
855 | default: | ||
856 | return -EINVAL; | ||
857 | break; | ||
858 | } | ||
859 | return 0; | ||
860 | } | ||
861 | |||
862 | static void perfmon_start_set_cmd_type_v3(struct pmu_perfmon_cmd *pc, u8 value) | ||
863 | { | ||
864 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
865 | |||
866 | start->cmd_type = value; | ||
867 | } | ||
868 | |||
869 | static void perfmon_start_set_cmd_type_v2(struct pmu_perfmon_cmd *pc, u8 value) | ||
870 | { | ||
871 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
872 | start->cmd_type = value; | ||
873 | } | ||
874 | |||
875 | static void perfmon_start_set_cmd_type_v1(struct pmu_perfmon_cmd *pc, u8 value) | ||
876 | { | ||
877 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
878 | start->cmd_type = value; | ||
879 | } | ||
880 | |||
881 | static void perfmon_start_set_cmd_type_v0(struct pmu_perfmon_cmd *pc, u8 value) | ||
882 | { | ||
883 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
884 | start->cmd_type = value; | ||
885 | } | ||
886 | |||
887 | static void perfmon_start_set_group_id_v3(struct pmu_perfmon_cmd *pc, u8 value) | ||
888 | { | ||
889 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
890 | |||
891 | start->group_id = value; | ||
892 | } | ||
893 | |||
894 | static void perfmon_start_set_group_id_v2(struct pmu_perfmon_cmd *pc, u8 value) | ||
895 | { | ||
896 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
897 | start->group_id = value; | ||
898 | } | ||
899 | |||
900 | static void perfmon_start_set_group_id_v1(struct pmu_perfmon_cmd *pc, u8 value) | ||
901 | { | ||
902 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
903 | start->group_id = value; | ||
904 | } | ||
905 | |||
906 | static void perfmon_start_set_group_id_v0(struct pmu_perfmon_cmd *pc, u8 value) | ||
907 | { | ||
908 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
909 | start->group_id = value; | ||
910 | } | ||
911 | |||
912 | static void perfmon_start_set_state_id_v3(struct pmu_perfmon_cmd *pc, u8 value) | ||
913 | { | ||
914 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
915 | |||
916 | start->state_id = value; | ||
917 | } | ||
918 | |||
919 | static void perfmon_start_set_state_id_v2(struct pmu_perfmon_cmd *pc, u8 value) | ||
920 | { | ||
921 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
922 | start->state_id = value; | ||
923 | } | ||
924 | |||
925 | static void perfmon_start_set_state_id_v1(struct pmu_perfmon_cmd *pc, u8 value) | ||
926 | { | ||
927 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
928 | start->state_id = value; | ||
929 | } | ||
930 | |||
931 | static void perfmon_start_set_state_id_v0(struct pmu_perfmon_cmd *pc, u8 value) | ||
932 | { | ||
933 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
934 | start->state_id = value; | ||
935 | } | ||
936 | |||
937 | static void perfmon_start_set_flags_v3(struct pmu_perfmon_cmd *pc, u8 value) | ||
938 | { | ||
939 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
940 | |||
941 | start->flags = value; | ||
942 | } | ||
943 | |||
944 | static void perfmon_start_set_flags_v2(struct pmu_perfmon_cmd *pc, u8 value) | ||
945 | { | ||
946 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
947 | start->flags = value; | ||
948 | } | ||
949 | |||
950 | static void perfmon_start_set_flags_v1(struct pmu_perfmon_cmd *pc, u8 value) | ||
951 | { | ||
952 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
953 | start->flags = value; | ||
954 | } | ||
955 | |||
956 | static void perfmon_start_set_flags_v0(struct pmu_perfmon_cmd *pc, u8 value) | ||
957 | { | ||
958 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
959 | start->flags = value; | ||
960 | } | ||
961 | |||
962 | static u8 perfmon_start_get_flags_v3(struct pmu_perfmon_cmd *pc) | ||
963 | { | ||
964 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
965 | |||
966 | return start->flags; | ||
967 | } | ||
968 | |||
969 | static u8 perfmon_start_get_flags_v2(struct pmu_perfmon_cmd *pc) | ||
970 | { | ||
971 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
972 | return start->flags; | ||
973 | } | ||
974 | |||
975 | static u8 perfmon_start_get_flags_v1(struct pmu_perfmon_cmd *pc) | ||
976 | { | ||
977 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
978 | return start->flags; | ||
979 | } | ||
980 | |||
981 | static u8 perfmon_start_get_flags_v0(struct pmu_perfmon_cmd *pc) | ||
982 | { | ||
983 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
984 | return start->flags; | ||
985 | } | ||
986 | |||
987 | static void perfmon_cmd_init_set_sample_buffer_v3(struct pmu_perfmon_cmd *pc, | ||
988 | u16 value) | ||
989 | { | ||
990 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
991 | |||
992 | init->sample_buffer = value; | ||
993 | } | ||
994 | |||
995 | static void perfmon_cmd_init_set_sample_buffer_v2(struct pmu_perfmon_cmd *pc, | ||
996 | u16 value) | ||
997 | { | ||
998 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
999 | init->sample_buffer = value; | ||
1000 | } | ||
1001 | |||
1002 | |||
1003 | static void perfmon_cmd_init_set_sample_buffer_v1(struct pmu_perfmon_cmd *pc, | ||
1004 | u16 value) | ||
1005 | { | ||
1006 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1007 | init->sample_buffer = value; | ||
1008 | } | ||
1009 | |||
1010 | static void perfmon_cmd_init_set_sample_buffer_v0(struct pmu_perfmon_cmd *pc, | ||
1011 | u16 value) | ||
1012 | { | ||
1013 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1014 | init->sample_buffer = value; | ||
1015 | } | ||
1016 | |||
1017 | static void perfmon_cmd_init_set_dec_cnt_v3(struct pmu_perfmon_cmd *pc, | ||
1018 | u8 value) | ||
1019 | { | ||
1020 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1021 | |||
1022 | init->to_decrease_count = value; | ||
1023 | } | ||
1024 | |||
1025 | static void perfmon_cmd_init_set_dec_cnt_v2(struct pmu_perfmon_cmd *pc, | ||
1026 | u8 value) | ||
1027 | { | ||
1028 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1029 | init->to_decrease_count = value; | ||
1030 | } | ||
1031 | |||
1032 | static void perfmon_cmd_init_set_dec_cnt_v1(struct pmu_perfmon_cmd *pc, | ||
1033 | u8 value) | ||
1034 | { | ||
1035 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1036 | init->to_decrease_count = value; | ||
1037 | } | ||
1038 | |||
1039 | static void perfmon_cmd_init_set_dec_cnt_v0(struct pmu_perfmon_cmd *pc, | ||
1040 | u8 value) | ||
1041 | { | ||
1042 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1043 | init->to_decrease_count = value; | ||
1044 | } | ||
1045 | |||
1046 | static void perfmon_cmd_init_set_base_cnt_id_v3(struct pmu_perfmon_cmd *pc, | ||
1047 | u8 value) | ||
1048 | { | ||
1049 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1050 | |||
1051 | init->base_counter_id = value; | ||
1052 | } | ||
1053 | |||
1054 | static void perfmon_cmd_init_set_base_cnt_id_v2(struct pmu_perfmon_cmd *pc, | ||
1055 | u8 value) | ||
1056 | { | ||
1057 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1058 | init->base_counter_id = value; | ||
1059 | } | ||
1060 | |||
1061 | static void perfmon_cmd_init_set_base_cnt_id_v1(struct pmu_perfmon_cmd *pc, | ||
1062 | u8 value) | ||
1063 | { | ||
1064 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1065 | init->base_counter_id = value; | ||
1066 | } | ||
1067 | |||
1068 | static void perfmon_cmd_init_set_base_cnt_id_v0(struct pmu_perfmon_cmd *pc, | ||
1069 | u8 value) | ||
1070 | { | ||
1071 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1072 | init->base_counter_id = value; | ||
1073 | } | ||
1074 | |||
1075 | static void perfmon_cmd_init_set_samp_period_us_v3(struct pmu_perfmon_cmd *pc, | ||
1076 | u32 value) | ||
1077 | { | ||
1078 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1079 | |||
1080 | init->sample_period_us = value; | ||
1081 | } | ||
1082 | |||
1083 | static void perfmon_cmd_init_set_samp_period_us_v2(struct pmu_perfmon_cmd *pc, | ||
1084 | u32 value) | ||
1085 | { | ||
1086 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1087 | init->sample_period_us = value; | ||
1088 | } | ||
1089 | |||
1090 | static void perfmon_cmd_init_set_samp_period_us_v1(struct pmu_perfmon_cmd *pc, | ||
1091 | u32 value) | ||
1092 | { | ||
1093 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1094 | init->sample_period_us = value; | ||
1095 | } | ||
1096 | |||
1097 | static void perfmon_cmd_init_set_samp_period_us_v0(struct pmu_perfmon_cmd *pc, | ||
1098 | u32 value) | ||
1099 | { | ||
1100 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1101 | init->sample_period_us = value; | ||
1102 | } | ||
1103 | |||
1104 | static void perfmon_cmd_init_set_num_cnt_v3(struct pmu_perfmon_cmd *pc, | ||
1105 | u8 value) | ||
1106 | { | ||
1107 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1108 | |||
1109 | init->num_counters = value; | ||
1110 | } | ||
1111 | |||
1112 | static void perfmon_cmd_init_set_num_cnt_v2(struct pmu_perfmon_cmd *pc, | ||
1113 | u8 value) | ||
1114 | { | ||
1115 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1116 | init->num_counters = value; | ||
1117 | } | ||
1118 | |||
1119 | static void perfmon_cmd_init_set_num_cnt_v1(struct pmu_perfmon_cmd *pc, | ||
1120 | u8 value) | ||
1121 | { | ||
1122 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1123 | init->num_counters = value; | ||
1124 | } | ||
1125 | |||
1126 | static void perfmon_cmd_init_set_num_cnt_v0(struct pmu_perfmon_cmd *pc, | ||
1127 | u8 value) | ||
1128 | { | ||
1129 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1130 | init->num_counters = value; | ||
1131 | } | ||
1132 | |||
1133 | static void perfmon_cmd_init_set_mov_avg_v3(struct pmu_perfmon_cmd *pc, | ||
1134 | u8 value) | ||
1135 | { | ||
1136 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1137 | |||
1138 | init->samples_in_moving_avg = value; | ||
1139 | } | ||
1140 | |||
1141 | static void perfmon_cmd_init_set_mov_avg_v2(struct pmu_perfmon_cmd *pc, | ||
1142 | u8 value) | ||
1143 | { | ||
1144 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1145 | init->samples_in_moving_avg = value; | ||
1146 | } | ||
1147 | |||
1148 | static void perfmon_cmd_init_set_mov_avg_v1(struct pmu_perfmon_cmd *pc, | ||
1149 | u8 value) | ||
1150 | { | ||
1151 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1152 | init->samples_in_moving_avg = value; | ||
1153 | } | ||
1154 | |||
1155 | static void perfmon_cmd_init_set_mov_avg_v0(struct pmu_perfmon_cmd *pc, | ||
1156 | u8 value) | ||
1157 | { | ||
1158 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1159 | init->samples_in_moving_avg = value; | ||
1160 | } | ||
1161 | |||
1162 | static void get_pmu_init_msg_pmu_queue_params_v0(struct pmu_queue *queue, | ||
1163 | u32 id, void *pmu_init_msg) | ||
1164 | { | ||
1165 | struct pmu_init_msg_pmu_v0 *init = | ||
1166 | (struct pmu_init_msg_pmu_v0 *)pmu_init_msg; | ||
1167 | queue->index = init->queue_info[id].index; | ||
1168 | queue->offset = init->queue_info[id].offset; | ||
1169 | queue->size = init->queue_info[id].size; | ||
1170 | } | ||
1171 | |||
1172 | static void get_pmu_init_msg_pmu_queue_params_v1(struct pmu_queue *queue, | ||
1173 | u32 id, void *pmu_init_msg) | ||
1174 | { | ||
1175 | struct pmu_init_msg_pmu_v1 *init = | ||
1176 | (struct pmu_init_msg_pmu_v1 *)pmu_init_msg; | ||
1177 | queue->index = init->queue_info[id].index; | ||
1178 | queue->offset = init->queue_info[id].offset; | ||
1179 | queue->size = init->queue_info[id].size; | ||
1180 | } | ||
1181 | |||
1182 | static void get_pmu_init_msg_pmu_queue_params_v2(struct pmu_queue *queue, | ||
1183 | u32 id, void *pmu_init_msg) | ||
1184 | { | ||
1185 | struct pmu_init_msg_pmu_v2 *init = | ||
1186 | (struct pmu_init_msg_pmu_v2 *)pmu_init_msg; | ||
1187 | queue->index = init->queue_info[id].index; | ||
1188 | queue->offset = init->queue_info[id].offset; | ||
1189 | queue->size = init->queue_info[id].size; | ||
1190 | } | ||
1191 | |||
1192 | static void get_pmu_init_msg_pmu_queue_params_v4(struct pmu_queue *queue, | ||
1193 | u32 id, void *pmu_init_msg) | ||
1194 | { | ||
1195 | struct pmu_init_msg_pmu_v4 *init = pmu_init_msg; | ||
1196 | u32 current_ptr = 0; | ||
1197 | u8 i; | ||
1198 | u8 tmp_id = id; | ||
1199 | |||
1200 | if (tmp_id == PMU_COMMAND_QUEUE_HPQ) | ||
1201 | tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3; | ||
1202 | else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) | ||
1203 | tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3; | ||
1204 | else if (tmp_id == PMU_MESSAGE_QUEUE) | ||
1205 | tmp_id = PMU_QUEUE_MSG_IDX_FOR_V3; | ||
1206 | else | ||
1207 | return; | ||
1208 | |||
1209 | queue->index = init->queue_index[tmp_id]; | ||
1210 | queue->size = init->queue_size[tmp_id]; | ||
1211 | if (tmp_id != 0) { | ||
1212 | for (i = 0 ; i < tmp_id; i++) | ||
1213 | current_ptr += init->queue_size[i]; | ||
1214 | } | ||
1215 | queue->offset = init->queue_offset + current_ptr; | ||
1216 | } | ||
1217 | static void get_pmu_init_msg_pmu_queue_params_v3(struct pmu_queue *queue, | ||
1218 | u32 id, void *pmu_init_msg) | ||
1219 | { | ||
1220 | struct pmu_init_msg_pmu_v3 *init = | ||
1221 | (struct pmu_init_msg_pmu_v3 *)pmu_init_msg; | ||
1222 | u32 current_ptr = 0; | ||
1223 | u8 i; | ||
1224 | u8 tmp_id = id; | ||
1225 | |||
1226 | if (tmp_id == PMU_COMMAND_QUEUE_HPQ) | ||
1227 | tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3; | ||
1228 | else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) | ||
1229 | tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3; | ||
1230 | else if (tmp_id == PMU_MESSAGE_QUEUE) | ||
1231 | tmp_id = PMU_QUEUE_MSG_IDX_FOR_V3; | ||
1232 | else | ||
1233 | return; | ||
1234 | queue->index = init->queue_index[tmp_id]; | ||
1235 | queue->size = init->queue_size[tmp_id]; | ||
1236 | if (tmp_id != 0) { | ||
1237 | for (i = 0 ; i < tmp_id; i++) | ||
1238 | current_ptr += init->queue_size[i]; | ||
1239 | } | ||
1240 | queue->offset = init->queue_offset + current_ptr; | ||
1241 | } | ||
1242 | |||
1243 | static void *get_pmu_sequence_in_alloc_ptr_v3(struct pmu_sequence *seq) | ||
1244 | { | ||
1245 | return (void *)(&seq->in_v3); | ||
1246 | } | ||
1247 | |||
1248 | static void *get_pmu_sequence_in_alloc_ptr_v1(struct pmu_sequence *seq) | ||
1249 | { | ||
1250 | return (void *)(&seq->in_v1); | ||
1251 | } | ||
1252 | |||
1253 | static void *get_pmu_sequence_in_alloc_ptr_v0(struct pmu_sequence *seq) | ||
1254 | { | ||
1255 | return (void *)(&seq->in_v0); | ||
1256 | } | ||
1257 | |||
1258 | static void *get_pmu_sequence_out_alloc_ptr_v3(struct pmu_sequence *seq) | ||
1259 | { | ||
1260 | return (void *)(&seq->out_v3); | ||
1261 | } | ||
1262 | |||
1263 | static void *get_pmu_sequence_out_alloc_ptr_v1(struct pmu_sequence *seq) | ||
1264 | { | ||
1265 | return (void *)(&seq->out_v1); | ||
1266 | } | ||
1267 | |||
1268 | static void *get_pmu_sequence_out_alloc_ptr_v0(struct pmu_sequence *seq) | ||
1269 | { | ||
1270 | return (void *)(&seq->out_v0); | ||
1271 | } | ||
1272 | |||
1273 | static u8 pg_cmd_eng_buf_load_size_v0(struct pmu_pg_cmd *pg) | ||
1274 | { | ||
1275 | return sizeof(pg->eng_buf_load_v0); | ||
1276 | } | ||
1277 | |||
1278 | static u8 pg_cmd_eng_buf_load_size_v1(struct pmu_pg_cmd *pg) | ||
1279 | { | ||
1280 | return sizeof(pg->eng_buf_load_v1); | ||
1281 | } | ||
1282 | |||
1283 | static u8 pg_cmd_eng_buf_load_size_v2(struct pmu_pg_cmd *pg) | ||
1284 | { | ||
1285 | return sizeof(pg->eng_buf_load_v2); | ||
1286 | } | ||
1287 | |||
/* Set the PG command type byte in the v0 engine-buffer-load command. */
static void pg_cmd_eng_buf_load_set_cmd_type_v0(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v0.cmd_type = value;
}

/* Set the PG command type byte in the v1 engine-buffer-load command. */
static void pg_cmd_eng_buf_load_set_cmd_type_v1(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v1.cmd_type = value;
}

/* Set the PG command type byte in the v2 engine-buffer-load command. */
static void pg_cmd_eng_buf_load_set_cmd_type_v2(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v2.cmd_type = value;
}
1305 | |||
/* Set the target engine id in the v0 engine-buffer-load command. */
static void pg_cmd_eng_buf_load_set_engine_id_v0(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v0.engine_id = value;
}

/* Set the target engine id in the v1 engine-buffer-load command. */
static void pg_cmd_eng_buf_load_set_engine_id_v1(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v1.engine_id = value;
}

/* Set the target engine id in the v2 engine-buffer-load command. */
static void pg_cmd_eng_buf_load_set_engine_id_v2(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v2.engine_id = value;
}
/* Set the engine buffer index in the v0 engine-buffer-load command. */
static void pg_cmd_eng_buf_load_set_buf_idx_v0(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v0.buf_idx = value;
}

/* Set the engine buffer index in the v1 engine-buffer-load command. */
static void pg_cmd_eng_buf_load_set_buf_idx_v1(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v1.buf_idx = value;
}

/* Set the engine buffer index in the v2 engine-buffer-load command. */
static void pg_cmd_eng_buf_load_set_buf_idx_v2(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v2.buf_idx = value;
}
1336 | |||
/* Set the pad byte in the v0 engine-buffer-load command. */
static void pg_cmd_eng_buf_load_set_pad_v0(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v0.pad = value;
}

/* Set the pad byte in the v1 engine-buffer-load command. */
static void pg_cmd_eng_buf_load_set_pad_v1(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v1.pad = value;
}

/* Set the pad byte in the v2 engine-buffer-load command. */
static void pg_cmd_eng_buf_load_set_pad_v2(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v2.pad = value;
}
1352 | |||
/* Set the engine buffer size in the v0 command (dedicated buf_size field). */
static void pg_cmd_eng_buf_load_set_buf_size_v0(struct pmu_pg_cmd *pg,
	u16 value)
{
	pg->eng_buf_load_v0.buf_size = value;
}
/* v1 carries the size inside the DMA descriptor instead. */
static void pg_cmd_eng_buf_load_set_buf_size_v1(struct pmu_pg_cmd *pg,
	u16 value)
{
	pg->eng_buf_load_v1.dma_desc.dma_size = value;
}
/* v2 packs the size into the low bits of dma_desc.params.
 * NOTE(review): this is a plain assignment while set_dma_idx_v2 ORs into
 * bits 31:24 of the same word — callers must set buf_size before dma_idx
 * or the index is clobbered; confirm call ordering.
 */
static void pg_cmd_eng_buf_load_set_buf_size_v2(struct pmu_pg_cmd *pg,
	u16 value)
{
	pg->eng_buf_load_v2.dma_desc.params = value;
}
1368 | |||
/* Program the engine buffer DMA base in the v0 command.
 * v0 stores a 256-byte-aligned base, so the address is shifted down by 8.
 */
static void pg_cmd_eng_buf_load_set_dma_base_v0(struct pmu_pg_cmd *pg,
	u32 value)
{
	pg->eng_buf_load_v0.dma_base = (value >> 8);
}
/* v1: OR the base into the descriptor's lo/hi address words.
 * NOTE(review): value is u32, so u64_hi32(value) is always 0 and the |=
 * into .hi is a no-op unless the field was pre-loaded — confirm intent.
 */
static void pg_cmd_eng_buf_load_set_dma_base_v1(struct pmu_pg_cmd *pg,
	u32 value)
{
	pg->eng_buf_load_v1.dma_desc.dma_addr.lo |= u64_lo32(value);
	pg->eng_buf_load_v1.dma_desc.dma_addr.hi |= u64_hi32(value);
}
/* v2: plain assignment of the descriptor address words.
 * NOTE(review): .hi is assigned u64_lo32(value), not u64_hi32 — for a u32
 * argument this duplicates the low word into .hi.  Looks like a copy-paste
 * slip; left as-is since the PMU firmware contract is not visible here —
 * verify before changing.
 */
static void pg_cmd_eng_buf_load_set_dma_base_v2(struct pmu_pg_cmd *pg,
	u32 value)
{
	pg->eng_buf_load_v2.dma_desc.address.lo = u64_lo32(value);
	pg->eng_buf_load_v2.dma_desc.address.hi = u64_lo32(value);
}
1386 | |||
/* Set the DMA offset in the v0 command (dedicated dma_offset field). */
static void pg_cmd_eng_buf_load_set_dma_offset_v0(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v0.dma_offset = value;
}
/* v1: OR the offset into the low address word.
 * NOTE(review): assumes set_dma_base_v1 was called first and the low bits
 * of the base are clear, so |= composes base and offset — confirm ordering.
 */
static void pg_cmd_eng_buf_load_set_dma_offset_v1(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v1.dma_desc.dma_addr.lo |= value;
}
/* v2: OR the offset into both address words.
 * NOTE(review): value is u8, so both u64_lo32(value) terms are the same
 * byte; ORing it into .hi as well mirrors the dma_base_v2 quirk above —
 * verify against PMU f/w expectations.
 */
static void pg_cmd_eng_buf_load_set_dma_offset_v2(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v2.dma_desc.address.lo |= u64_lo32(value);
	pg->eng_buf_load_v2.dma_desc.address.hi |= u64_lo32(value);
}
1403 | |||
1404 | static void pg_cmd_eng_buf_load_set_dma_idx_v0(struct pmu_pg_cmd *pg, | ||
1405 | u8 value) | ||
1406 | { | ||
1407 | pg->eng_buf_load_v0.dma_idx = value; | ||
1408 | } | ||
1409 | static void pg_cmd_eng_buf_load_set_dma_idx_v1(struct pmu_pg_cmd *pg, | ||
1410 | u8 value) | ||
1411 | { | ||
1412 | pg->eng_buf_load_v1.dma_desc.dma_idx = value; | ||
1413 | } | ||
1414 | static void pg_cmd_eng_buf_load_set_dma_idx_v2(struct pmu_pg_cmd *pg, | ||
1415 | u8 value) | ||
1416 | { | ||
1417 | pg->eng_buf_load_v2.dma_desc.params |= (value << 24); | ||
1418 | } | ||
1419 | |||
1420 | int gk20a_init_pmu(struct nvgpu_pmu *pmu) | ||
1421 | { | ||
1422 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
1423 | struct pmu_v *pv = &g->ops.pmu_ver; | ||
1424 | int err; | ||
1425 | |||
1426 | err = nvgpu_mutex_init(&pmu->elpg_mutex); | ||
1427 | if (err) | ||
1428 | return err; | ||
1429 | |||
1430 | err = nvgpu_mutex_init(&pmu->pg_mutex); | ||
1431 | if (err) | ||
1432 | goto fail_elpg; | ||
1433 | |||
1434 | err = nvgpu_mutex_init(&pmu->isr_mutex); | ||
1435 | if (err) | ||
1436 | goto fail_pg; | ||
1437 | |||
1438 | err = nvgpu_mutex_init(&pmu->pmu_copy_lock); | ||
1439 | if (err) | ||
1440 | goto fail_isr; | ||
1441 | |||
1442 | err = nvgpu_mutex_init(&pmu->pmu_seq_lock); | ||
1443 | if (err) | ||
1444 | goto fail_pmu_copy; | ||
1445 | |||
1446 | pmu->remove_support = gk20a_remove_pmu_support; | ||
1447 | |||
1448 | switch (pmu->desc->app_version) { | ||
1449 | case APP_VERSION_NC_2: | ||
1450 | case APP_VERSION_NC_1: | ||
1451 | case APP_VERSION_NC_0: | ||
1452 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1453 | pg_cmd_eng_buf_load_size_v1; | ||
1454 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1455 | pg_cmd_eng_buf_load_set_cmd_type_v1; | ||
1456 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1457 | pg_cmd_eng_buf_load_set_engine_id_v1; | ||
1458 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1459 | pg_cmd_eng_buf_load_set_buf_idx_v1; | ||
1460 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1461 | pg_cmd_eng_buf_load_set_pad_v1; | ||
1462 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1463 | pg_cmd_eng_buf_load_set_buf_size_v1; | ||
1464 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1465 | pg_cmd_eng_buf_load_set_dma_base_v1; | ||
1466 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1467 | pg_cmd_eng_buf_load_set_dma_offset_v1; | ||
1468 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1469 | pg_cmd_eng_buf_load_set_dma_idx_v1; | ||
1470 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1471 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1472 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1473 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1474 | set_perfmon_cntr_valid_v2; | ||
1475 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1476 | set_perfmon_cntr_index_v2; | ||
1477 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1478 | set_perfmon_cntr_group_id_v2; | ||
1479 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1480 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1481 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1482 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1483 | pmu_cmdline_size_v4; | ||
1484 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1485 | set_pmu_cmdline_args_cpufreq_v4; | ||
1486 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1487 | set_pmu_cmdline_args_secure_mode_v4; | ||
1488 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1489 | set_pmu_cmdline_args_falctracesize_v4; | ||
1490 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1491 | set_pmu_cmdline_args_falctracedmabase_v4; | ||
1492 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1493 | set_pmu_cmdline_args_falctracedmaidx_v4; | ||
1494 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1495 | get_pmu_cmdline_args_ptr_v4; | ||
1496 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1497 | get_pmu_allocation_size_v2; | ||
1498 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1499 | set_pmu_allocation_ptr_v2; | ||
1500 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1501 | pmu_allocation_set_dmem_size_v2; | ||
1502 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1503 | pmu_allocation_get_dmem_size_v2; | ||
1504 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1505 | pmu_allocation_get_dmem_offset_v2; | ||
1506 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1507 | pmu_allocation_get_dmem_offset_addr_v2; | ||
1508 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1509 | pmu_allocation_set_dmem_offset_v2; | ||
1510 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1511 | get_pmu_init_msg_pmu_queue_params_v1; | ||
1512 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1513 | get_pmu_msg_pmu_init_msg_ptr_v1; | ||
1514 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1515 | get_pmu_init_msg_pmu_sw_mg_off_v1; | ||
1516 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1517 | get_pmu_init_msg_pmu_sw_mg_size_v1; | ||
1518 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1519 | get_pmu_perfmon_cmd_start_size_v2; | ||
1520 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1521 | get_perfmon_cmd_start_offsetofvar_v2; | ||
1522 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1523 | perfmon_start_set_cmd_type_v2; | ||
1524 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1525 | perfmon_start_set_group_id_v2; | ||
1526 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1527 | perfmon_start_set_state_id_v2; | ||
1528 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1529 | perfmon_start_set_flags_v2; | ||
1530 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1531 | perfmon_start_get_flags_v2; | ||
1532 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1533 | get_pmu_perfmon_cmd_init_size_v2; | ||
1534 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1535 | get_perfmon_cmd_init_offsetofvar_v2; | ||
1536 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1537 | perfmon_cmd_init_set_sample_buffer_v2; | ||
1538 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1539 | perfmon_cmd_init_set_dec_cnt_v2; | ||
1540 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1541 | perfmon_cmd_init_set_base_cnt_id_v2; | ||
1542 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1543 | perfmon_cmd_init_set_samp_period_us_v2; | ||
1544 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1545 | perfmon_cmd_init_set_num_cnt_v2; | ||
1546 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1547 | perfmon_cmd_init_set_mov_avg_v2; | ||
1548 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1549 | get_pmu_sequence_in_alloc_ptr_v1; | ||
1550 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1551 | get_pmu_sequence_out_alloc_ptr_v1; | ||
1552 | break; | ||
1553 | case APP_VERSION_NC_3: | ||
1554 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1555 | pg_cmd_eng_buf_load_size_v2; | ||
1556 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1557 | pg_cmd_eng_buf_load_set_cmd_type_v2; | ||
1558 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1559 | pg_cmd_eng_buf_load_set_engine_id_v2; | ||
1560 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1561 | pg_cmd_eng_buf_load_set_buf_idx_v2; | ||
1562 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1563 | pg_cmd_eng_buf_load_set_pad_v2; | ||
1564 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1565 | pg_cmd_eng_buf_load_set_buf_size_v2; | ||
1566 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1567 | pg_cmd_eng_buf_load_set_dma_base_v2; | ||
1568 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1569 | pg_cmd_eng_buf_load_set_dma_offset_v2; | ||
1570 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1571 | pg_cmd_eng_buf_load_set_dma_idx_v2; | ||
1572 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1573 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1574 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1575 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1576 | set_perfmon_cntr_valid_v2; | ||
1577 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1578 | set_perfmon_cntr_index_v2; | ||
1579 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1580 | set_perfmon_cntr_group_id_v2; | ||
1581 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1582 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1583 | g->ops.pmu_ver.is_pmu_zbc_save_supported = false; | ||
1584 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1585 | pmu_cmdline_size_v6; | ||
1586 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1587 | set_pmu_cmdline_args_cpufreq_v5; | ||
1588 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1589 | set_pmu_cmdline_args_secure_mode_v5; | ||
1590 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1591 | set_pmu_cmdline_args_falctracesize_v5; | ||
1592 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1593 | set_pmu_cmdline_args_falctracedmabase_v5; | ||
1594 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1595 | set_pmu_cmdline_args_falctracedmaidx_v5; | ||
1596 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1597 | get_pmu_cmdline_args_ptr_v5; | ||
1598 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1599 | get_pmu_allocation_size_v3; | ||
1600 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1601 | set_pmu_allocation_ptr_v3; | ||
1602 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1603 | pmu_allocation_set_dmem_size_v3; | ||
1604 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1605 | pmu_allocation_get_dmem_size_v3; | ||
1606 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1607 | pmu_allocation_get_dmem_offset_v3; | ||
1608 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1609 | pmu_allocation_get_dmem_offset_addr_v3; | ||
1610 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1611 | pmu_allocation_set_dmem_offset_v3; | ||
1612 | g->ops.pmu_ver.pmu_allocation_get_fb_addr = | ||
1613 | pmu_allocation_get_fb_addr_v3; | ||
1614 | g->ops.pmu_ver.pmu_allocation_get_fb_size = | ||
1615 | pmu_allocation_get_fb_size_v3; | ||
1616 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1617 | get_pmu_init_msg_pmu_queue_params_v4; | ||
1618 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1619 | get_pmu_msg_pmu_init_msg_ptr_v4; | ||
1620 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1621 | get_pmu_init_msg_pmu_sw_mg_off_v4; | ||
1622 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1623 | get_pmu_init_msg_pmu_sw_mg_size_v4; | ||
1624 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1625 | get_pmu_perfmon_cmd_start_size_v3; | ||
1626 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1627 | get_perfmon_cmd_start_offsetofvar_v3; | ||
1628 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1629 | perfmon_start_set_cmd_type_v3; | ||
1630 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1631 | perfmon_start_set_group_id_v3; | ||
1632 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1633 | perfmon_start_set_state_id_v3; | ||
1634 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1635 | perfmon_start_set_flags_v3; | ||
1636 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1637 | perfmon_start_get_flags_v3; | ||
1638 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1639 | get_pmu_perfmon_cmd_init_size_v3; | ||
1640 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1641 | get_perfmon_cmd_init_offsetofvar_v3; | ||
1642 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1643 | perfmon_cmd_init_set_sample_buffer_v3; | ||
1644 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1645 | perfmon_cmd_init_set_dec_cnt_v3; | ||
1646 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1647 | perfmon_cmd_init_set_base_cnt_id_v3; | ||
1648 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1649 | perfmon_cmd_init_set_samp_period_us_v3; | ||
1650 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1651 | perfmon_cmd_init_set_num_cnt_v3; | ||
1652 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1653 | perfmon_cmd_init_set_mov_avg_v3; | ||
1654 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1655 | get_pmu_sequence_in_alloc_ptr_v3; | ||
1656 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1657 | get_pmu_sequence_out_alloc_ptr_v3; | ||
1658 | break; | ||
1659 | case APP_VERSION_GM206: | ||
1660 | case APP_VERSION_NV_GPU: | ||
1661 | case APP_VERSION_NV_GPU_1: | ||
1662 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1663 | pg_cmd_eng_buf_load_size_v2; | ||
1664 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1665 | pg_cmd_eng_buf_load_set_cmd_type_v2; | ||
1666 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1667 | pg_cmd_eng_buf_load_set_engine_id_v2; | ||
1668 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1669 | pg_cmd_eng_buf_load_set_buf_idx_v2; | ||
1670 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1671 | pg_cmd_eng_buf_load_set_pad_v2; | ||
1672 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1673 | pg_cmd_eng_buf_load_set_buf_size_v2; | ||
1674 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1675 | pg_cmd_eng_buf_load_set_dma_base_v2; | ||
1676 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1677 | pg_cmd_eng_buf_load_set_dma_offset_v2; | ||
1678 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1679 | pg_cmd_eng_buf_load_set_dma_idx_v2; | ||
1680 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1681 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1682 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1683 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1684 | set_perfmon_cntr_valid_v2; | ||
1685 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1686 | set_perfmon_cntr_index_v2; | ||
1687 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1688 | set_perfmon_cntr_group_id_v2; | ||
1689 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1690 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1691 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1692 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1693 | pmu_cmdline_size_v5; | ||
1694 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1695 | set_pmu_cmdline_args_cpufreq_v5; | ||
1696 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1697 | set_pmu_cmdline_args_secure_mode_v5; | ||
1698 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1699 | set_pmu_cmdline_args_falctracesize_v5; | ||
1700 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1701 | set_pmu_cmdline_args_falctracedmabase_v5; | ||
1702 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1703 | set_pmu_cmdline_args_falctracedmaidx_v5; | ||
1704 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1705 | get_pmu_cmdline_args_ptr_v5; | ||
1706 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1707 | get_pmu_allocation_size_v3; | ||
1708 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1709 | set_pmu_allocation_ptr_v3; | ||
1710 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1711 | pmu_allocation_set_dmem_size_v3; | ||
1712 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1713 | pmu_allocation_get_dmem_size_v3; | ||
1714 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1715 | pmu_allocation_get_dmem_offset_v3; | ||
1716 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1717 | pmu_allocation_get_dmem_offset_addr_v3; | ||
1718 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1719 | pmu_allocation_set_dmem_offset_v3; | ||
1720 | g->ops.pmu_ver.pmu_allocation_get_fb_addr = | ||
1721 | pmu_allocation_get_fb_addr_v3; | ||
1722 | g->ops.pmu_ver.pmu_allocation_get_fb_size = | ||
1723 | pmu_allocation_get_fb_size_v3; | ||
1724 | if(pmu->desc->app_version != APP_VERSION_NV_GPU && | ||
1725 | pmu->desc->app_version != APP_VERSION_NV_GPU_1) { | ||
1726 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1727 | get_pmu_init_msg_pmu_queue_params_v2; | ||
1728 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1729 | get_pmu_msg_pmu_init_msg_ptr_v2; | ||
1730 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1731 | get_pmu_init_msg_pmu_sw_mg_off_v2; | ||
1732 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1733 | get_pmu_init_msg_pmu_sw_mg_size_v2; | ||
1734 | } | ||
1735 | else | ||
1736 | { | ||
1737 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1738 | get_pmu_init_msg_pmu_queue_params_v3; | ||
1739 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1740 | get_pmu_msg_pmu_init_msg_ptr_v3; | ||
1741 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1742 | get_pmu_init_msg_pmu_sw_mg_off_v3; | ||
1743 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1744 | get_pmu_init_msg_pmu_sw_mg_size_v3; | ||
1745 | } | ||
1746 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1747 | get_pmu_perfmon_cmd_start_size_v3; | ||
1748 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1749 | get_perfmon_cmd_start_offsetofvar_v3; | ||
1750 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1751 | perfmon_start_set_cmd_type_v3; | ||
1752 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1753 | perfmon_start_set_group_id_v3; | ||
1754 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1755 | perfmon_start_set_state_id_v3; | ||
1756 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1757 | perfmon_start_set_flags_v3; | ||
1758 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1759 | perfmon_start_get_flags_v3; | ||
1760 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1761 | get_pmu_perfmon_cmd_init_size_v3; | ||
1762 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1763 | get_perfmon_cmd_init_offsetofvar_v3; | ||
1764 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1765 | perfmon_cmd_init_set_sample_buffer_v3; | ||
1766 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1767 | perfmon_cmd_init_set_dec_cnt_v3; | ||
1768 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1769 | perfmon_cmd_init_set_base_cnt_id_v3; | ||
1770 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1771 | perfmon_cmd_init_set_samp_period_us_v3; | ||
1772 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1773 | perfmon_cmd_init_set_num_cnt_v3; | ||
1774 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1775 | perfmon_cmd_init_set_mov_avg_v3; | ||
1776 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1777 | get_pmu_sequence_in_alloc_ptr_v3; | ||
1778 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1779 | get_pmu_sequence_out_alloc_ptr_v3; | ||
1780 | break; | ||
1781 | case APP_VERSION_GM20B_5: | ||
1782 | case APP_VERSION_GM20B_4: | ||
1783 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1784 | pg_cmd_eng_buf_load_size_v0; | ||
1785 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1786 | pg_cmd_eng_buf_load_set_cmd_type_v0; | ||
1787 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1788 | pg_cmd_eng_buf_load_set_engine_id_v0; | ||
1789 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1790 | pg_cmd_eng_buf_load_set_buf_idx_v0; | ||
1791 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1792 | pg_cmd_eng_buf_load_set_pad_v0; | ||
1793 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1794 | pg_cmd_eng_buf_load_set_buf_size_v0; | ||
1795 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1796 | pg_cmd_eng_buf_load_set_dma_base_v0; | ||
1797 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1798 | pg_cmd_eng_buf_load_set_dma_offset_v0; | ||
1799 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1800 | pg_cmd_eng_buf_load_set_dma_idx_v0; | ||
1801 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1802 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1803 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1804 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1805 | set_perfmon_cntr_valid_v2; | ||
1806 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1807 | set_perfmon_cntr_index_v2; | ||
1808 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1809 | set_perfmon_cntr_group_id_v2; | ||
1810 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1811 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1812 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1813 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1814 | pmu_cmdline_size_v3; | ||
1815 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1816 | set_pmu_cmdline_args_cpufreq_v3; | ||
1817 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1818 | set_pmu_cmdline_args_secure_mode_v3; | ||
1819 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1820 | set_pmu_cmdline_args_falctracesize_v3; | ||
1821 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1822 | set_pmu_cmdline_args_falctracedmabase_v3; | ||
1823 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1824 | set_pmu_cmdline_args_falctracedmaidx_v3; | ||
1825 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1826 | get_pmu_cmdline_args_ptr_v3; | ||
1827 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1828 | get_pmu_allocation_size_v1; | ||
1829 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1830 | set_pmu_allocation_ptr_v1; | ||
1831 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1832 | pmu_allocation_set_dmem_size_v1; | ||
1833 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1834 | pmu_allocation_get_dmem_size_v1; | ||
1835 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1836 | pmu_allocation_get_dmem_offset_v1; | ||
1837 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1838 | pmu_allocation_get_dmem_offset_addr_v1; | ||
1839 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1840 | pmu_allocation_set_dmem_offset_v1; | ||
1841 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1842 | get_pmu_init_msg_pmu_queue_params_v1; | ||
1843 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1844 | get_pmu_msg_pmu_init_msg_ptr_v1; | ||
1845 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1846 | get_pmu_init_msg_pmu_sw_mg_off_v1; | ||
1847 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1848 | get_pmu_init_msg_pmu_sw_mg_size_v1; | ||
1849 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1850 | get_pmu_perfmon_cmd_start_size_v1; | ||
1851 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1852 | get_perfmon_cmd_start_offsetofvar_v1; | ||
1853 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1854 | perfmon_start_set_cmd_type_v1; | ||
1855 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1856 | perfmon_start_set_group_id_v1; | ||
1857 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1858 | perfmon_start_set_state_id_v1; | ||
1859 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1860 | perfmon_start_set_flags_v1; | ||
1861 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1862 | perfmon_start_get_flags_v1; | ||
1863 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1864 | get_pmu_perfmon_cmd_init_size_v1; | ||
1865 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1866 | get_perfmon_cmd_init_offsetofvar_v1; | ||
1867 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1868 | perfmon_cmd_init_set_sample_buffer_v1; | ||
1869 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1870 | perfmon_cmd_init_set_dec_cnt_v1; | ||
1871 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1872 | perfmon_cmd_init_set_base_cnt_id_v1; | ||
1873 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1874 | perfmon_cmd_init_set_samp_period_us_v1; | ||
1875 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1876 | perfmon_cmd_init_set_num_cnt_v1; | ||
1877 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1878 | perfmon_cmd_init_set_mov_avg_v1; | ||
1879 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1880 | get_pmu_sequence_in_alloc_ptr_v1; | ||
1881 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1882 | get_pmu_sequence_out_alloc_ptr_v1; | ||
1883 | break; | ||
1884 | case APP_VERSION_GM20B_3: | ||
1885 | case APP_VERSION_GM20B_2: | ||
1886 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1887 | pg_cmd_eng_buf_load_size_v0; | ||
1888 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1889 | pg_cmd_eng_buf_load_set_cmd_type_v0; | ||
1890 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1891 | pg_cmd_eng_buf_load_set_engine_id_v0; | ||
1892 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1893 | pg_cmd_eng_buf_load_set_buf_idx_v0; | ||
1894 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1895 | pg_cmd_eng_buf_load_set_pad_v0; | ||
1896 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1897 | pg_cmd_eng_buf_load_set_buf_size_v0; | ||
1898 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1899 | pg_cmd_eng_buf_load_set_dma_base_v0; | ||
1900 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1901 | pg_cmd_eng_buf_load_set_dma_offset_v0; | ||
1902 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1903 | pg_cmd_eng_buf_load_set_dma_idx_v0; | ||
1904 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1905 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1906 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1907 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1908 | set_perfmon_cntr_valid_v2; | ||
1909 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1910 | set_perfmon_cntr_index_v2; | ||
1911 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1912 | set_perfmon_cntr_group_id_v2; | ||
1913 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1914 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1915 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1916 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1917 | pmu_cmdline_size_v2; | ||
1918 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1919 | set_pmu_cmdline_args_cpufreq_v2; | ||
1920 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1921 | set_pmu_cmdline_args_secure_mode_v2; | ||
1922 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1923 | set_pmu_cmdline_args_falctracesize_v2; | ||
1924 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1925 | set_pmu_cmdline_args_falctracedmabase_v2; | ||
1926 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1927 | set_pmu_cmdline_args_falctracedmaidx_v2; | ||
1928 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1929 | get_pmu_cmdline_args_ptr_v2; | ||
1930 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1931 | get_pmu_allocation_size_v1; | ||
1932 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1933 | set_pmu_allocation_ptr_v1; | ||
1934 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1935 | pmu_allocation_set_dmem_size_v1; | ||
1936 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1937 | pmu_allocation_get_dmem_size_v1; | ||
1938 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1939 | pmu_allocation_get_dmem_offset_v1; | ||
1940 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1941 | pmu_allocation_get_dmem_offset_addr_v1; | ||
1942 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1943 | pmu_allocation_set_dmem_offset_v1; | ||
1944 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1945 | get_pmu_init_msg_pmu_queue_params_v1; | ||
1946 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1947 | get_pmu_msg_pmu_init_msg_ptr_v1; | ||
1948 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1949 | get_pmu_init_msg_pmu_sw_mg_off_v1; | ||
1950 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1951 | get_pmu_init_msg_pmu_sw_mg_size_v1; | ||
1952 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1953 | get_pmu_perfmon_cmd_start_size_v1; | ||
1954 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1955 | get_perfmon_cmd_start_offsetofvar_v1; | ||
1956 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1957 | perfmon_start_set_cmd_type_v1; | ||
1958 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1959 | perfmon_start_set_group_id_v1; | ||
1960 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1961 | perfmon_start_set_state_id_v1; | ||
1962 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1963 | perfmon_start_set_flags_v1; | ||
1964 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1965 | perfmon_start_get_flags_v1; | ||
1966 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1967 | get_pmu_perfmon_cmd_init_size_v1; | ||
1968 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1969 | get_perfmon_cmd_init_offsetofvar_v1; | ||
1970 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1971 | perfmon_cmd_init_set_sample_buffer_v1; | ||
1972 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1973 | perfmon_cmd_init_set_dec_cnt_v1; | ||
1974 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1975 | perfmon_cmd_init_set_base_cnt_id_v1; | ||
1976 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1977 | perfmon_cmd_init_set_samp_period_us_v1; | ||
1978 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1979 | perfmon_cmd_init_set_num_cnt_v1; | ||
1980 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1981 | perfmon_cmd_init_set_mov_avg_v1; | ||
1982 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1983 | get_pmu_sequence_in_alloc_ptr_v1; | ||
1984 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1985 | get_pmu_sequence_out_alloc_ptr_v1; | ||
1986 | break; | ||
1987 | case APP_VERSION_GM20B_1: | ||
1988 | case APP_VERSION_GM20B: | ||
1989 | case APP_VERSION_1: | ||
1990 | case APP_VERSION_2: | ||
1991 | case APP_VERSION_3: | ||
1992 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1993 | pg_cmd_eng_buf_load_size_v0; | ||
1994 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1995 | pg_cmd_eng_buf_load_set_cmd_type_v0; | ||
1996 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1997 | pg_cmd_eng_buf_load_set_engine_id_v0; | ||
1998 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1999 | pg_cmd_eng_buf_load_set_buf_idx_v0; | ||
2000 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
2001 | pg_cmd_eng_buf_load_set_pad_v0; | ||
2002 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
2003 | pg_cmd_eng_buf_load_set_buf_size_v0; | ||
2004 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
2005 | pg_cmd_eng_buf_load_set_dma_base_v0; | ||
2006 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
2007 | pg_cmd_eng_buf_load_set_dma_offset_v0; | ||
2008 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
2009 | pg_cmd_eng_buf_load_set_dma_idx_v0; | ||
2010 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
2011 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
2012 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v0; | ||
2013 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v0; | ||
2014 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v0; | ||
2015 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
2016 | set_perfmon_cntr_valid_v0; | ||
2017 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
2018 | set_perfmon_cntr_index_v0; | ||
2019 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
2020 | set_perfmon_cntr_group_id_v0; | ||
2021 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v0; | ||
2022 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
2023 | pmu_cmdline_size_v1; | ||
2024 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
2025 | set_pmu_cmdline_args_cpufreq_v1; | ||
2026 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
2027 | set_pmu_cmdline_args_secure_mode_v1; | ||
2028 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
2029 | set_pmu_cmdline_args_falctracesize_v1; | ||
2030 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
2031 | set_pmu_cmdline_args_falctracedmabase_v1; | ||
2032 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
2033 | set_pmu_cmdline_args_falctracedmaidx_v1; | ||
2034 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
2035 | get_pmu_cmdline_args_ptr_v1; | ||
2036 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
2037 | get_pmu_allocation_size_v1; | ||
2038 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
2039 | set_pmu_allocation_ptr_v1; | ||
2040 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
2041 | pmu_allocation_set_dmem_size_v1; | ||
2042 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
2043 | pmu_allocation_get_dmem_size_v1; | ||
2044 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
2045 | pmu_allocation_get_dmem_offset_v1; | ||
2046 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
2047 | pmu_allocation_get_dmem_offset_addr_v1; | ||
2048 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
2049 | pmu_allocation_set_dmem_offset_v1; | ||
2050 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
2051 | get_pmu_init_msg_pmu_queue_params_v1; | ||
2052 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
2053 | get_pmu_msg_pmu_init_msg_ptr_v1; | ||
2054 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
2055 | get_pmu_init_msg_pmu_sw_mg_off_v1; | ||
2056 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
2057 | get_pmu_init_msg_pmu_sw_mg_size_v1; | ||
2058 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
2059 | get_pmu_perfmon_cmd_start_size_v1; | ||
2060 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
2061 | get_perfmon_cmd_start_offsetofvar_v1; | ||
2062 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
2063 | perfmon_start_set_cmd_type_v1; | ||
2064 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
2065 | perfmon_start_set_group_id_v1; | ||
2066 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
2067 | perfmon_start_set_state_id_v1; | ||
2068 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
2069 | perfmon_start_set_flags_v1; | ||
2070 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
2071 | perfmon_start_get_flags_v1; | ||
2072 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
2073 | get_pmu_perfmon_cmd_init_size_v1; | ||
2074 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
2075 | get_perfmon_cmd_init_offsetofvar_v1; | ||
2076 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
2077 | perfmon_cmd_init_set_sample_buffer_v1; | ||
2078 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
2079 | perfmon_cmd_init_set_dec_cnt_v1; | ||
2080 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
2081 | perfmon_cmd_init_set_base_cnt_id_v1; | ||
2082 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
2083 | perfmon_cmd_init_set_samp_period_us_v1; | ||
2084 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
2085 | perfmon_cmd_init_set_num_cnt_v1; | ||
2086 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
2087 | perfmon_cmd_init_set_mov_avg_v1; | ||
2088 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
2089 | get_pmu_sequence_in_alloc_ptr_v1; | ||
2090 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
2091 | get_pmu_sequence_out_alloc_ptr_v1; | ||
2092 | break; | ||
2093 | case APP_VERSION_0: | ||
2094 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
2095 | pg_cmd_eng_buf_load_size_v0; | ||
2096 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
2097 | pg_cmd_eng_buf_load_set_cmd_type_v0; | ||
2098 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
2099 | pg_cmd_eng_buf_load_set_engine_id_v0; | ||
2100 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
2101 | pg_cmd_eng_buf_load_set_buf_idx_v0; | ||
2102 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
2103 | pg_cmd_eng_buf_load_set_pad_v0; | ||
2104 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
2105 | pg_cmd_eng_buf_load_set_buf_size_v0; | ||
2106 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
2107 | pg_cmd_eng_buf_load_set_dma_base_v0; | ||
2108 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
2109 | pg_cmd_eng_buf_load_set_dma_offset_v0; | ||
2110 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
2111 | pg_cmd_eng_buf_load_set_dma_idx_v0; | ||
2112 | g->ops.pmu_ver.cmd_id_zbc_table_update = 14; | ||
2113 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
2114 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v0; | ||
2115 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v0; | ||
2116 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v0; | ||
2117 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
2118 | set_perfmon_cntr_valid_v0; | ||
2119 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
2120 | set_perfmon_cntr_index_v0; | ||
2121 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
2122 | set_perfmon_cntr_group_id_v0; | ||
2123 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v0; | ||
2124 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
2125 | pmu_cmdline_size_v0; | ||
2126 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
2127 | set_pmu_cmdline_args_cpufreq_v0; | ||
2128 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
2129 | NULL; | ||
2130 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
2131 | get_pmu_cmdline_args_ptr_v0; | ||
2132 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
2133 | get_pmu_allocation_size_v0; | ||
2134 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
2135 | set_pmu_allocation_ptr_v0; | ||
2136 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
2137 | pmu_allocation_set_dmem_size_v0; | ||
2138 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
2139 | pmu_allocation_get_dmem_size_v0; | ||
2140 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
2141 | pmu_allocation_get_dmem_offset_v0; | ||
2142 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
2143 | pmu_allocation_get_dmem_offset_addr_v0; | ||
2144 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
2145 | pmu_allocation_set_dmem_offset_v0; | ||
2146 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
2147 | get_pmu_init_msg_pmu_queue_params_v0; | ||
2148 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
2149 | get_pmu_msg_pmu_init_msg_ptr_v0; | ||
2150 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
2151 | get_pmu_init_msg_pmu_sw_mg_off_v0; | ||
2152 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
2153 | get_pmu_init_msg_pmu_sw_mg_size_v0; | ||
2154 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
2155 | get_pmu_perfmon_cmd_start_size_v0; | ||
2156 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
2157 | get_perfmon_cmd_start_offsetofvar_v0; | ||
2158 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
2159 | perfmon_start_set_cmd_type_v0; | ||
2160 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
2161 | perfmon_start_set_group_id_v0; | ||
2162 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
2163 | perfmon_start_set_state_id_v0; | ||
2164 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
2165 | perfmon_start_set_flags_v0; | ||
2166 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
2167 | perfmon_start_get_flags_v0; | ||
2168 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
2169 | get_pmu_perfmon_cmd_init_size_v0; | ||
2170 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
2171 | get_perfmon_cmd_init_offsetofvar_v0; | ||
2172 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
2173 | perfmon_cmd_init_set_sample_buffer_v0; | ||
2174 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
2175 | perfmon_cmd_init_set_dec_cnt_v0; | ||
2176 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
2177 | perfmon_cmd_init_set_base_cnt_id_v0; | ||
2178 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
2179 | perfmon_cmd_init_set_samp_period_us_v0; | ||
2180 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
2181 | perfmon_cmd_init_set_num_cnt_v0; | ||
2182 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
2183 | perfmon_cmd_init_set_mov_avg_v0; | ||
2184 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
2185 | get_pmu_sequence_in_alloc_ptr_v0; | ||
2186 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
2187 | get_pmu_sequence_out_alloc_ptr_v0; | ||
2188 | break; | ||
2189 | default: | ||
2190 | nvgpu_err(g, "PMU code version not supported version: %d", | ||
2191 | pmu->desc->app_version); | ||
2192 | err = -EINVAL; | ||
2193 | goto fail_pmu_seq; | ||
2194 | } | ||
2195 | pv->set_perfmon_cntr_index(pmu, 3); /* GR & CE2 */ | ||
2196 | pv->set_perfmon_cntr_group_id(pmu, PMU_DOMAIN_GROUP_PSTATE); | ||
2197 | |||
2198 | return 0; | ||
2199 | |||
2200 | fail_pmu_seq: | ||
2201 | nvgpu_mutex_destroy(&pmu->pmu_seq_lock); | ||
2202 | fail_pmu_copy: | ||
2203 | nvgpu_mutex_destroy(&pmu->pmu_copy_lock); | ||
2204 | fail_isr: | ||
2205 | nvgpu_mutex_destroy(&pmu->isr_mutex); | ||
2206 | fail_pg: | ||
2207 | nvgpu_mutex_destroy(&pmu->pg_mutex); | ||
2208 | fail_elpg: | ||
2209 | nvgpu_mutex_destroy(&pmu->elpg_mutex); | ||
2210 | return err; | ||
2211 | } | ||
2212 | 111 | ||
2213 | void pmu_copy_from_dmem(struct nvgpu_pmu *pmu, | 112 | void pmu_copy_from_dmem(struct nvgpu_pmu *pmu, |
2214 | u32 src, u8 *dst, u32 size, u8 port) | 113 | u32 src, u8 *dst, u32 size, u8 port) |
@@ -2793,63 +692,6 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set) | |||
2793 | pwr_pmu_msgq_tail_val_f(*tail)); | 692 | pwr_pmu_msgq_tail_val_f(*tail)); |
2794 | } | 693 | } |
2795 | 694 | ||
2796 | void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu) | ||
2797 | { | ||
2798 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
2799 | |||
2800 | gk20a_dbg_fn(""); | ||
2801 | |||
2802 | if (nvgpu_alloc_initialized(&pmu->dmem)) | ||
2803 | nvgpu_alloc_destroy(&pmu->dmem); | ||
2804 | |||
2805 | nvgpu_release_firmware(g, pmu->fw); | ||
2806 | |||
2807 | nvgpu_mutex_destroy(&pmu->elpg_mutex); | ||
2808 | nvgpu_mutex_destroy(&pmu->pg_mutex); | ||
2809 | nvgpu_mutex_destroy(&pmu->isr_mutex); | ||
2810 | nvgpu_mutex_destroy(&pmu->pmu_copy_lock); | ||
2811 | nvgpu_mutex_destroy(&pmu->pmu_seq_lock); | ||
2812 | } | ||
2813 | |||
2814 | static int gk20a_prepare_ucode(struct gk20a *g) | ||
2815 | { | ||
2816 | struct nvgpu_pmu *pmu = &g->pmu; | ||
2817 | int err = 0; | ||
2818 | struct mm_gk20a *mm = &g->mm; | ||
2819 | struct vm_gk20a *vm = mm->pmu.vm; | ||
2820 | |||
2821 | if (pmu->fw) | ||
2822 | return gk20a_init_pmu(pmu); | ||
2823 | |||
2824 | pmu->fw = nvgpu_request_firmware(g, GK20A_PMU_UCODE_IMAGE, 0); | ||
2825 | if (!pmu->fw) { | ||
2826 | nvgpu_err(g, "failed to load pmu ucode!!"); | ||
2827 | return err; | ||
2828 | } | ||
2829 | |||
2830 | gk20a_dbg_fn("firmware loaded"); | ||
2831 | |||
2832 | pmu->desc = (struct pmu_ucode_desc *)pmu->fw->data; | ||
2833 | pmu->ucode_image = (u32 *)((u8 *)pmu->desc + | ||
2834 | pmu->desc->descriptor_size); | ||
2835 | |||
2836 | err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX, | ||
2837 | &pmu->ucode); | ||
2838 | if (err) | ||
2839 | goto err_release_fw; | ||
2840 | |||
2841 | nvgpu_mem_wr_n(g, &pmu->ucode, 0, pmu->ucode_image, | ||
2842 | pmu->desc->app_start_offset + pmu->desc->app_size); | ||
2843 | |||
2844 | return gk20a_init_pmu(pmu); | ||
2845 | |||
2846 | err_release_fw: | ||
2847 | nvgpu_release_firmware(g, pmu->fw); | ||
2848 | pmu->fw = NULL; | ||
2849 | |||
2850 | return err; | ||
2851 | } | ||
2852 | |||
2853 | static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, | 695 | static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, |
2854 | void *param, u32 handle, u32 status) | 696 | void *param, u32 handle, u32 status) |
2855 | { | 697 | { |
@@ -3018,7 +860,7 @@ u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id) | |||
3018 | void gk20a_init_pmu_ops(struct gpu_ops *gops) | 860 | void gk20a_init_pmu_ops(struct gpu_ops *gops) |
3019 | { | 861 | { |
3020 | gops->pmu.is_pmu_supported = gk20a_is_pmu_supported; | 862 | gops->pmu.is_pmu_supported = gk20a_is_pmu_supported; |
3021 | gops->pmu.prepare_ucode = gk20a_prepare_ucode; | 863 | gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob; |
3022 | gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1; | 864 | gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1; |
3023 | gops->pmu.pmu_nsbootstrap = pmu_bootstrap; | 865 | gops->pmu.pmu_nsbootstrap = pmu_bootstrap; |
3024 | gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r; | 866 | gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r; |
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h index 1d2e20e6..b5038bd4 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | |||
@@ -30,24 +30,6 @@ struct nvgpu_firmware; | |||
30 | 30 | ||
31 | #define ZBC_MASK(i) (~(~(0) << ((i)+1)) & 0xfffe) | 31 | #define ZBC_MASK(i) (~(~(0) << ((i)+1)) & 0xfffe) |
32 | 32 | ||
33 | #define APP_VERSION_NC_3 22204331 | ||
34 | #define APP_VERSION_NC_2 20429989 | ||
35 | #define APP_VERSION_NC_1 20313802 | ||
36 | #define APP_VERSION_NC_0 20360931 | ||
37 | #define APP_VERSION_GM206 20652057 | ||
38 | #define APP_VERSION_NV_GPU 21307569 | ||
39 | #define APP_VERSION_NV_GPU_1 21308030 | ||
40 | #define APP_VERSION_GM20B_5 20490253 | ||
41 | #define APP_VERSION_GM20B_4 19008461 | ||
42 | #define APP_VERSION_GM20B_3 18935575 | ||
43 | #define APP_VERSION_GM20B_2 18694072 | ||
44 | #define APP_VERSION_GM20B_1 18547257 | ||
45 | #define APP_VERSION_GM20B 17615280 | ||
46 | #define APP_VERSION_3 18357968 | ||
47 | #define APP_VERSION_2 18542378 | ||
48 | #define APP_VERSION_1 17997577 /*Obsolete this once 18357968 gets in*/ | ||
49 | #define APP_VERSION_0 16856675 | ||
50 | |||
51 | /*Fuse defines*/ | 33 | /*Fuse defines*/ |
52 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) | 34 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) |
53 | #define FUSE_GCPLEX_CONFIG_FUSE_0 0x2C8 | 35 | #define FUSE_GCPLEX_CONFIG_FUSE_0 0x2C8 |
@@ -127,11 +109,7 @@ void pmu_copy_from_dmem(struct nvgpu_pmu *pmu, | |||
127 | u32 src, u8 *dst, u32 size, u8 port); | 109 | u32 src, u8 *dst, u32 size, u8 port); |
128 | int pmu_reset(struct nvgpu_pmu *pmu); | 110 | int pmu_reset(struct nvgpu_pmu *pmu); |
129 | int pmu_bootstrap(struct nvgpu_pmu *pmu); | 111 | int pmu_bootstrap(struct nvgpu_pmu *pmu); |
130 | int gk20a_init_pmu(struct nvgpu_pmu *pmu); | ||
131 | void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu); | 112 | void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu); |
132 | void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu); | ||
133 | |||
134 | int gk20a_init_pmu(struct nvgpu_pmu *pmu); | ||
135 | 113 | ||
136 | int gk20a_pmu_ap_send_command(struct gk20a *g, | 114 | int gk20a_pmu_ap_send_command(struct gk20a *g, |
137 | union pmu_ap_cmd *p_ap_cmd, bool b_block); | 115 | union pmu_ap_cmd *p_ap_cmd, bool b_block); |