diff options
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/nvgpu/Makefile.nvgpu | 1 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/pmu/pmu_fw.c | 2291 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | 2168 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | 22 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gm20b/acr_gm20b.c | 4 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gp106/acr_gp106.c | 4 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/include/nvgpu/pmu.h | 4 |
7 files changed, 2305 insertions, 2189 deletions
diff --git a/drivers/gpu/nvgpu/Makefile.nvgpu b/drivers/gpu/nvgpu/Makefile.nvgpu index f398e384..77d7be4a 100644 --- a/drivers/gpu/nvgpu/Makefile.nvgpu +++ b/drivers/gpu/nvgpu/Makefile.nvgpu | |||
@@ -60,6 +60,7 @@ nvgpu-y := \ | |||
60 | common/falcon/falcon.o \ | 60 | common/falcon/falcon.o \ |
61 | common/pmu/pmu.o \ | 61 | common/pmu/pmu.o \ |
62 | common/pmu/pmu_ipc.o \ | 62 | common/pmu/pmu_ipc.o \ |
63 | common/pmu/pmu_fw.o \ | ||
63 | gk20a/gk20a.o \ | 64 | gk20a/gk20a.o \ |
64 | gk20a/bus_gk20a.o \ | 65 | gk20a/bus_gk20a.o \ |
65 | gk20a/pramin_gk20a.o \ | 66 | gk20a/pramin_gk20a.o \ |
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_fw.c b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c new file mode 100644 index 00000000..f6229a3a --- /dev/null +++ b/drivers/gpu/nvgpu/common/pmu/pmu_fw.c | |||
@@ -0,0 +1,2291 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms and conditions of the GNU General Public License, | ||
6 | * version 2, as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
11 | * more details. | ||
12 | */ | ||
13 | |||
14 | #include <nvgpu/pmu.h> | ||
15 | #include <nvgpu/dma.h> | ||
16 | #include <nvgpu/log.h> | ||
17 | #include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h> | ||
18 | #include <nvgpu/firmware.h> | ||
19 | |||
20 | #include "gk20a/gk20a.h" | ||
21 | |||
/* PMU NS UCODE IMG */
/* Ucode image loaded when the PMU is booted in non-secure (NS) mode. */
#define NVGPU_PMU_NS_UCODE_IMAGE	"gpmu_ucode.bin"

/* PMU F/W version */
/*
 * App-version numbers embedded in released PMU firmware binaries.
 * NOTE(review): presumably matched against the loaded ucode's version to
 * select the version-specific accessor functions defined in this file —
 * confirm against the dispatch code (not visible in this chunk).
 */
#define APP_VERSION_NC_3	22204331
#define APP_VERSION_NC_2	20429989
#define APP_VERSION_NC_1	20313802
#define APP_VERSION_NC_0	20360931
#define APP_VERSION_GM206	20652057
#define APP_VERSION_NV_GPU	21307569
#define APP_VERSION_NV_GPU_1	21308030
#define APP_VERSION_GM20B_5	20490253
#define APP_VERSION_GM20B_4	19008461
#define APP_VERSION_GM20B_3	18935575
#define APP_VERSION_GM20B_2	18694072
#define APP_VERSION_GM20B_1	18547257
#define APP_VERSION_GM20B	17615280
#define APP_VERSION_3	18357968
#define APP_VERSION_2	18542378
#define APP_VERSION_1	17997577 /*Obsolete this once 18357968 gets in*/
#define APP_VERSION_0	16856675
43 | |||
44 | /* PMU version specific functions */ | ||
45 | static u32 pmu_perfmon_cntr_sz_v0(struct nvgpu_pmu *pmu) | ||
46 | { | ||
47 | return sizeof(struct pmu_perfmon_counter_v0); | ||
48 | } | ||
49 | |||
50 | static u32 pmu_perfmon_cntr_sz_v2(struct nvgpu_pmu *pmu) | ||
51 | { | ||
52 | return sizeof(struct pmu_perfmon_counter_v2); | ||
53 | } | ||
54 | |||
55 | static void *get_perfmon_cntr_ptr_v2(struct nvgpu_pmu *pmu) | ||
56 | { | ||
57 | return (void *)(&pmu->perfmon_counter_v2); | ||
58 | } | ||
59 | |||
60 | static void *get_perfmon_cntr_ptr_v0(struct nvgpu_pmu *pmu) | ||
61 | { | ||
62 | return (void *)(&pmu->perfmon_counter_v0); | ||
63 | } | ||
64 | |||
65 | static void set_perfmon_cntr_ut_v2(struct nvgpu_pmu *pmu, u16 ut) | ||
66 | { | ||
67 | pmu->perfmon_counter_v2.upper_threshold = ut; | ||
68 | } | ||
69 | |||
70 | static void set_perfmon_cntr_ut_v0(struct nvgpu_pmu *pmu, u16 ut) | ||
71 | { | ||
72 | pmu->perfmon_counter_v0.upper_threshold = ut; | ||
73 | } | ||
74 | |||
75 | static void set_perfmon_cntr_lt_v2(struct nvgpu_pmu *pmu, u16 lt) | ||
76 | { | ||
77 | pmu->perfmon_counter_v2.lower_threshold = lt; | ||
78 | } | ||
79 | |||
80 | static void set_perfmon_cntr_lt_v0(struct nvgpu_pmu *pmu, u16 lt) | ||
81 | { | ||
82 | pmu->perfmon_counter_v0.lower_threshold = lt; | ||
83 | } | ||
84 | |||
85 | static void set_perfmon_cntr_valid_v2(struct nvgpu_pmu *pmu, u8 valid) | ||
86 | { | ||
87 | pmu->perfmon_counter_v2.valid = valid; | ||
88 | } | ||
89 | |||
90 | static void set_perfmon_cntr_valid_v0(struct nvgpu_pmu *pmu, u8 valid) | ||
91 | { | ||
92 | pmu->perfmon_counter_v0.valid = valid; | ||
93 | } | ||
94 | |||
95 | static void set_perfmon_cntr_index_v2(struct nvgpu_pmu *pmu, u8 index) | ||
96 | { | ||
97 | pmu->perfmon_counter_v2.index = index; | ||
98 | } | ||
99 | |||
100 | static void set_perfmon_cntr_index_v0(struct nvgpu_pmu *pmu, u8 index) | ||
101 | { | ||
102 | pmu->perfmon_counter_v0.index = index; | ||
103 | } | ||
104 | |||
105 | static void set_perfmon_cntr_group_id_v2(struct nvgpu_pmu *pmu, u8 gid) | ||
106 | { | ||
107 | pmu->perfmon_counter_v2.group_id = gid; | ||
108 | } | ||
109 | |||
110 | static void set_perfmon_cntr_group_id_v0(struct nvgpu_pmu *pmu, u8 gid) | ||
111 | { | ||
112 | pmu->perfmon_counter_v0.group_id = gid; | ||
113 | } | ||
114 | |||
115 | static u32 pmu_cmdline_size_v0(struct nvgpu_pmu *pmu) | ||
116 | { | ||
117 | return sizeof(struct pmu_cmdline_args_v0); | ||
118 | } | ||
119 | |||
120 | static u32 pmu_cmdline_size_v1(struct nvgpu_pmu *pmu) | ||
121 | { | ||
122 | return sizeof(struct pmu_cmdline_args_v1); | ||
123 | } | ||
124 | |||
125 | static u32 pmu_cmdline_size_v2(struct nvgpu_pmu *pmu) | ||
126 | { | ||
127 | return sizeof(struct pmu_cmdline_args_v2); | ||
128 | } | ||
129 | |||
130 | static void set_pmu_cmdline_args_cpufreq_v2(struct nvgpu_pmu *pmu, u32 freq) | ||
131 | { | ||
132 | pmu->args_v2.cpu_freq_hz = freq; | ||
133 | } | ||
134 | static void set_pmu_cmdline_args_secure_mode_v2(struct nvgpu_pmu *pmu, u32 val) | ||
135 | { | ||
136 | pmu->args_v2.secure_mode = val; | ||
137 | } | ||
138 | |||
139 | static void set_pmu_cmdline_args_falctracesize_v2( | ||
140 | struct nvgpu_pmu *pmu, u32 size) | ||
141 | { | ||
142 | pmu->args_v2.falc_trace_size = size; | ||
143 | } | ||
144 | |||
145 | static void set_pmu_cmdline_args_falctracedmabase_v2(struct nvgpu_pmu *pmu) | ||
146 | { | ||
147 | pmu->args_v2.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | ||
148 | } | ||
149 | |||
150 | static void set_pmu_cmdline_args_falctracedmaidx_v2( | ||
151 | struct nvgpu_pmu *pmu, u32 idx) | ||
152 | { | ||
153 | pmu->args_v2.falc_trace_dma_idx = idx; | ||
154 | } | ||
155 | |||
156 | |||
157 | static void set_pmu_cmdline_args_falctracedmabase_v4(struct nvgpu_pmu *pmu) | ||
158 | { | ||
159 | pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | ||
160 | pmu->args_v4.dma_addr.dma_base1 = 0; | ||
161 | pmu->args_v4.dma_addr.dma_offset = 0; | ||
162 | } | ||
163 | |||
164 | static u32 pmu_cmdline_size_v4(struct nvgpu_pmu *pmu) | ||
165 | { | ||
166 | return sizeof(struct pmu_cmdline_args_v4); | ||
167 | } | ||
168 | |||
169 | static void set_pmu_cmdline_args_cpufreq_v4(struct nvgpu_pmu *pmu, u32 freq) | ||
170 | { | ||
171 | pmu->args_v4.cpu_freq_hz = freq; | ||
172 | } | ||
173 | static void set_pmu_cmdline_args_secure_mode_v4(struct nvgpu_pmu *pmu, u32 val) | ||
174 | { | ||
175 | pmu->args_v4.secure_mode = val; | ||
176 | } | ||
177 | |||
178 | static void set_pmu_cmdline_args_falctracesize_v4( | ||
179 | struct nvgpu_pmu *pmu, u32 size) | ||
180 | { | ||
181 | pmu->args_v4.falc_trace_size = size; | ||
182 | } | ||
183 | static void set_pmu_cmdline_args_falctracedmaidx_v4( | ||
184 | struct nvgpu_pmu *pmu, u32 idx) | ||
185 | { | ||
186 | pmu->args_v4.falc_trace_dma_idx = idx; | ||
187 | } | ||
188 | |||
static u32 pmu_cmdline_size_v5(struct nvgpu_pmu *pmu)
{
	return sizeof(struct pmu_cmdline_args_v5);
}

static u32 pmu_cmdline_size_v6(struct nvgpu_pmu *pmu)
{
	return sizeof(struct pmu_cmdline_args_v6);
}

static void set_pmu_cmdline_args_cpufreq_v5(struct nvgpu_pmu *pmu, u32 freq)
{
	/*
	 * NOTE(review): the @freq argument is ignored and the frequency is
	 * pinned to 204 MHz — presumably a fixed PWR clock for this ucode
	 * version; confirm this is intentional rather than a leftover.
	 */
	pmu->args_v5.cpu_freq_hz = 204000000;
}
static void set_pmu_cmdline_args_secure_mode_v5(struct nvgpu_pmu *pmu, u32 val)
{
	pmu->args_v5.secure_mode = val;
}

static void set_pmu_cmdline_args_falctracesize_v5(
	struct nvgpu_pmu *pmu, u32 size)
{
	/* set by surface describe */
}

static void set_pmu_cmdline_args_falctracedmabase_v5(struct nvgpu_pmu *pmu)
{
	struct gk20a *g = gk20a_from_pmu(pmu);

	/* v5 passes the trace buffer as a full surface descriptor */
	nvgpu_pmu_surface_describe(g, &pmu->trace_buf, &pmu->args_v5.trace_buf);
}

static void set_pmu_cmdline_args_falctracedmaidx_v5(
	struct nvgpu_pmu *pmu, u32 idx)
{
	/* set by surface describe */
}
226 | |||
227 | static u32 pmu_cmdline_size_v3(struct nvgpu_pmu *pmu) | ||
228 | { | ||
229 | return sizeof(struct pmu_cmdline_args_v3); | ||
230 | } | ||
231 | |||
232 | static void set_pmu_cmdline_args_cpufreq_v3(struct nvgpu_pmu *pmu, u32 freq) | ||
233 | { | ||
234 | pmu->args_v3.cpu_freq_hz = freq; | ||
235 | } | ||
236 | static void set_pmu_cmdline_args_secure_mode_v3(struct nvgpu_pmu *pmu, u32 val) | ||
237 | { | ||
238 | pmu->args_v3.secure_mode = val; | ||
239 | } | ||
240 | |||
241 | static void set_pmu_cmdline_args_falctracesize_v3( | ||
242 | struct nvgpu_pmu *pmu, u32 size) | ||
243 | { | ||
244 | pmu->args_v3.falc_trace_size = size; | ||
245 | } | ||
246 | |||
247 | static void set_pmu_cmdline_args_falctracedmabase_v3(struct nvgpu_pmu *pmu) | ||
248 | { | ||
249 | pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | ||
250 | } | ||
251 | |||
252 | static void set_pmu_cmdline_args_falctracedmaidx_v3( | ||
253 | struct nvgpu_pmu *pmu, u32 idx) | ||
254 | { | ||
255 | pmu->args_v3.falc_trace_dma_idx = idx; | ||
256 | } | ||
257 | |||
258 | static void set_pmu_cmdline_args_cpufreq_v1(struct nvgpu_pmu *pmu, u32 freq) | ||
259 | { | ||
260 | pmu->args_v1.cpu_freq_hz = freq; | ||
261 | } | ||
262 | static void set_pmu_cmdline_args_secure_mode_v1(struct nvgpu_pmu *pmu, u32 val) | ||
263 | { | ||
264 | pmu->args_v1.secure_mode = val; | ||
265 | } | ||
266 | |||
267 | static void set_pmu_cmdline_args_falctracesize_v1( | ||
268 | struct nvgpu_pmu *pmu, u32 size) | ||
269 | { | ||
270 | pmu->args_v1.falc_trace_size = size; | ||
271 | } | ||
272 | |||
273 | static void set_pmu_cmdline_args_falctracedmabase_v1(struct nvgpu_pmu *pmu) | ||
274 | { | ||
275 | pmu->args_v1.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | ||
276 | } | ||
277 | |||
278 | static void set_pmu_cmdline_args_falctracedmaidx_v1( | ||
279 | struct nvgpu_pmu *pmu, u32 idx) | ||
280 | { | ||
281 | pmu->args_v1.falc_trace_dma_idx = idx; | ||
282 | } | ||
283 | |||
284 | static void set_pmu_cmdline_args_cpufreq_v0(struct nvgpu_pmu *pmu, u32 freq) | ||
285 | { | ||
286 | pmu->args_v0.cpu_freq_hz = freq; | ||
287 | } | ||
288 | |||
289 | static void *get_pmu_cmdline_args_ptr_v4(struct nvgpu_pmu *pmu) | ||
290 | { | ||
291 | return (void *)(&pmu->args_v4); | ||
292 | } | ||
293 | |||
294 | static void *get_pmu_cmdline_args_ptr_v3(struct nvgpu_pmu *pmu) | ||
295 | { | ||
296 | return (void *)(&pmu->args_v3); | ||
297 | } | ||
298 | |||
299 | static void *get_pmu_cmdline_args_ptr_v2(struct nvgpu_pmu *pmu) | ||
300 | { | ||
301 | return (void *)(&pmu->args_v2); | ||
302 | } | ||
303 | |||
304 | static void *get_pmu_cmdline_args_ptr_v5(struct nvgpu_pmu *pmu) | ||
305 | { | ||
306 | return (void *)(&pmu->args_v5); | ||
307 | } | ||
308 | static void *get_pmu_cmdline_args_ptr_v1(struct nvgpu_pmu *pmu) | ||
309 | { | ||
310 | return (void *)(&pmu->args_v1); | ||
311 | } | ||
312 | |||
313 | static void *get_pmu_cmdline_args_ptr_v0(struct nvgpu_pmu *pmu) | ||
314 | { | ||
315 | return (void *)(&pmu->args_v0); | ||
316 | } | ||
317 | |||
318 | static u32 get_pmu_allocation_size_v3(struct nvgpu_pmu *pmu) | ||
319 | { | ||
320 | return sizeof(struct pmu_allocation_v3); | ||
321 | } | ||
322 | |||
323 | static u32 get_pmu_allocation_size_v2(struct nvgpu_pmu *pmu) | ||
324 | { | ||
325 | return sizeof(struct pmu_allocation_v2); | ||
326 | } | ||
327 | |||
328 | static u32 get_pmu_allocation_size_v1(struct nvgpu_pmu *pmu) | ||
329 | { | ||
330 | return sizeof(struct pmu_allocation_v1); | ||
331 | } | ||
332 | |||
333 | static u32 get_pmu_allocation_size_v0(struct nvgpu_pmu *pmu) | ||
334 | { | ||
335 | return sizeof(struct pmu_allocation_v0); | ||
336 | } | ||
337 | |||
/*
 * Point *pmu_alloc_ptr at assign_ptr.  The per-version variants exist
 * only to keep the accessor table uniform: through void ** the store
 * itself is version independent, so the old double-pointer casts were
 * no-ops and have been dropped.
 */
static void set_pmu_allocation_ptr_v3(struct nvgpu_pmu *pmu,
	void **pmu_alloc_ptr, void *assign_ptr)
{
	*pmu_alloc_ptr = assign_ptr;
}

static void set_pmu_allocation_ptr_v2(struct nvgpu_pmu *pmu,
	void **pmu_alloc_ptr, void *assign_ptr)
{
	*pmu_alloc_ptr = assign_ptr;
}

static void set_pmu_allocation_ptr_v1(struct nvgpu_pmu *pmu,
	void **pmu_alloc_ptr, void *assign_ptr)
{
	*pmu_alloc_ptr = assign_ptr;
}

static void set_pmu_allocation_ptr_v0(struct nvgpu_pmu *pmu,
	void **pmu_alloc_ptr, void *assign_ptr)
{
	*pmu_alloc_ptr = assign_ptr;
}
373 | |||
374 | static void pmu_allocation_set_dmem_size_v3(struct nvgpu_pmu *pmu, | ||
375 | void *pmu_alloc_ptr, u16 size) | ||
376 | { | ||
377 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
378 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
379 | |||
380 | pmu_a_ptr->alloc.dmem.size = size; | ||
381 | } | ||
382 | |||
383 | static void pmu_allocation_set_dmem_size_v2(struct nvgpu_pmu *pmu, | ||
384 | void *pmu_alloc_ptr, u16 size) | ||
385 | { | ||
386 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
387 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
388 | |||
389 | pmu_a_ptr->alloc.dmem.size = size; | ||
390 | } | ||
391 | |||
392 | static void pmu_allocation_set_dmem_size_v1(struct nvgpu_pmu *pmu, | ||
393 | void *pmu_alloc_ptr, u16 size) | ||
394 | { | ||
395 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
396 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
397 | |||
398 | pmu_a_ptr->alloc.dmem.size = size; | ||
399 | } | ||
400 | |||
401 | static void pmu_allocation_set_dmem_size_v0(struct nvgpu_pmu *pmu, | ||
402 | void *pmu_alloc_ptr, u16 size) | ||
403 | { | ||
404 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
405 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
406 | |||
407 | pmu_a_ptr->alloc.dmem.size = size; | ||
408 | } | ||
409 | |||
410 | static u16 pmu_allocation_get_dmem_size_v3(struct nvgpu_pmu *pmu, | ||
411 | void *pmu_alloc_ptr) | ||
412 | { | ||
413 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
414 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
415 | |||
416 | return pmu_a_ptr->alloc.dmem.size; | ||
417 | } | ||
418 | |||
419 | static u16 pmu_allocation_get_dmem_size_v2(struct nvgpu_pmu *pmu, | ||
420 | void *pmu_alloc_ptr) | ||
421 | { | ||
422 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
423 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
424 | |||
425 | return pmu_a_ptr->alloc.dmem.size; | ||
426 | } | ||
427 | |||
428 | static u16 pmu_allocation_get_dmem_size_v1(struct nvgpu_pmu *pmu, | ||
429 | void *pmu_alloc_ptr) | ||
430 | { | ||
431 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
432 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
433 | |||
434 | return pmu_a_ptr->alloc.dmem.size; | ||
435 | } | ||
436 | |||
437 | static u16 pmu_allocation_get_dmem_size_v0(struct nvgpu_pmu *pmu, | ||
438 | void *pmu_alloc_ptr) | ||
439 | { | ||
440 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
441 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
442 | |||
443 | return pmu_a_ptr->alloc.dmem.size; | ||
444 | } | ||
445 | |||
446 | static u32 pmu_allocation_get_dmem_offset_v3(struct nvgpu_pmu *pmu, | ||
447 | void *pmu_alloc_ptr) | ||
448 | { | ||
449 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
450 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
451 | |||
452 | return pmu_a_ptr->alloc.dmem.offset; | ||
453 | } | ||
454 | |||
455 | static u32 pmu_allocation_get_dmem_offset_v2(struct nvgpu_pmu *pmu, | ||
456 | void *pmu_alloc_ptr) | ||
457 | { | ||
458 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
459 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
460 | |||
461 | return pmu_a_ptr->alloc.dmem.offset; | ||
462 | } | ||
463 | |||
464 | static u32 pmu_allocation_get_dmem_offset_v1(struct nvgpu_pmu *pmu, | ||
465 | void *pmu_alloc_ptr) | ||
466 | { | ||
467 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
468 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
469 | |||
470 | return pmu_a_ptr->alloc.dmem.offset; | ||
471 | } | ||
472 | |||
473 | static u32 pmu_allocation_get_dmem_offset_v0(struct nvgpu_pmu *pmu, | ||
474 | void *pmu_alloc_ptr) | ||
475 | { | ||
476 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
477 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
478 | |||
479 | return pmu_a_ptr->alloc.dmem.offset; | ||
480 | } | ||
481 | |||
482 | static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct nvgpu_pmu *pmu, | ||
483 | void *pmu_alloc_ptr) | ||
484 | { | ||
485 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
486 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
487 | |||
488 | return &pmu_a_ptr->alloc.dmem.offset; | ||
489 | } | ||
490 | |||
491 | static void *pmu_allocation_get_fb_addr_v3( | ||
492 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) | ||
493 | { | ||
494 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
495 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
496 | |||
497 | return (void *)&pmu_a_ptr->alloc.fb; | ||
498 | } | ||
499 | |||
500 | static u32 pmu_allocation_get_fb_size_v3( | ||
501 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) | ||
502 | { | ||
503 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
504 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
505 | |||
506 | return sizeof(pmu_a_ptr->alloc.fb); | ||
507 | } | ||
508 | |||
509 | static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct nvgpu_pmu *pmu, | ||
510 | void *pmu_alloc_ptr) | ||
511 | { | ||
512 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
513 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
514 | |||
515 | return &pmu_a_ptr->alloc.dmem.offset; | ||
516 | } | ||
517 | |||
518 | static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct nvgpu_pmu *pmu, | ||
519 | void *pmu_alloc_ptr) | ||
520 | { | ||
521 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
522 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
523 | |||
524 | return &pmu_a_ptr->alloc.dmem.offset; | ||
525 | } | ||
526 | |||
527 | static u32 *pmu_allocation_get_dmem_offset_addr_v0(struct nvgpu_pmu *pmu, | ||
528 | void *pmu_alloc_ptr) | ||
529 | { | ||
530 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
531 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
532 | |||
533 | return &pmu_a_ptr->alloc.dmem.offset; | ||
534 | } | ||
535 | |||
536 | static void pmu_allocation_set_dmem_offset_v3(struct nvgpu_pmu *pmu, | ||
537 | void *pmu_alloc_ptr, u32 offset) | ||
538 | { | ||
539 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
540 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
541 | |||
542 | pmu_a_ptr->alloc.dmem.offset = offset; | ||
543 | } | ||
544 | |||
545 | static void pmu_allocation_set_dmem_offset_v2(struct nvgpu_pmu *pmu, | ||
546 | void *pmu_alloc_ptr, u32 offset) | ||
547 | { | ||
548 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
549 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
550 | |||
551 | pmu_a_ptr->alloc.dmem.offset = offset; | ||
552 | } | ||
553 | |||
554 | static void pmu_allocation_set_dmem_offset_v1(struct nvgpu_pmu *pmu, | ||
555 | void *pmu_alloc_ptr, u32 offset) | ||
556 | { | ||
557 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
558 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
559 | |||
560 | pmu_a_ptr->alloc.dmem.offset = offset; | ||
561 | } | ||
562 | |||
563 | static void pmu_allocation_set_dmem_offset_v0(struct nvgpu_pmu *pmu, | ||
564 | void *pmu_alloc_ptr, u32 offset) | ||
565 | { | ||
566 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
567 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
568 | |||
569 | pmu_a_ptr->alloc.dmem.offset = offset; | ||
570 | } | ||
571 | |||
572 | static void *get_pmu_msg_pmu_init_msg_ptr_v4(struct pmu_init_msg *init) | ||
573 | { | ||
574 | return (void *)(&(init->pmu_init_v4)); | ||
575 | } | ||
576 | |||
577 | static void *get_pmu_msg_pmu_init_msg_ptr_v3(struct pmu_init_msg *init) | ||
578 | { | ||
579 | return (void *)(&(init->pmu_init_v3)); | ||
580 | } | ||
581 | |||
582 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v4(union pmu_init_msg_pmu *init_msg) | ||
583 | { | ||
584 | struct pmu_init_msg_pmu_v4 *init = | ||
585 | (struct pmu_init_msg_pmu_v4 *)(&init_msg->v4); | ||
586 | |||
587 | return init->sw_managed_area_offset; | ||
588 | } | ||
589 | |||
590 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v3(union pmu_init_msg_pmu *init_msg) | ||
591 | { | ||
592 | struct pmu_init_msg_pmu_v3 *init = | ||
593 | (struct pmu_init_msg_pmu_v3 *)(&init_msg->v3); | ||
594 | |||
595 | return init->sw_managed_area_offset; | ||
596 | } | ||
597 | |||
598 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v4(union pmu_init_msg_pmu *init_msg) | ||
599 | { | ||
600 | struct pmu_init_msg_pmu_v4 *init = | ||
601 | (struct pmu_init_msg_pmu_v4 *)(&init_msg->v4); | ||
602 | |||
603 | return init->sw_managed_area_size; | ||
604 | } | ||
605 | |||
606 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v3(union pmu_init_msg_pmu *init_msg) | ||
607 | { | ||
608 | struct pmu_init_msg_pmu_v3 *init = | ||
609 | (struct pmu_init_msg_pmu_v3 *)(&init_msg->v3); | ||
610 | |||
611 | return init->sw_managed_area_size; | ||
612 | } | ||
613 | |||
614 | static void *get_pmu_msg_pmu_init_msg_ptr_v2(struct pmu_init_msg *init) | ||
615 | { | ||
616 | return (void *)(&(init->pmu_init_v2)); | ||
617 | } | ||
618 | |||
619 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v2(union pmu_init_msg_pmu *init_msg) | ||
620 | { | ||
621 | struct pmu_init_msg_pmu_v2 *init = | ||
622 | (struct pmu_init_msg_pmu_v2 *)(&init_msg->v1); | ||
623 | |||
624 | return init->sw_managed_area_offset; | ||
625 | } | ||
626 | |||
627 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v2(union pmu_init_msg_pmu *init_msg) | ||
628 | { | ||
629 | struct pmu_init_msg_pmu_v2 *init = | ||
630 | (struct pmu_init_msg_pmu_v2 *)(&init_msg->v1); | ||
631 | |||
632 | return init->sw_managed_area_size; | ||
633 | } | ||
634 | |||
635 | static void *get_pmu_msg_pmu_init_msg_ptr_v1(struct pmu_init_msg *init) | ||
636 | { | ||
637 | return (void *)(&(init->pmu_init_v1)); | ||
638 | } | ||
639 | |||
640 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v1(union pmu_init_msg_pmu *init_msg) | ||
641 | { | ||
642 | struct pmu_init_msg_pmu_v1 *init = | ||
643 | (struct pmu_init_msg_pmu_v1 *)(&init_msg->v1); | ||
644 | |||
645 | return init->sw_managed_area_offset; | ||
646 | } | ||
647 | |||
648 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v1(union pmu_init_msg_pmu *init_msg) | ||
649 | { | ||
650 | struct pmu_init_msg_pmu_v1 *init = | ||
651 | (struct pmu_init_msg_pmu_v1 *)(&init_msg->v1); | ||
652 | |||
653 | return init->sw_managed_area_size; | ||
654 | } | ||
655 | |||
656 | static void *get_pmu_msg_pmu_init_msg_ptr_v0(struct pmu_init_msg *init) | ||
657 | { | ||
658 | return (void *)(&(init->pmu_init_v0)); | ||
659 | } | ||
660 | |||
661 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v0(union pmu_init_msg_pmu *init_msg) | ||
662 | { | ||
663 | struct pmu_init_msg_pmu_v0 *init = | ||
664 | (struct pmu_init_msg_pmu_v0 *)(&init_msg->v0); | ||
665 | |||
666 | return init->sw_managed_area_offset; | ||
667 | } | ||
668 | |||
669 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v0(union pmu_init_msg_pmu *init_msg) | ||
670 | { | ||
671 | struct pmu_init_msg_pmu_v0 *init = | ||
672 | (struct pmu_init_msg_pmu_v0 *)(&init_msg->v0); | ||
673 | |||
674 | return init->sw_managed_area_size; | ||
675 | } | ||
676 | |||
/* Byte sizes of the version-specific PERFMON start commands. */
static u32 get_pmu_perfmon_cmd_start_size_v3(void)
{
	return sizeof(struct pmu_perfmon_cmd_start_v3);
}

static u32 get_pmu_perfmon_cmd_start_size_v2(void)
{
	return sizeof(struct pmu_perfmon_cmd_start_v2);
}

static u32 get_pmu_perfmon_cmd_start_size_v1(void)
{
	return sizeof(struct pmu_perfmon_cmd_start_v1);
}

static u32 get_pmu_perfmon_cmd_start_size_v0(void)
{
	return sizeof(struct pmu_perfmon_cmd_start_v0);
}
696 | |||
697 | static int get_perfmon_cmd_start_offsetofvar_v3( | ||
698 | enum pmu_perfmon_cmd_start_fields field) | ||
699 | { | ||
700 | switch (field) { | ||
701 | case COUNTER_ALLOC: | ||
702 | return offsetof(struct pmu_perfmon_cmd_start_v3, | ||
703 | counter_alloc); | ||
704 | default: | ||
705 | return -EINVAL; | ||
706 | } | ||
707 | |||
708 | return 0; | ||
709 | } | ||
710 | |||
711 | static int get_perfmon_cmd_start_offsetofvar_v2( | ||
712 | enum pmu_perfmon_cmd_start_fields field) | ||
713 | { | ||
714 | switch (field) { | ||
715 | case COUNTER_ALLOC: | ||
716 | return offsetof(struct pmu_perfmon_cmd_start_v2, | ||
717 | counter_alloc); | ||
718 | default: | ||
719 | return -EINVAL; | ||
720 | } | ||
721 | |||
722 | return 0; | ||
723 | } | ||
724 | |||
725 | static int get_perfmon_cmd_start_offsetofvar_v1( | ||
726 | enum pmu_perfmon_cmd_start_fields field) | ||
727 | { | ||
728 | switch (field) { | ||
729 | case COUNTER_ALLOC: | ||
730 | return offsetof(struct pmu_perfmon_cmd_start_v1, | ||
731 | counter_alloc); | ||
732 | default: | ||
733 | return -EINVAL; | ||
734 | } | ||
735 | |||
736 | return 0; | ||
737 | } | ||
738 | |||
739 | static int get_perfmon_cmd_start_offsetofvar_v0( | ||
740 | enum pmu_perfmon_cmd_start_fields field) | ||
741 | { | ||
742 | switch (field) { | ||
743 | case COUNTER_ALLOC: | ||
744 | return offsetof(struct pmu_perfmon_cmd_start_v0, | ||
745 | counter_alloc); | ||
746 | default: | ||
747 | return -EINVAL; | ||
748 | } | ||
749 | |||
750 | return 0; | ||
751 | } | ||
752 | |||
/* Byte sizes of the version-specific PERFMON init commands. */
static u32 get_pmu_perfmon_cmd_init_size_v3(void)
{
	return sizeof(struct pmu_perfmon_cmd_init_v3);
}

static u32 get_pmu_perfmon_cmd_init_size_v2(void)
{
	return sizeof(struct pmu_perfmon_cmd_init_v2);
}

static u32 get_pmu_perfmon_cmd_init_size_v1(void)
{
	return sizeof(struct pmu_perfmon_cmd_init_v1);
}

static u32 get_pmu_perfmon_cmd_init_size_v0(void)
{
	return sizeof(struct pmu_perfmon_cmd_init_v0);
}
772 | |||
773 | static int get_perfmon_cmd_init_offsetofvar_v3( | ||
774 | enum pmu_perfmon_cmd_start_fields field) | ||
775 | { | ||
776 | switch (field) { | ||
777 | case COUNTER_ALLOC: | ||
778 | return offsetof(struct pmu_perfmon_cmd_init_v3, | ||
779 | counter_alloc); | ||
780 | default: | ||
781 | return -EINVAL; | ||
782 | } | ||
783 | |||
784 | return 0; | ||
785 | } | ||
786 | |||
787 | static int get_perfmon_cmd_init_offsetofvar_v2( | ||
788 | enum pmu_perfmon_cmd_start_fields field) | ||
789 | { | ||
790 | switch (field) { | ||
791 | case COUNTER_ALLOC: | ||
792 | return offsetof(struct pmu_perfmon_cmd_init_v2, | ||
793 | counter_alloc); | ||
794 | default: | ||
795 | return -EINVAL; | ||
796 | } | ||
797 | |||
798 | return 0; | ||
799 | } | ||
800 | |||
801 | static int get_perfmon_cmd_init_offsetofvar_v1( | ||
802 | enum pmu_perfmon_cmd_start_fields field) | ||
803 | { | ||
804 | switch (field) { | ||
805 | case COUNTER_ALLOC: | ||
806 | return offsetof(struct pmu_perfmon_cmd_init_v1, | ||
807 | counter_alloc); | ||
808 | default: | ||
809 | return -EINVAL; | ||
810 | } | ||
811 | |||
812 | return 0; | ||
813 | } | ||
814 | |||
815 | static int get_perfmon_cmd_init_offsetofvar_v0( | ||
816 | enum pmu_perfmon_cmd_start_fields field) | ||
817 | { | ||
818 | switch (field) { | ||
819 | case COUNTER_ALLOC: | ||
820 | return offsetof(struct pmu_perfmon_cmd_init_v0, | ||
821 | counter_alloc); | ||
822 | default: | ||
823 | return -EINVAL; | ||
824 | } | ||
825 | |||
826 | return 0; | ||
827 | } | ||
828 | |||
829 | static void perfmon_start_set_cmd_type_v3(struct pmu_perfmon_cmd *pc, u8 value) | ||
830 | { | ||
831 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
832 | |||
833 | start->cmd_type = value; | ||
834 | } | ||
835 | |||
836 | static void perfmon_start_set_cmd_type_v2(struct pmu_perfmon_cmd *pc, u8 value) | ||
837 | { | ||
838 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
839 | |||
840 | start->cmd_type = value; | ||
841 | } | ||
842 | |||
843 | static void perfmon_start_set_cmd_type_v1(struct pmu_perfmon_cmd *pc, u8 value) | ||
844 | { | ||
845 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
846 | |||
847 | start->cmd_type = value; | ||
848 | } | ||
849 | |||
850 | static void perfmon_start_set_cmd_type_v0(struct pmu_perfmon_cmd *pc, u8 value) | ||
851 | { | ||
852 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
853 | |||
854 | start->cmd_type = value; | ||
855 | } | ||
856 | |||
857 | static void perfmon_start_set_group_id_v3(struct pmu_perfmon_cmd *pc, u8 value) | ||
858 | { | ||
859 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
860 | |||
861 | start->group_id = value; | ||
862 | } | ||
863 | |||
864 | static void perfmon_start_set_group_id_v2(struct pmu_perfmon_cmd *pc, u8 value) | ||
865 | { | ||
866 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
867 | |||
868 | start->group_id = value; | ||
869 | } | ||
870 | |||
871 | static void perfmon_start_set_group_id_v1(struct pmu_perfmon_cmd *pc, u8 value) | ||
872 | { | ||
873 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
874 | |||
875 | start->group_id = value; | ||
876 | } | ||
877 | |||
878 | static void perfmon_start_set_group_id_v0(struct pmu_perfmon_cmd *pc, u8 value) | ||
879 | { | ||
880 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
881 | |||
882 | start->group_id = value; | ||
883 | } | ||
884 | |||
885 | static void perfmon_start_set_state_id_v3(struct pmu_perfmon_cmd *pc, u8 value) | ||
886 | { | ||
887 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
888 | |||
889 | start->state_id = value; | ||
890 | } | ||
891 | |||
892 | static void perfmon_start_set_state_id_v2(struct pmu_perfmon_cmd *pc, u8 value) | ||
893 | { | ||
894 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
895 | |||
896 | start->state_id = value; | ||
897 | } | ||
898 | |||
899 | static void perfmon_start_set_state_id_v1(struct pmu_perfmon_cmd *pc, u8 value) | ||
900 | { | ||
901 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
902 | |||
903 | start->state_id = value; | ||
904 | } | ||
905 | |||
906 | static void perfmon_start_set_state_id_v0(struct pmu_perfmon_cmd *pc, u8 value) | ||
907 | { | ||
908 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
909 | |||
910 | start->state_id = value; | ||
911 | } | ||
912 | |||
913 | static void perfmon_start_set_flags_v3(struct pmu_perfmon_cmd *pc, u8 value) | ||
914 | { | ||
915 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
916 | |||
917 | start->flags = value; | ||
918 | } | ||
919 | |||
920 | static void perfmon_start_set_flags_v2(struct pmu_perfmon_cmd *pc, u8 value) | ||
921 | { | ||
922 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
923 | |||
924 | start->flags = value; | ||
925 | } | ||
926 | |||
927 | static void perfmon_start_set_flags_v1(struct pmu_perfmon_cmd *pc, u8 value) | ||
928 | { | ||
929 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
930 | |||
931 | start->flags = value; | ||
932 | } | ||
933 | |||
934 | static void perfmon_start_set_flags_v0(struct pmu_perfmon_cmd *pc, u8 value) | ||
935 | { | ||
936 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
937 | |||
938 | start->flags = value; | ||
939 | } | ||
940 | |||
941 | static u8 perfmon_start_get_flags_v3(struct pmu_perfmon_cmd *pc) | ||
942 | { | ||
943 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
944 | |||
945 | return start->flags; | ||
946 | } | ||
947 | |||
948 | static u8 perfmon_start_get_flags_v2(struct pmu_perfmon_cmd *pc) | ||
949 | { | ||
950 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
951 | |||
952 | return start->flags; | ||
953 | } | ||
954 | |||
955 | static u8 perfmon_start_get_flags_v1(struct pmu_perfmon_cmd *pc) | ||
956 | { | ||
957 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
958 | |||
959 | return start->flags; | ||
960 | } | ||
961 | |||
962 | static u8 perfmon_start_get_flags_v0(struct pmu_perfmon_cmd *pc) | ||
963 | { | ||
964 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
965 | |||
966 | return start->flags; | ||
967 | } | ||
968 | |||
969 | static void perfmon_cmd_init_set_sample_buffer_v3(struct pmu_perfmon_cmd *pc, | ||
970 | u16 value) | ||
971 | { | ||
972 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
973 | |||
974 | init->sample_buffer = value; | ||
975 | } | ||
976 | |||
977 | static void perfmon_cmd_init_set_sample_buffer_v2(struct pmu_perfmon_cmd *pc, | ||
978 | u16 value) | ||
979 | { | ||
980 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
981 | |||
982 | init->sample_buffer = value; | ||
983 | } | ||
984 | |||
985 | |||
986 | static void perfmon_cmd_init_set_sample_buffer_v1(struct pmu_perfmon_cmd *pc, | ||
987 | u16 value) | ||
988 | { | ||
989 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
990 | |||
991 | init->sample_buffer = value; | ||
992 | } | ||
993 | |||
994 | static void perfmon_cmd_init_set_sample_buffer_v0(struct pmu_perfmon_cmd *pc, | ||
995 | u16 value) | ||
996 | { | ||
997 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
998 | |||
999 | init->sample_buffer = value; | ||
1000 | } | ||
1001 | |||
1002 | static void perfmon_cmd_init_set_dec_cnt_v3(struct pmu_perfmon_cmd *pc, | ||
1003 | u8 value) | ||
1004 | { | ||
1005 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1006 | |||
1007 | init->to_decrease_count = value; | ||
1008 | } | ||
1009 | |||
1010 | static void perfmon_cmd_init_set_dec_cnt_v2(struct pmu_perfmon_cmd *pc, | ||
1011 | u8 value) | ||
1012 | { | ||
1013 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1014 | |||
1015 | init->to_decrease_count = value; | ||
1016 | } | ||
1017 | |||
1018 | static void perfmon_cmd_init_set_dec_cnt_v1(struct pmu_perfmon_cmd *pc, | ||
1019 | u8 value) | ||
1020 | { | ||
1021 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1022 | |||
1023 | init->to_decrease_count = value; | ||
1024 | } | ||
1025 | |||
1026 | static void perfmon_cmd_init_set_dec_cnt_v0(struct pmu_perfmon_cmd *pc, | ||
1027 | u8 value) | ||
1028 | { | ||
1029 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1030 | |||
1031 | init->to_decrease_count = value; | ||
1032 | } | ||
1033 | |||
1034 | static void perfmon_cmd_init_set_base_cnt_id_v3(struct pmu_perfmon_cmd *pc, | ||
1035 | u8 value) | ||
1036 | { | ||
1037 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1038 | |||
1039 | init->base_counter_id = value; | ||
1040 | } | ||
1041 | |||
1042 | static void perfmon_cmd_init_set_base_cnt_id_v2(struct pmu_perfmon_cmd *pc, | ||
1043 | u8 value) | ||
1044 | { | ||
1045 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1046 | |||
1047 | init->base_counter_id = value; | ||
1048 | } | ||
1049 | |||
1050 | static void perfmon_cmd_init_set_base_cnt_id_v1(struct pmu_perfmon_cmd *pc, | ||
1051 | u8 value) | ||
1052 | { | ||
1053 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1054 | |||
1055 | init->base_counter_id = value; | ||
1056 | } | ||
1057 | |||
1058 | static void perfmon_cmd_init_set_base_cnt_id_v0(struct pmu_perfmon_cmd *pc, | ||
1059 | u8 value) | ||
1060 | { | ||
1061 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1062 | |||
1063 | init->base_counter_id = value; | ||
1064 | } | ||
1065 | |||
1066 | static void perfmon_cmd_init_set_samp_period_us_v3(struct pmu_perfmon_cmd *pc, | ||
1067 | u32 value) | ||
1068 | { | ||
1069 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1070 | |||
1071 | init->sample_period_us = value; | ||
1072 | } | ||
1073 | |||
1074 | static void perfmon_cmd_init_set_samp_period_us_v2(struct pmu_perfmon_cmd *pc, | ||
1075 | u32 value) | ||
1076 | { | ||
1077 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1078 | |||
1079 | init->sample_period_us = value; | ||
1080 | } | ||
1081 | |||
1082 | static void perfmon_cmd_init_set_samp_period_us_v1(struct pmu_perfmon_cmd *pc, | ||
1083 | u32 value) | ||
1084 | { | ||
1085 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1086 | |||
1087 | init->sample_period_us = value; | ||
1088 | } | ||
1089 | |||
1090 | static void perfmon_cmd_init_set_samp_period_us_v0(struct pmu_perfmon_cmd *pc, | ||
1091 | u32 value) | ||
1092 | { | ||
1093 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1094 | |||
1095 | init->sample_period_us = value; | ||
1096 | } | ||
1097 | |||
1098 | static void perfmon_cmd_init_set_num_cnt_v3(struct pmu_perfmon_cmd *pc, | ||
1099 | u8 value) | ||
1100 | { | ||
1101 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1102 | |||
1103 | init->num_counters = value; | ||
1104 | } | ||
1105 | |||
1106 | static void perfmon_cmd_init_set_num_cnt_v2(struct pmu_perfmon_cmd *pc, | ||
1107 | u8 value) | ||
1108 | { | ||
1109 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1110 | |||
1111 | init->num_counters = value; | ||
1112 | } | ||
1113 | |||
1114 | static void perfmon_cmd_init_set_num_cnt_v1(struct pmu_perfmon_cmd *pc, | ||
1115 | u8 value) | ||
1116 | { | ||
1117 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1118 | |||
1119 | init->num_counters = value; | ||
1120 | } | ||
1121 | |||
1122 | static void perfmon_cmd_init_set_num_cnt_v0(struct pmu_perfmon_cmd *pc, | ||
1123 | u8 value) | ||
1124 | { | ||
1125 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1126 | |||
1127 | init->num_counters = value; | ||
1128 | } | ||
1129 | |||
1130 | static void perfmon_cmd_init_set_mov_avg_v3(struct pmu_perfmon_cmd *pc, | ||
1131 | u8 value) | ||
1132 | { | ||
1133 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1134 | |||
1135 | init->samples_in_moving_avg = value; | ||
1136 | } | ||
1137 | |||
1138 | static void perfmon_cmd_init_set_mov_avg_v2(struct pmu_perfmon_cmd *pc, | ||
1139 | u8 value) | ||
1140 | { | ||
1141 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1142 | |||
1143 | init->samples_in_moving_avg = value; | ||
1144 | } | ||
1145 | |||
1146 | static void perfmon_cmd_init_set_mov_avg_v1(struct pmu_perfmon_cmd *pc, | ||
1147 | u8 value) | ||
1148 | { | ||
1149 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1150 | |||
1151 | init->samples_in_moving_avg = value; | ||
1152 | } | ||
1153 | |||
1154 | static void perfmon_cmd_init_set_mov_avg_v0(struct pmu_perfmon_cmd *pc, | ||
1155 | u8 value) | ||
1156 | { | ||
1157 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1158 | |||
1159 | init->samples_in_moving_avg = value; | ||
1160 | } | ||
1161 | |||
1162 | static void get_pmu_init_msg_pmu_queue_params_v0(struct pmu_queue *queue, | ||
1163 | u32 id, void *pmu_init_msg) | ||
1164 | { | ||
1165 | struct pmu_init_msg_pmu_v0 *init = | ||
1166 | (struct pmu_init_msg_pmu_v0 *)pmu_init_msg; | ||
1167 | |||
1168 | queue->index = init->queue_info[id].index; | ||
1169 | queue->offset = init->queue_info[id].offset; | ||
1170 | queue->size = init->queue_info[id].size; | ||
1171 | } | ||
1172 | |||
1173 | static void get_pmu_init_msg_pmu_queue_params_v1(struct pmu_queue *queue, | ||
1174 | u32 id, void *pmu_init_msg) | ||
1175 | { | ||
1176 | struct pmu_init_msg_pmu_v1 *init = | ||
1177 | (struct pmu_init_msg_pmu_v1 *)pmu_init_msg; | ||
1178 | |||
1179 | queue->index = init->queue_info[id].index; | ||
1180 | queue->offset = init->queue_info[id].offset; | ||
1181 | queue->size = init->queue_info[id].size; | ||
1182 | } | ||
1183 | |||
1184 | static void get_pmu_init_msg_pmu_queue_params_v2(struct pmu_queue *queue, | ||
1185 | u32 id, void *pmu_init_msg) | ||
1186 | { | ||
1187 | struct pmu_init_msg_pmu_v2 *init = | ||
1188 | (struct pmu_init_msg_pmu_v2 *)pmu_init_msg; | ||
1189 | |||
1190 | queue->index = init->queue_info[id].index; | ||
1191 | queue->offset = init->queue_info[id].offset; | ||
1192 | queue->size = init->queue_info[id].size; | ||
1193 | } | ||
1194 | |||
1195 | static void get_pmu_init_msg_pmu_queue_params_v4(struct pmu_queue *queue, | ||
1196 | u32 id, void *pmu_init_msg) | ||
1197 | { | ||
1198 | struct pmu_init_msg_pmu_v4 *init = pmu_init_msg; | ||
1199 | u32 current_ptr = 0; | ||
1200 | u8 i; | ||
1201 | u8 tmp_id = id; | ||
1202 | |||
1203 | if (tmp_id == PMU_COMMAND_QUEUE_HPQ) | ||
1204 | tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3; | ||
1205 | else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) | ||
1206 | tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3; | ||
1207 | else if (tmp_id == PMU_MESSAGE_QUEUE) | ||
1208 | tmp_id = PMU_QUEUE_MSG_IDX_FOR_V3; | ||
1209 | else | ||
1210 | return; | ||
1211 | |||
1212 | queue->index = init->queue_index[tmp_id]; | ||
1213 | queue->size = init->queue_size[tmp_id]; | ||
1214 | if (tmp_id != 0) { | ||
1215 | for (i = 0 ; i < tmp_id; i++) | ||
1216 | current_ptr += init->queue_size[i]; | ||
1217 | } | ||
1218 | queue->offset = init->queue_offset + current_ptr; | ||
1219 | } | ||
1220 | |||
1221 | static void get_pmu_init_msg_pmu_queue_params_v3(struct pmu_queue *queue, | ||
1222 | u32 id, void *pmu_init_msg) | ||
1223 | { | ||
1224 | struct pmu_init_msg_pmu_v3 *init = | ||
1225 | (struct pmu_init_msg_pmu_v3 *)pmu_init_msg; | ||
1226 | u32 current_ptr = 0; | ||
1227 | u8 i; | ||
1228 | u8 tmp_id = id; | ||
1229 | |||
1230 | if (tmp_id == PMU_COMMAND_QUEUE_HPQ) | ||
1231 | tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3; | ||
1232 | else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) | ||
1233 | tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3; | ||
1234 | else if (tmp_id == PMU_MESSAGE_QUEUE) | ||
1235 | tmp_id = PMU_QUEUE_MSG_IDX_FOR_V3; | ||
1236 | else | ||
1237 | return; | ||
1238 | queue->index = init->queue_index[tmp_id]; | ||
1239 | queue->size = init->queue_size[tmp_id]; | ||
1240 | if (tmp_id != 0) { | ||
1241 | for (i = 0 ; i < tmp_id; i++) | ||
1242 | current_ptr += init->queue_size[i]; | ||
1243 | } | ||
1244 | queue->offset = init->queue_offset + current_ptr; | ||
1245 | } | ||
1246 | |||
1247 | static void *get_pmu_sequence_in_alloc_ptr_v3(struct pmu_sequence *seq) | ||
1248 | { | ||
1249 | return (void *)(&seq->in_v3); | ||
1250 | } | ||
1251 | |||
1252 | static void *get_pmu_sequence_in_alloc_ptr_v1(struct pmu_sequence *seq) | ||
1253 | { | ||
1254 | return (void *)(&seq->in_v1); | ||
1255 | } | ||
1256 | |||
1257 | static void *get_pmu_sequence_in_alloc_ptr_v0(struct pmu_sequence *seq) | ||
1258 | { | ||
1259 | return (void *)(&seq->in_v0); | ||
1260 | } | ||
1261 | |||
1262 | static void *get_pmu_sequence_out_alloc_ptr_v3(struct pmu_sequence *seq) | ||
1263 | { | ||
1264 | return (void *)(&seq->out_v3); | ||
1265 | } | ||
1266 | |||
1267 | static void *get_pmu_sequence_out_alloc_ptr_v1(struct pmu_sequence *seq) | ||
1268 | { | ||
1269 | return (void *)(&seq->out_v1); | ||
1270 | } | ||
1271 | |||
1272 | static void *get_pmu_sequence_out_alloc_ptr_v0(struct pmu_sequence *seq) | ||
1273 | { | ||
1274 | return (void *)(&seq->out_v0); | ||
1275 | } | ||
1276 | |||
1277 | static u8 pg_cmd_eng_buf_load_size_v0(struct pmu_pg_cmd *pg) | ||
1278 | { | ||
1279 | return sizeof(pg->eng_buf_load_v0); | ||
1280 | } | ||
1281 | |||
1282 | static u8 pg_cmd_eng_buf_load_size_v1(struct pmu_pg_cmd *pg) | ||
1283 | { | ||
1284 | return sizeof(pg->eng_buf_load_v1); | ||
1285 | } | ||
1286 | |||
1287 | static u8 pg_cmd_eng_buf_load_size_v2(struct pmu_pg_cmd *pg) | ||
1288 | { | ||
1289 | return sizeof(pg->eng_buf_load_v2); | ||
1290 | } | ||
1291 | |||
1292 | static void pg_cmd_eng_buf_load_set_cmd_type_v0(struct pmu_pg_cmd *pg, | ||
1293 | u8 value) | ||
1294 | { | ||
1295 | pg->eng_buf_load_v0.cmd_type = value; | ||
1296 | } | ||
1297 | |||
1298 | static void pg_cmd_eng_buf_load_set_cmd_type_v1(struct pmu_pg_cmd *pg, | ||
1299 | u8 value) | ||
1300 | { | ||
1301 | pg->eng_buf_load_v1.cmd_type = value; | ||
1302 | } | ||
1303 | |||
1304 | static void pg_cmd_eng_buf_load_set_cmd_type_v2(struct pmu_pg_cmd *pg, | ||
1305 | u8 value) | ||
1306 | { | ||
1307 | pg->eng_buf_load_v2.cmd_type = value; | ||
1308 | } | ||
1309 | |||
1310 | static void pg_cmd_eng_buf_load_set_engine_id_v0(struct pmu_pg_cmd *pg, | ||
1311 | u8 value) | ||
1312 | { | ||
1313 | pg->eng_buf_load_v0.engine_id = value; | ||
1314 | } | ||
1315 | static void pg_cmd_eng_buf_load_set_engine_id_v1(struct pmu_pg_cmd *pg, | ||
1316 | u8 value) | ||
1317 | { | ||
1318 | pg->eng_buf_load_v1.engine_id = value; | ||
1319 | } | ||
1320 | static void pg_cmd_eng_buf_load_set_engine_id_v2(struct pmu_pg_cmd *pg, | ||
1321 | u8 value) | ||
1322 | { | ||
1323 | pg->eng_buf_load_v2.engine_id = value; | ||
1324 | } | ||
1325 | static void pg_cmd_eng_buf_load_set_buf_idx_v0(struct pmu_pg_cmd *pg, | ||
1326 | u8 value) | ||
1327 | { | ||
1328 | pg->eng_buf_load_v0.buf_idx = value; | ||
1329 | } | ||
1330 | static void pg_cmd_eng_buf_load_set_buf_idx_v1(struct pmu_pg_cmd *pg, | ||
1331 | u8 value) | ||
1332 | { | ||
1333 | pg->eng_buf_load_v1.buf_idx = value; | ||
1334 | } | ||
1335 | static void pg_cmd_eng_buf_load_set_buf_idx_v2(struct pmu_pg_cmd *pg, | ||
1336 | u8 value) | ||
1337 | { | ||
1338 | pg->eng_buf_load_v2.buf_idx = value; | ||
1339 | } | ||
1340 | |||
1341 | static void pg_cmd_eng_buf_load_set_pad_v0(struct pmu_pg_cmd *pg, | ||
1342 | u8 value) | ||
1343 | { | ||
1344 | pg->eng_buf_load_v0.pad = value; | ||
1345 | } | ||
1346 | static void pg_cmd_eng_buf_load_set_pad_v1(struct pmu_pg_cmd *pg, | ||
1347 | u8 value) | ||
1348 | { | ||
1349 | pg->eng_buf_load_v1.pad = value; | ||
1350 | } | ||
1351 | static void pg_cmd_eng_buf_load_set_pad_v2(struct pmu_pg_cmd *pg, | ||
1352 | u8 value) | ||
1353 | { | ||
1354 | pg->eng_buf_load_v2.pad = value; | ||
1355 | } | ||
1356 | |||
1357 | static void pg_cmd_eng_buf_load_set_buf_size_v0(struct pmu_pg_cmd *pg, | ||
1358 | u16 value) | ||
1359 | { | ||
1360 | pg->eng_buf_load_v0.buf_size = value; | ||
1361 | } | ||
1362 | static void pg_cmd_eng_buf_load_set_buf_size_v1(struct pmu_pg_cmd *pg, | ||
1363 | u16 value) | ||
1364 | { | ||
1365 | pg->eng_buf_load_v1.dma_desc.dma_size = value; | ||
1366 | } | ||
1367 | static void pg_cmd_eng_buf_load_set_buf_size_v2(struct pmu_pg_cmd *pg, | ||
1368 | u16 value) | ||
1369 | { | ||
1370 | pg->eng_buf_load_v2.dma_desc.params = value; | ||
1371 | } | ||
1372 | |||
1373 | static void pg_cmd_eng_buf_load_set_dma_base_v0(struct pmu_pg_cmd *pg, | ||
1374 | u32 value) | ||
1375 | { | ||
1376 | pg->eng_buf_load_v0.dma_base = (value >> 8); | ||
1377 | } | ||
1378 | static void pg_cmd_eng_buf_load_set_dma_base_v1(struct pmu_pg_cmd *pg, | ||
1379 | u32 value) | ||
1380 | { | ||
1381 | pg->eng_buf_load_v1.dma_desc.dma_addr.lo |= u64_lo32(value); | ||
1382 | pg->eng_buf_load_v1.dma_desc.dma_addr.hi |= u64_hi32(value); | ||
1383 | } | ||
1384 | static void pg_cmd_eng_buf_load_set_dma_base_v2(struct pmu_pg_cmd *pg, | ||
1385 | u32 value) | ||
1386 | { | ||
1387 | pg->eng_buf_load_v2.dma_desc.address.lo = u64_lo32(value); | ||
1388 | pg->eng_buf_load_v2.dma_desc.address.hi = u64_lo32(value); | ||
1389 | } | ||
1390 | |||
1391 | static void pg_cmd_eng_buf_load_set_dma_offset_v0(struct pmu_pg_cmd *pg, | ||
1392 | u8 value) | ||
1393 | { | ||
1394 | pg->eng_buf_load_v0.dma_offset = value; | ||
1395 | } | ||
1396 | static void pg_cmd_eng_buf_load_set_dma_offset_v1(struct pmu_pg_cmd *pg, | ||
1397 | u8 value) | ||
1398 | { | ||
1399 | pg->eng_buf_load_v1.dma_desc.dma_addr.lo |= value; | ||
1400 | } | ||
1401 | static void pg_cmd_eng_buf_load_set_dma_offset_v2(struct pmu_pg_cmd *pg, | ||
1402 | u8 value) | ||
1403 | { | ||
1404 | pg->eng_buf_load_v2.dma_desc.address.lo |= u64_lo32(value); | ||
1405 | pg->eng_buf_load_v2.dma_desc.address.hi |= u64_lo32(value); | ||
1406 | } | ||
1407 | |||
1408 | static void pg_cmd_eng_buf_load_set_dma_idx_v0(struct pmu_pg_cmd *pg, | ||
1409 | u8 value) | ||
1410 | { | ||
1411 | pg->eng_buf_load_v0.dma_idx = value; | ||
1412 | } | ||
1413 | |||
1414 | static void pg_cmd_eng_buf_load_set_dma_idx_v1(struct pmu_pg_cmd *pg, | ||
1415 | u8 value) | ||
1416 | { | ||
1417 | pg->eng_buf_load_v1.dma_desc.dma_idx = value; | ||
1418 | } | ||
1419 | |||
1420 | static void pg_cmd_eng_buf_load_set_dma_idx_v2(struct pmu_pg_cmd *pg, | ||
1421 | u8 value) | ||
1422 | { | ||
1423 | pg->eng_buf_load_v2.dma_desc.params |= (value << 24); | ||
1424 | } | ||
1425 | |||
1426 | static int nvgpu_init_pmu_fw_ver_ops(struct nvgpu_pmu *pmu) | ||
1427 | { | ||
1428 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
1429 | struct pmu_v *pv = &g->ops.pmu_ver; | ||
1430 | int err = 0; | ||
1431 | |||
1432 | nvgpu_log_fn(g, " "); | ||
1433 | |||
1434 | switch (pmu->desc->app_version) { | ||
1435 | case APP_VERSION_NC_2: | ||
1436 | case APP_VERSION_NC_1: | ||
1437 | case APP_VERSION_NC_0: | ||
1438 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1439 | pg_cmd_eng_buf_load_size_v1; | ||
1440 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1441 | pg_cmd_eng_buf_load_set_cmd_type_v1; | ||
1442 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1443 | pg_cmd_eng_buf_load_set_engine_id_v1; | ||
1444 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1445 | pg_cmd_eng_buf_load_set_buf_idx_v1; | ||
1446 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1447 | pg_cmd_eng_buf_load_set_pad_v1; | ||
1448 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1449 | pg_cmd_eng_buf_load_set_buf_size_v1; | ||
1450 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1451 | pg_cmd_eng_buf_load_set_dma_base_v1; | ||
1452 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1453 | pg_cmd_eng_buf_load_set_dma_offset_v1; | ||
1454 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1455 | pg_cmd_eng_buf_load_set_dma_idx_v1; | ||
1456 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1457 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1458 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1459 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1460 | set_perfmon_cntr_valid_v2; | ||
1461 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1462 | set_perfmon_cntr_index_v2; | ||
1463 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1464 | set_perfmon_cntr_group_id_v2; | ||
1465 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1466 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1467 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1468 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1469 | pmu_cmdline_size_v4; | ||
1470 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1471 | set_pmu_cmdline_args_cpufreq_v4; | ||
1472 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1473 | set_pmu_cmdline_args_secure_mode_v4; | ||
1474 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1475 | set_pmu_cmdline_args_falctracesize_v4; | ||
1476 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1477 | set_pmu_cmdline_args_falctracedmabase_v4; | ||
1478 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1479 | set_pmu_cmdline_args_falctracedmaidx_v4; | ||
1480 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1481 | get_pmu_cmdline_args_ptr_v4; | ||
1482 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1483 | get_pmu_allocation_size_v2; | ||
1484 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1485 | set_pmu_allocation_ptr_v2; | ||
1486 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1487 | pmu_allocation_set_dmem_size_v2; | ||
1488 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1489 | pmu_allocation_get_dmem_size_v2; | ||
1490 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1491 | pmu_allocation_get_dmem_offset_v2; | ||
1492 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1493 | pmu_allocation_get_dmem_offset_addr_v2; | ||
1494 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1495 | pmu_allocation_set_dmem_offset_v2; | ||
1496 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1497 | get_pmu_init_msg_pmu_queue_params_v1; | ||
1498 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1499 | get_pmu_msg_pmu_init_msg_ptr_v1; | ||
1500 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1501 | get_pmu_init_msg_pmu_sw_mg_off_v1; | ||
1502 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1503 | get_pmu_init_msg_pmu_sw_mg_size_v1; | ||
1504 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1505 | get_pmu_perfmon_cmd_start_size_v2; | ||
1506 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1507 | get_perfmon_cmd_start_offsetofvar_v2; | ||
1508 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1509 | perfmon_start_set_cmd_type_v2; | ||
1510 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1511 | perfmon_start_set_group_id_v2; | ||
1512 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1513 | perfmon_start_set_state_id_v2; | ||
1514 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1515 | perfmon_start_set_flags_v2; | ||
1516 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1517 | perfmon_start_get_flags_v2; | ||
1518 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1519 | get_pmu_perfmon_cmd_init_size_v2; | ||
1520 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1521 | get_perfmon_cmd_init_offsetofvar_v2; | ||
1522 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1523 | perfmon_cmd_init_set_sample_buffer_v2; | ||
1524 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1525 | perfmon_cmd_init_set_dec_cnt_v2; | ||
1526 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1527 | perfmon_cmd_init_set_base_cnt_id_v2; | ||
1528 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1529 | perfmon_cmd_init_set_samp_period_us_v2; | ||
1530 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1531 | perfmon_cmd_init_set_num_cnt_v2; | ||
1532 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1533 | perfmon_cmd_init_set_mov_avg_v2; | ||
1534 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1535 | get_pmu_sequence_in_alloc_ptr_v1; | ||
1536 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1537 | get_pmu_sequence_out_alloc_ptr_v1; | ||
1538 | break; | ||
1539 | case APP_VERSION_NC_3: | ||
1540 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1541 | pg_cmd_eng_buf_load_size_v2; | ||
1542 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1543 | pg_cmd_eng_buf_load_set_cmd_type_v2; | ||
1544 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1545 | pg_cmd_eng_buf_load_set_engine_id_v2; | ||
1546 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1547 | pg_cmd_eng_buf_load_set_buf_idx_v2; | ||
1548 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1549 | pg_cmd_eng_buf_load_set_pad_v2; | ||
1550 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1551 | pg_cmd_eng_buf_load_set_buf_size_v2; | ||
1552 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1553 | pg_cmd_eng_buf_load_set_dma_base_v2; | ||
1554 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1555 | pg_cmd_eng_buf_load_set_dma_offset_v2; | ||
1556 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1557 | pg_cmd_eng_buf_load_set_dma_idx_v2; | ||
1558 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1559 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1560 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1561 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1562 | set_perfmon_cntr_valid_v2; | ||
1563 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1564 | set_perfmon_cntr_index_v2; | ||
1565 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1566 | set_perfmon_cntr_group_id_v2; | ||
1567 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1568 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1569 | g->ops.pmu_ver.is_pmu_zbc_save_supported = false; | ||
1570 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1571 | pmu_cmdline_size_v6; | ||
1572 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1573 | set_pmu_cmdline_args_cpufreq_v5; | ||
1574 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1575 | set_pmu_cmdline_args_secure_mode_v5; | ||
1576 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1577 | set_pmu_cmdline_args_falctracesize_v5; | ||
1578 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1579 | set_pmu_cmdline_args_falctracedmabase_v5; | ||
1580 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1581 | set_pmu_cmdline_args_falctracedmaidx_v5; | ||
1582 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1583 | get_pmu_cmdline_args_ptr_v5; | ||
1584 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1585 | get_pmu_allocation_size_v3; | ||
1586 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1587 | set_pmu_allocation_ptr_v3; | ||
1588 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1589 | pmu_allocation_set_dmem_size_v3; | ||
1590 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1591 | pmu_allocation_get_dmem_size_v3; | ||
1592 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1593 | pmu_allocation_get_dmem_offset_v3; | ||
1594 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1595 | pmu_allocation_get_dmem_offset_addr_v3; | ||
1596 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1597 | pmu_allocation_set_dmem_offset_v3; | ||
1598 | g->ops.pmu_ver.pmu_allocation_get_fb_addr = | ||
1599 | pmu_allocation_get_fb_addr_v3; | ||
1600 | g->ops.pmu_ver.pmu_allocation_get_fb_size = | ||
1601 | pmu_allocation_get_fb_size_v3; | ||
1602 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1603 | get_pmu_init_msg_pmu_queue_params_v4; | ||
1604 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1605 | get_pmu_msg_pmu_init_msg_ptr_v4; | ||
1606 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1607 | get_pmu_init_msg_pmu_sw_mg_off_v4; | ||
1608 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1609 | get_pmu_init_msg_pmu_sw_mg_size_v4; | ||
1610 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1611 | get_pmu_perfmon_cmd_start_size_v3; | ||
1612 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1613 | get_perfmon_cmd_start_offsetofvar_v3; | ||
1614 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1615 | perfmon_start_set_cmd_type_v3; | ||
1616 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1617 | perfmon_start_set_group_id_v3; | ||
1618 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1619 | perfmon_start_set_state_id_v3; | ||
1620 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1621 | perfmon_start_set_flags_v3; | ||
1622 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1623 | perfmon_start_get_flags_v3; | ||
1624 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1625 | get_pmu_perfmon_cmd_init_size_v3; | ||
1626 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1627 | get_perfmon_cmd_init_offsetofvar_v3; | ||
1628 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1629 | perfmon_cmd_init_set_sample_buffer_v3; | ||
1630 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1631 | perfmon_cmd_init_set_dec_cnt_v3; | ||
1632 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1633 | perfmon_cmd_init_set_base_cnt_id_v3; | ||
1634 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1635 | perfmon_cmd_init_set_samp_period_us_v3; | ||
1636 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1637 | perfmon_cmd_init_set_num_cnt_v3; | ||
1638 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1639 | perfmon_cmd_init_set_mov_avg_v3; | ||
1640 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1641 | get_pmu_sequence_in_alloc_ptr_v3; | ||
1642 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1643 | get_pmu_sequence_out_alloc_ptr_v3; | ||
1644 | break; | ||
1645 | case APP_VERSION_GM206: | ||
1646 | case APP_VERSION_NV_GPU: | ||
1647 | case APP_VERSION_NV_GPU_1: | ||
1648 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1649 | pg_cmd_eng_buf_load_size_v2; | ||
1650 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1651 | pg_cmd_eng_buf_load_set_cmd_type_v2; | ||
1652 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1653 | pg_cmd_eng_buf_load_set_engine_id_v2; | ||
1654 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1655 | pg_cmd_eng_buf_load_set_buf_idx_v2; | ||
1656 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1657 | pg_cmd_eng_buf_load_set_pad_v2; | ||
1658 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1659 | pg_cmd_eng_buf_load_set_buf_size_v2; | ||
1660 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1661 | pg_cmd_eng_buf_load_set_dma_base_v2; | ||
1662 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1663 | pg_cmd_eng_buf_load_set_dma_offset_v2; | ||
1664 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1665 | pg_cmd_eng_buf_load_set_dma_idx_v2; | ||
1666 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1667 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1668 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1669 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1670 | set_perfmon_cntr_valid_v2; | ||
1671 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1672 | set_perfmon_cntr_index_v2; | ||
1673 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1674 | set_perfmon_cntr_group_id_v2; | ||
1675 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1676 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1677 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1678 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1679 | pmu_cmdline_size_v5; | ||
1680 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1681 | set_pmu_cmdline_args_cpufreq_v5; | ||
1682 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1683 | set_pmu_cmdline_args_secure_mode_v5; | ||
1684 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1685 | set_pmu_cmdline_args_falctracesize_v5; | ||
1686 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1687 | set_pmu_cmdline_args_falctracedmabase_v5; | ||
1688 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1689 | set_pmu_cmdline_args_falctracedmaidx_v5; | ||
1690 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1691 | get_pmu_cmdline_args_ptr_v5; | ||
1692 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1693 | get_pmu_allocation_size_v3; | ||
1694 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1695 | set_pmu_allocation_ptr_v3; | ||
1696 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1697 | pmu_allocation_set_dmem_size_v3; | ||
1698 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1699 | pmu_allocation_get_dmem_size_v3; | ||
1700 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1701 | pmu_allocation_get_dmem_offset_v3; | ||
1702 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1703 | pmu_allocation_get_dmem_offset_addr_v3; | ||
1704 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1705 | pmu_allocation_set_dmem_offset_v3; | ||
1706 | g->ops.pmu_ver.pmu_allocation_get_fb_addr = | ||
1707 | pmu_allocation_get_fb_addr_v3; | ||
1708 | g->ops.pmu_ver.pmu_allocation_get_fb_size = | ||
1709 | pmu_allocation_get_fb_size_v3; | ||
1710 | if (pmu->desc->app_version != APP_VERSION_NV_GPU && | ||
1711 | pmu->desc->app_version != APP_VERSION_NV_GPU_1) { | ||
1712 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1713 | get_pmu_init_msg_pmu_queue_params_v2; | ||
1714 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1715 | get_pmu_msg_pmu_init_msg_ptr_v2; | ||
1716 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1717 | get_pmu_init_msg_pmu_sw_mg_off_v2; | ||
1718 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1719 | get_pmu_init_msg_pmu_sw_mg_size_v2; | ||
1720 | } else { | ||
1721 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1722 | get_pmu_init_msg_pmu_queue_params_v3; | ||
1723 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1724 | get_pmu_msg_pmu_init_msg_ptr_v3; | ||
1725 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1726 | get_pmu_init_msg_pmu_sw_mg_off_v3; | ||
1727 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1728 | get_pmu_init_msg_pmu_sw_mg_size_v3; | ||
1729 | } | ||
1730 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1731 | get_pmu_perfmon_cmd_start_size_v3; | ||
1732 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1733 | get_perfmon_cmd_start_offsetofvar_v3; | ||
1734 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1735 | perfmon_start_set_cmd_type_v3; | ||
1736 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1737 | perfmon_start_set_group_id_v3; | ||
1738 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1739 | perfmon_start_set_state_id_v3; | ||
1740 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1741 | perfmon_start_set_flags_v3; | ||
1742 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1743 | perfmon_start_get_flags_v3; | ||
1744 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1745 | get_pmu_perfmon_cmd_init_size_v3; | ||
1746 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1747 | get_perfmon_cmd_init_offsetofvar_v3; | ||
1748 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1749 | perfmon_cmd_init_set_sample_buffer_v3; | ||
1750 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1751 | perfmon_cmd_init_set_dec_cnt_v3; | ||
1752 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1753 | perfmon_cmd_init_set_base_cnt_id_v3; | ||
1754 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1755 | perfmon_cmd_init_set_samp_period_us_v3; | ||
1756 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1757 | perfmon_cmd_init_set_num_cnt_v3; | ||
1758 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1759 | perfmon_cmd_init_set_mov_avg_v3; | ||
1760 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1761 | get_pmu_sequence_in_alloc_ptr_v3; | ||
1762 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1763 | get_pmu_sequence_out_alloc_ptr_v3; | ||
1764 | break; | ||
1765 | case APP_VERSION_GM20B_5: | ||
1766 | case APP_VERSION_GM20B_4: | ||
1767 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1768 | pg_cmd_eng_buf_load_size_v0; | ||
1769 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1770 | pg_cmd_eng_buf_load_set_cmd_type_v0; | ||
1771 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1772 | pg_cmd_eng_buf_load_set_engine_id_v0; | ||
1773 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1774 | pg_cmd_eng_buf_load_set_buf_idx_v0; | ||
1775 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1776 | pg_cmd_eng_buf_load_set_pad_v0; | ||
1777 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1778 | pg_cmd_eng_buf_load_set_buf_size_v0; | ||
1779 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1780 | pg_cmd_eng_buf_load_set_dma_base_v0; | ||
1781 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1782 | pg_cmd_eng_buf_load_set_dma_offset_v0; | ||
1783 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1784 | pg_cmd_eng_buf_load_set_dma_idx_v0; | ||
1785 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1786 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1787 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1788 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1789 | set_perfmon_cntr_valid_v2; | ||
1790 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1791 | set_perfmon_cntr_index_v2; | ||
1792 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1793 | set_perfmon_cntr_group_id_v2; | ||
1794 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1795 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1796 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1797 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1798 | pmu_cmdline_size_v3; | ||
1799 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1800 | set_pmu_cmdline_args_cpufreq_v3; | ||
1801 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1802 | set_pmu_cmdline_args_secure_mode_v3; | ||
1803 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1804 | set_pmu_cmdline_args_falctracesize_v3; | ||
1805 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1806 | set_pmu_cmdline_args_falctracedmabase_v3; | ||
1807 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1808 | set_pmu_cmdline_args_falctracedmaidx_v3; | ||
1809 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1810 | get_pmu_cmdline_args_ptr_v3; | ||
1811 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1812 | get_pmu_allocation_size_v1; | ||
1813 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1814 | set_pmu_allocation_ptr_v1; | ||
1815 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1816 | pmu_allocation_set_dmem_size_v1; | ||
1817 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1818 | pmu_allocation_get_dmem_size_v1; | ||
1819 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1820 | pmu_allocation_get_dmem_offset_v1; | ||
1821 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1822 | pmu_allocation_get_dmem_offset_addr_v1; | ||
1823 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1824 | pmu_allocation_set_dmem_offset_v1; | ||
1825 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1826 | get_pmu_init_msg_pmu_queue_params_v1; | ||
1827 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1828 | get_pmu_msg_pmu_init_msg_ptr_v1; | ||
1829 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1830 | get_pmu_init_msg_pmu_sw_mg_off_v1; | ||
1831 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1832 | get_pmu_init_msg_pmu_sw_mg_size_v1; | ||
1833 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1834 | get_pmu_perfmon_cmd_start_size_v1; | ||
1835 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1836 | get_perfmon_cmd_start_offsetofvar_v1; | ||
1837 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1838 | perfmon_start_set_cmd_type_v1; | ||
1839 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1840 | perfmon_start_set_group_id_v1; | ||
1841 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1842 | perfmon_start_set_state_id_v1; | ||
1843 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1844 | perfmon_start_set_flags_v1; | ||
1845 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1846 | perfmon_start_get_flags_v1; | ||
1847 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1848 | get_pmu_perfmon_cmd_init_size_v1; | ||
1849 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1850 | get_perfmon_cmd_init_offsetofvar_v1; | ||
1851 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1852 | perfmon_cmd_init_set_sample_buffer_v1; | ||
1853 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1854 | perfmon_cmd_init_set_dec_cnt_v1; | ||
1855 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1856 | perfmon_cmd_init_set_base_cnt_id_v1; | ||
1857 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1858 | perfmon_cmd_init_set_samp_period_us_v1; | ||
1859 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1860 | perfmon_cmd_init_set_num_cnt_v1; | ||
1861 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1862 | perfmon_cmd_init_set_mov_avg_v1; | ||
1863 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1864 | get_pmu_sequence_in_alloc_ptr_v1; | ||
1865 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1866 | get_pmu_sequence_out_alloc_ptr_v1; | ||
1867 | break; | ||
1868 | case APP_VERSION_GM20B_3: | ||
1869 | case APP_VERSION_GM20B_2: | ||
1870 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1871 | pg_cmd_eng_buf_load_size_v0; | ||
1872 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1873 | pg_cmd_eng_buf_load_set_cmd_type_v0; | ||
1874 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1875 | pg_cmd_eng_buf_load_set_engine_id_v0; | ||
1876 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1877 | pg_cmd_eng_buf_load_set_buf_idx_v0; | ||
1878 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1879 | pg_cmd_eng_buf_load_set_pad_v0; | ||
1880 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1881 | pg_cmd_eng_buf_load_set_buf_size_v0; | ||
1882 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1883 | pg_cmd_eng_buf_load_set_dma_base_v0; | ||
1884 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1885 | pg_cmd_eng_buf_load_set_dma_offset_v0; | ||
1886 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1887 | pg_cmd_eng_buf_load_set_dma_idx_v0; | ||
1888 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1889 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1890 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1891 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1892 | set_perfmon_cntr_valid_v2; | ||
1893 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1894 | set_perfmon_cntr_index_v2; | ||
1895 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1896 | set_perfmon_cntr_group_id_v2; | ||
1897 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1898 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1899 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1900 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1901 | pmu_cmdline_size_v2; | ||
1902 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1903 | set_pmu_cmdline_args_cpufreq_v2; | ||
1904 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1905 | set_pmu_cmdline_args_secure_mode_v2; | ||
1906 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1907 | set_pmu_cmdline_args_falctracesize_v2; | ||
1908 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1909 | set_pmu_cmdline_args_falctracedmabase_v2; | ||
1910 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1911 | set_pmu_cmdline_args_falctracedmaidx_v2; | ||
1912 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1913 | get_pmu_cmdline_args_ptr_v2; | ||
1914 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1915 | get_pmu_allocation_size_v1; | ||
1916 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1917 | set_pmu_allocation_ptr_v1; | ||
1918 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1919 | pmu_allocation_set_dmem_size_v1; | ||
1920 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1921 | pmu_allocation_get_dmem_size_v1; | ||
1922 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1923 | pmu_allocation_get_dmem_offset_v1; | ||
1924 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1925 | pmu_allocation_get_dmem_offset_addr_v1; | ||
1926 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1927 | pmu_allocation_set_dmem_offset_v1; | ||
1928 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1929 | get_pmu_init_msg_pmu_queue_params_v1; | ||
1930 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1931 | get_pmu_msg_pmu_init_msg_ptr_v1; | ||
1932 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1933 | get_pmu_init_msg_pmu_sw_mg_off_v1; | ||
1934 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1935 | get_pmu_init_msg_pmu_sw_mg_size_v1; | ||
1936 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1937 | get_pmu_perfmon_cmd_start_size_v1; | ||
1938 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1939 | get_perfmon_cmd_start_offsetofvar_v1; | ||
1940 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1941 | perfmon_start_set_cmd_type_v1; | ||
1942 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1943 | perfmon_start_set_group_id_v1; | ||
1944 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1945 | perfmon_start_set_state_id_v1; | ||
1946 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1947 | perfmon_start_set_flags_v1; | ||
1948 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1949 | perfmon_start_get_flags_v1; | ||
1950 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1951 | get_pmu_perfmon_cmd_init_size_v1; | ||
1952 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1953 | get_perfmon_cmd_init_offsetofvar_v1; | ||
1954 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1955 | perfmon_cmd_init_set_sample_buffer_v1; | ||
1956 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1957 | perfmon_cmd_init_set_dec_cnt_v1; | ||
1958 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1959 | perfmon_cmd_init_set_base_cnt_id_v1; | ||
1960 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1961 | perfmon_cmd_init_set_samp_period_us_v1; | ||
1962 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1963 | perfmon_cmd_init_set_num_cnt_v1; | ||
1964 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1965 | perfmon_cmd_init_set_mov_avg_v1; | ||
1966 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1967 | get_pmu_sequence_in_alloc_ptr_v1; | ||
1968 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1969 | get_pmu_sequence_out_alloc_ptr_v1; | ||
1970 | break; | ||
1971 | case APP_VERSION_GM20B_1: | ||
1972 | case APP_VERSION_GM20B: | ||
1973 | case APP_VERSION_1: | ||
1974 | case APP_VERSION_2: | ||
1975 | case APP_VERSION_3: | ||
1976 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1977 | pg_cmd_eng_buf_load_size_v0; | ||
1978 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1979 | pg_cmd_eng_buf_load_set_cmd_type_v0; | ||
1980 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1981 | pg_cmd_eng_buf_load_set_engine_id_v0; | ||
1982 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1983 | pg_cmd_eng_buf_load_set_buf_idx_v0; | ||
1984 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1985 | pg_cmd_eng_buf_load_set_pad_v0; | ||
1986 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1987 | pg_cmd_eng_buf_load_set_buf_size_v0; | ||
1988 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1989 | pg_cmd_eng_buf_load_set_dma_base_v0; | ||
1990 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1991 | pg_cmd_eng_buf_load_set_dma_offset_v0; | ||
1992 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1993 | pg_cmd_eng_buf_load_set_dma_idx_v0; | ||
1994 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1995 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1996 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v0; | ||
1997 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v0; | ||
1998 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v0; | ||
1999 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
2000 | set_perfmon_cntr_valid_v0; | ||
2001 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
2002 | set_perfmon_cntr_index_v0; | ||
2003 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
2004 | set_perfmon_cntr_group_id_v0; | ||
2005 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v0; | ||
2006 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
2007 | pmu_cmdline_size_v1; | ||
2008 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
2009 | set_pmu_cmdline_args_cpufreq_v1; | ||
2010 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
2011 | set_pmu_cmdline_args_secure_mode_v1; | ||
2012 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
2013 | set_pmu_cmdline_args_falctracesize_v1; | ||
2014 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
2015 | set_pmu_cmdline_args_falctracedmabase_v1; | ||
2016 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
2017 | set_pmu_cmdline_args_falctracedmaidx_v1; | ||
2018 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
2019 | get_pmu_cmdline_args_ptr_v1; | ||
2020 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
2021 | get_pmu_allocation_size_v1; | ||
2022 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
2023 | set_pmu_allocation_ptr_v1; | ||
2024 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
2025 | pmu_allocation_set_dmem_size_v1; | ||
2026 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
2027 | pmu_allocation_get_dmem_size_v1; | ||
2028 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
2029 | pmu_allocation_get_dmem_offset_v1; | ||
2030 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
2031 | pmu_allocation_get_dmem_offset_addr_v1; | ||
2032 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
2033 | pmu_allocation_set_dmem_offset_v1; | ||
2034 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
2035 | get_pmu_init_msg_pmu_queue_params_v1; | ||
2036 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
2037 | get_pmu_msg_pmu_init_msg_ptr_v1; | ||
2038 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
2039 | get_pmu_init_msg_pmu_sw_mg_off_v1; | ||
2040 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
2041 | get_pmu_init_msg_pmu_sw_mg_size_v1; | ||
2042 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
2043 | get_pmu_perfmon_cmd_start_size_v1; | ||
2044 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
2045 | get_perfmon_cmd_start_offsetofvar_v1; | ||
2046 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
2047 | perfmon_start_set_cmd_type_v1; | ||
2048 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
2049 | perfmon_start_set_group_id_v1; | ||
2050 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
2051 | perfmon_start_set_state_id_v1; | ||
2052 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
2053 | perfmon_start_set_flags_v1; | ||
2054 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
2055 | perfmon_start_get_flags_v1; | ||
2056 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
2057 | get_pmu_perfmon_cmd_init_size_v1; | ||
2058 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
2059 | get_perfmon_cmd_init_offsetofvar_v1; | ||
2060 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
2061 | perfmon_cmd_init_set_sample_buffer_v1; | ||
2062 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
2063 | perfmon_cmd_init_set_dec_cnt_v1; | ||
2064 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
2065 | perfmon_cmd_init_set_base_cnt_id_v1; | ||
2066 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
2067 | perfmon_cmd_init_set_samp_period_us_v1; | ||
2068 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
2069 | perfmon_cmd_init_set_num_cnt_v1; | ||
2070 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
2071 | perfmon_cmd_init_set_mov_avg_v1; | ||
2072 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
2073 | get_pmu_sequence_in_alloc_ptr_v1; | ||
2074 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
2075 | get_pmu_sequence_out_alloc_ptr_v1; | ||
2076 | break; | ||
2077 | case APP_VERSION_0: | ||
2078 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
2079 | pg_cmd_eng_buf_load_size_v0; | ||
2080 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
2081 | pg_cmd_eng_buf_load_set_cmd_type_v0; | ||
2082 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
2083 | pg_cmd_eng_buf_load_set_engine_id_v0; | ||
2084 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
2085 | pg_cmd_eng_buf_load_set_buf_idx_v0; | ||
2086 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
2087 | pg_cmd_eng_buf_load_set_pad_v0; | ||
2088 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
2089 | pg_cmd_eng_buf_load_set_buf_size_v0; | ||
2090 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
2091 | pg_cmd_eng_buf_load_set_dma_base_v0; | ||
2092 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
2093 | pg_cmd_eng_buf_load_set_dma_offset_v0; | ||
2094 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
2095 | pg_cmd_eng_buf_load_set_dma_idx_v0; | ||
2096 | g->ops.pmu_ver.cmd_id_zbc_table_update = 14; | ||
2097 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
2098 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v0; | ||
2099 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v0; | ||
2100 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v0; | ||
2101 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
2102 | set_perfmon_cntr_valid_v0; | ||
2103 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
2104 | set_perfmon_cntr_index_v0; | ||
2105 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
2106 | set_perfmon_cntr_group_id_v0; | ||
2107 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v0; | ||
2108 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
2109 | pmu_cmdline_size_v0; | ||
2110 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
2111 | set_pmu_cmdline_args_cpufreq_v0; | ||
2112 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
2113 | NULL; | ||
2114 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
2115 | get_pmu_cmdline_args_ptr_v0; | ||
2116 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
2117 | get_pmu_allocation_size_v0; | ||
2118 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
2119 | set_pmu_allocation_ptr_v0; | ||
2120 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
2121 | pmu_allocation_set_dmem_size_v0; | ||
2122 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
2123 | pmu_allocation_get_dmem_size_v0; | ||
2124 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
2125 | pmu_allocation_get_dmem_offset_v0; | ||
2126 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
2127 | pmu_allocation_get_dmem_offset_addr_v0; | ||
2128 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
2129 | pmu_allocation_set_dmem_offset_v0; | ||
2130 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
2131 | get_pmu_init_msg_pmu_queue_params_v0; | ||
2132 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
2133 | get_pmu_msg_pmu_init_msg_ptr_v0; | ||
2134 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
2135 | get_pmu_init_msg_pmu_sw_mg_off_v0; | ||
2136 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
2137 | get_pmu_init_msg_pmu_sw_mg_size_v0; | ||
2138 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
2139 | get_pmu_perfmon_cmd_start_size_v0; | ||
2140 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
2141 | get_perfmon_cmd_start_offsetofvar_v0; | ||
2142 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
2143 | perfmon_start_set_cmd_type_v0; | ||
2144 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
2145 | perfmon_start_set_group_id_v0; | ||
2146 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
2147 | perfmon_start_set_state_id_v0; | ||
2148 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
2149 | perfmon_start_set_flags_v0; | ||
2150 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
2151 | perfmon_start_get_flags_v0; | ||
2152 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
2153 | get_pmu_perfmon_cmd_init_size_v0; | ||
2154 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
2155 | get_perfmon_cmd_init_offsetofvar_v0; | ||
2156 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
2157 | perfmon_cmd_init_set_sample_buffer_v0; | ||
2158 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
2159 | perfmon_cmd_init_set_dec_cnt_v0; | ||
2160 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
2161 | perfmon_cmd_init_set_base_cnt_id_v0; | ||
2162 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
2163 | perfmon_cmd_init_set_samp_period_us_v0; | ||
2164 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
2165 | perfmon_cmd_init_set_num_cnt_v0; | ||
2166 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
2167 | perfmon_cmd_init_set_mov_avg_v0; | ||
2168 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
2169 | get_pmu_sequence_in_alloc_ptr_v0; | ||
2170 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
2171 | get_pmu_sequence_out_alloc_ptr_v0; | ||
2172 | break; | ||
2173 | default: | ||
2174 | nvgpu_err(g, "PMU code version not supported version: %d\n", | ||
2175 | pmu->desc->app_version); | ||
2176 | err = -EINVAL; | ||
2177 | } | ||
2178 | pv->set_perfmon_cntr_index(pmu, 3); /* GR & CE2 */ | ||
2179 | pv->set_perfmon_cntr_group_id(pmu, PMU_DOMAIN_GROUP_PSTATE); | ||
2180 | |||
2181 | return err; | ||
2182 | } | ||
2183 | |||
/*
 * Teardown counterpart of nvgpu_init_pmu_fw_support(): releases the PMU
 * DMEM allocator, the firmware image, and every mutex created at init.
 * Installed as pmu->remove_support by nvgpu_init_pmu_fw_support().
 */
static void nvgpu_remove_pmu_support(struct nvgpu_pmu *pmu)
{
	struct gk20a *g = gk20a_from_pmu(pmu);

	nvgpu_log_fn(g, " ");

	/* DMEM allocator is set up lazily elsewhere; destroy only if live. */
	if (nvgpu_alloc_initialized(&pmu->dmem))
		nvgpu_alloc_destroy(&pmu->dmem);

	nvgpu_release_firmware(g, pmu->fw);

	/* Destroy the mutexes created in nvgpu_init_pmu_fw_support(). */
	nvgpu_mutex_destroy(&pmu->elpg_mutex);
	nvgpu_mutex_destroy(&pmu->pg_mutex);
	nvgpu_mutex_destroy(&pmu->isr_mutex);
	nvgpu_mutex_destroy(&pmu->pmu_copy_lock);
	nvgpu_mutex_destroy(&pmu->pmu_seq_lock);
}
2201 | |||
2202 | int nvgpu_init_pmu_fw_support(struct nvgpu_pmu *pmu) | ||
2203 | { | ||
2204 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
2205 | int err = 0; | ||
2206 | |||
2207 | nvgpu_log_fn(g, " "); | ||
2208 | |||
2209 | err = nvgpu_mutex_init(&pmu->elpg_mutex); | ||
2210 | if (err) | ||
2211 | return err; | ||
2212 | |||
2213 | err = nvgpu_mutex_init(&pmu->pg_mutex); | ||
2214 | if (err) | ||
2215 | goto fail_elpg; | ||
2216 | |||
2217 | err = nvgpu_mutex_init(&pmu->isr_mutex); | ||
2218 | if (err) | ||
2219 | goto fail_pg; | ||
2220 | |||
2221 | err = nvgpu_mutex_init(&pmu->pmu_copy_lock); | ||
2222 | if (err) | ||
2223 | goto fail_isr; | ||
2224 | |||
2225 | err = nvgpu_mutex_init(&pmu->pmu_seq_lock); | ||
2226 | if (err) | ||
2227 | goto fail_pmu_copy; | ||
2228 | |||
2229 | pmu->remove_support = nvgpu_remove_pmu_support; | ||
2230 | |||
2231 | err = nvgpu_init_pmu_fw_ver_ops(pmu); | ||
2232 | if (err) | ||
2233 | goto fail_pmu_seq; | ||
2234 | |||
2235 | goto exit; | ||
2236 | |||
2237 | fail_pmu_seq: | ||
2238 | nvgpu_mutex_destroy(&pmu->pmu_seq_lock); | ||
2239 | fail_pmu_copy: | ||
2240 | nvgpu_mutex_destroy(&pmu->pmu_copy_lock); | ||
2241 | fail_isr: | ||
2242 | nvgpu_mutex_destroy(&pmu->isr_mutex); | ||
2243 | fail_pg: | ||
2244 | nvgpu_mutex_destroy(&pmu->pg_mutex); | ||
2245 | fail_elpg: | ||
2246 | nvgpu_mutex_destroy(&pmu->elpg_mutex); | ||
2247 | exit: | ||
2248 | return err; | ||
2249 | } | ||
2250 | |||
2251 | int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g) | ||
2252 | { | ||
2253 | struct nvgpu_pmu *pmu = &g->pmu; | ||
2254 | int err = 0; | ||
2255 | struct mm_gk20a *mm = &g->mm; | ||
2256 | struct vm_gk20a *vm = mm->pmu.vm; | ||
2257 | |||
2258 | nvgpu_log_fn(g, " "); | ||
2259 | |||
2260 | if (pmu->fw) | ||
2261 | return nvgpu_init_pmu_fw_support(pmu); | ||
2262 | |||
2263 | pmu->fw = nvgpu_request_firmware(g, NVGPU_PMU_NS_UCODE_IMAGE, 0); | ||
2264 | if (!pmu->fw) { | ||
2265 | nvgpu_err(g, "failed to load pmu ucode!!"); | ||
2266 | return err; | ||
2267 | } | ||
2268 | |||
2269 | nvgpu_log_fn(g, "firmware loaded"); | ||
2270 | |||
2271 | pmu->desc = (struct pmu_ucode_desc *)pmu->fw->data; | ||
2272 | pmu->ucode_image = (u32 *)((u8 *)pmu->desc + | ||
2273 | pmu->desc->descriptor_size); | ||
2274 | |||
2275 | err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX, | ||
2276 | &pmu->ucode); | ||
2277 | if (err) | ||
2278 | goto err_release_fw; | ||
2279 | |||
2280 | nvgpu_mem_wr_n(g, &pmu->ucode, 0, pmu->ucode_image, | ||
2281 | pmu->desc->app_start_offset + pmu->desc->app_size); | ||
2282 | |||
2283 | return nvgpu_init_pmu_fw_support(pmu); | ||
2284 | |||
2285 | err_release_fw: | ||
2286 | nvgpu_release_firmware(g, pmu->fw); | ||
2287 | pmu->fw = NULL; | ||
2288 | |||
2289 | return err; | ||
2290 | } | ||
2291 | |||
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c index a9457330..247b38a5 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c | |||
@@ -35,249 +35,20 @@ | |||
35 | #include "nvgpu_gpuid_t19x.h" | 35 | #include "nvgpu_gpuid_t19x.h" |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | #define GK20A_PMU_UCODE_IMAGE "gpmu_ucode.bin" | 38 | #define gk20a_dbg_pmu(fmt, arg...) \ |
39 | gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) | ||
39 | 40 | ||
40 | #define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000 | 41 | #define PMU_MEM_SCRUBBING_TIMEOUT_MAX 1000 |
41 | #define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10 | 42 | #define PMU_MEM_SCRUBBING_TIMEOUT_DEFAULT 10 |
42 | 43 | ||
43 | #define gk20a_dbg_pmu(fmt, arg...) \ | ||
44 | gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) | ||
45 | |||
46 | static void ap_callback_init_and_enable_ctrl( | 44 | static void ap_callback_init_and_enable_ctrl( |
47 | struct gk20a *g, struct pmu_msg *msg, | 45 | struct gk20a *g, struct pmu_msg *msg, |
48 | void *param, u32 seq_desc, u32 status); | 46 | void *param, u32 seq_desc, u32 status); |
49 | 47 | ||
50 | static u32 pmu_perfmon_cntr_sz_v0(struct nvgpu_pmu *pmu) | ||
51 | { | ||
52 | return sizeof(struct pmu_perfmon_counter_v0); | ||
53 | } | ||
54 | |||
55 | static u32 pmu_perfmon_cntr_sz_v2(struct nvgpu_pmu *pmu) | ||
56 | { | ||
57 | return sizeof(struct pmu_perfmon_counter_v2); | ||
58 | } | ||
59 | |||
60 | static void *get_perfmon_cntr_ptr_v2(struct nvgpu_pmu *pmu) | ||
61 | { | ||
62 | return (void *)(&pmu->perfmon_counter_v2); | ||
63 | } | ||
64 | |||
65 | static void *get_perfmon_cntr_ptr_v0(struct nvgpu_pmu *pmu) | ||
66 | { | ||
67 | return (void *)(&pmu->perfmon_counter_v0); | ||
68 | } | ||
69 | |||
70 | static void set_perfmon_cntr_ut_v2(struct nvgpu_pmu *pmu, u16 ut) | ||
71 | { | ||
72 | pmu->perfmon_counter_v2.upper_threshold = ut; | ||
73 | } | ||
74 | |||
75 | static void set_perfmon_cntr_ut_v0(struct nvgpu_pmu *pmu, u16 ut) | ||
76 | { | ||
77 | pmu->perfmon_counter_v0.upper_threshold = ut; | ||
78 | } | ||
79 | |||
80 | static void set_perfmon_cntr_lt_v2(struct nvgpu_pmu *pmu, u16 lt) | ||
81 | { | ||
82 | pmu->perfmon_counter_v2.lower_threshold = lt; | ||
83 | } | ||
84 | |||
85 | static void set_perfmon_cntr_lt_v0(struct nvgpu_pmu *pmu, u16 lt) | ||
86 | { | ||
87 | pmu->perfmon_counter_v0.lower_threshold = lt; | ||
88 | } | ||
89 | |||
90 | static void set_perfmon_cntr_valid_v2(struct nvgpu_pmu *pmu, u8 valid) | ||
91 | { | ||
92 | pmu->perfmon_counter_v2.valid = valid; | ||
93 | } | ||
94 | |||
95 | static void set_perfmon_cntr_valid_v0(struct nvgpu_pmu *pmu, u8 valid) | ||
96 | { | ||
97 | pmu->perfmon_counter_v0.valid = valid; | ||
98 | } | ||
99 | |||
100 | static void set_perfmon_cntr_index_v2(struct nvgpu_pmu *pmu, u8 index) | ||
101 | { | ||
102 | pmu->perfmon_counter_v2.index = index; | ||
103 | } | ||
104 | |||
105 | static void set_perfmon_cntr_index_v0(struct nvgpu_pmu *pmu, u8 index) | ||
106 | { | ||
107 | pmu->perfmon_counter_v0.index = index; | ||
108 | } | ||
109 | |||
110 | static void set_perfmon_cntr_group_id_v2(struct nvgpu_pmu *pmu, u8 gid) | ||
111 | { | ||
112 | pmu->perfmon_counter_v2.group_id = gid; | ||
113 | } | ||
114 | |||
115 | static void set_perfmon_cntr_group_id_v0(struct nvgpu_pmu *pmu, u8 gid) | ||
116 | { | ||
117 | pmu->perfmon_counter_v0.group_id = gid; | ||
118 | } | ||
119 | |||
120 | static u32 pmu_cmdline_size_v0(struct nvgpu_pmu *pmu) | ||
121 | { | ||
122 | return sizeof(struct pmu_cmdline_args_v0); | ||
123 | } | ||
124 | |||
125 | static u32 pmu_cmdline_size_v1(struct nvgpu_pmu *pmu) | ||
126 | { | ||
127 | return sizeof(struct pmu_cmdline_args_v1); | ||
128 | } | ||
129 | |||
130 | static u32 pmu_cmdline_size_v2(struct nvgpu_pmu *pmu) | ||
131 | { | ||
132 | return sizeof(struct pmu_cmdline_args_v2); | ||
133 | } | ||
134 | |||
135 | static void set_pmu_cmdline_args_cpufreq_v2(struct nvgpu_pmu *pmu, u32 freq) | ||
136 | { | ||
137 | pmu->args_v2.cpu_freq_hz = freq; | ||
138 | } | ||
139 | static void set_pmu_cmdline_args_secure_mode_v2(struct nvgpu_pmu *pmu, u32 val) | ||
140 | { | ||
141 | pmu->args_v2.secure_mode = val; | ||
142 | } | ||
143 | |||
144 | static void set_pmu_cmdline_args_falctracesize_v2( | ||
145 | struct nvgpu_pmu *pmu, u32 size) | ||
146 | { | ||
147 | pmu->args_v2.falc_trace_size = size; | ||
148 | } | ||
149 | |||
150 | static void set_pmu_cmdline_args_falctracedmabase_v2(struct nvgpu_pmu *pmu) | ||
151 | { | ||
152 | pmu->args_v2.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | ||
153 | } | ||
154 | |||
155 | static void set_pmu_cmdline_args_falctracedmaidx_v2( | ||
156 | struct nvgpu_pmu *pmu, u32 idx) | ||
157 | { | ||
158 | pmu->args_v2.falc_trace_dma_idx = idx; | ||
159 | } | ||
160 | |||
161 | |||
162 | static void set_pmu_cmdline_args_falctracedmabase_v4(struct nvgpu_pmu *pmu) | ||
163 | { | ||
164 | pmu->args_v4.dma_addr.dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | ||
165 | pmu->args_v4.dma_addr.dma_base1 = 0; | ||
166 | pmu->args_v4.dma_addr.dma_offset = 0; | ||
167 | } | ||
168 | |||
169 | static u32 pmu_cmdline_size_v4(struct nvgpu_pmu *pmu) | ||
170 | { | ||
171 | return sizeof(struct pmu_cmdline_args_v4); | ||
172 | } | ||
173 | |||
174 | static void set_pmu_cmdline_args_cpufreq_v4(struct nvgpu_pmu *pmu, u32 freq) | ||
175 | { | ||
176 | pmu->args_v4.cpu_freq_hz = freq; | ||
177 | } | ||
178 | static void set_pmu_cmdline_args_secure_mode_v4(struct nvgpu_pmu *pmu, u32 val) | ||
179 | { | ||
180 | pmu->args_v4.secure_mode = val; | ||
181 | } | ||
182 | |||
183 | static void set_pmu_cmdline_args_falctracesize_v4( | ||
184 | struct nvgpu_pmu *pmu, u32 size) | ||
185 | { | ||
186 | pmu->args_v4.falc_trace_size = size; | ||
187 | } | ||
188 | static void set_pmu_cmdline_args_falctracedmaidx_v4( | ||
189 | struct nvgpu_pmu *pmu, u32 idx) | ||
190 | { | ||
191 | pmu->args_v4.falc_trace_dma_idx = idx; | ||
192 | } | ||
193 | |||
194 | static u32 pmu_cmdline_size_v5(struct nvgpu_pmu *pmu) | ||
195 | { | ||
196 | return sizeof(struct pmu_cmdline_args_v5); | ||
197 | } | ||
198 | |||
199 | static u32 pmu_cmdline_size_v6(struct nvgpu_pmu *pmu) | ||
200 | { | ||
201 | return sizeof(struct pmu_cmdline_args_v6); | ||
202 | } | ||
203 | |||
204 | static void set_pmu_cmdline_args_cpufreq_v5(struct nvgpu_pmu *pmu, u32 freq) | ||
205 | { | ||
206 | pmu->args_v5.cpu_freq_hz = 204000000; | ||
207 | } | ||
208 | static void set_pmu_cmdline_args_secure_mode_v5(struct nvgpu_pmu *pmu, u32 val) | ||
209 | { | ||
210 | pmu->args_v5.secure_mode = val; | ||
211 | } | ||
212 | |||
213 | static void set_pmu_cmdline_args_falctracesize_v5( | ||
214 | struct nvgpu_pmu *pmu, u32 size) | ||
215 | { | ||
216 | /* set by surface describe */ | ||
217 | } | ||
218 | |||
219 | static void set_pmu_cmdline_args_falctracedmabase_v5(struct nvgpu_pmu *pmu) | ||
220 | { | ||
221 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
222 | |||
223 | nvgpu_pmu_surface_describe(g, &pmu->trace_buf, &pmu->args_v5.trace_buf); | ||
224 | } | ||
225 | |||
226 | static void set_pmu_cmdline_args_falctracedmaidx_v5( | ||
227 | struct nvgpu_pmu *pmu, u32 idx) | ||
228 | { | ||
229 | /* set by surface describe */ | ||
230 | } | ||
231 | |||
232 | static u32 pmu_cmdline_size_v3(struct nvgpu_pmu *pmu) | ||
233 | { | ||
234 | return sizeof(struct pmu_cmdline_args_v3); | ||
235 | } | ||
236 | |||
237 | static void set_pmu_cmdline_args_cpufreq_v3(struct nvgpu_pmu *pmu, u32 freq) | ||
238 | { | ||
239 | pmu->args_v3.cpu_freq_hz = freq; | ||
240 | } | ||
241 | static void set_pmu_cmdline_args_secure_mode_v3(struct nvgpu_pmu *pmu, u32 val) | ||
242 | { | ||
243 | pmu->args_v3.secure_mode = val; | ||
244 | } | ||
245 | |||
246 | static void set_pmu_cmdline_args_falctracesize_v3( | ||
247 | struct nvgpu_pmu *pmu, u32 size) | ||
248 | { | ||
249 | pmu->args_v3.falc_trace_size = size; | ||
250 | } | ||
251 | |||
252 | static void set_pmu_cmdline_args_falctracedmabase_v3(struct nvgpu_pmu *pmu) | ||
253 | { | ||
254 | pmu->args_v3.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | ||
255 | } | ||
256 | |||
257 | static void set_pmu_cmdline_args_falctracedmaidx_v3( | ||
258 | struct nvgpu_pmu *pmu, u32 idx) | ||
259 | { | ||
260 | pmu->args_v3.falc_trace_dma_idx = idx; | ||
261 | } | ||
262 | |||
263 | static void set_pmu_cmdline_args_cpufreq_v1(struct nvgpu_pmu *pmu, u32 freq) | ||
264 | { | ||
265 | pmu->args_v1.cpu_freq_hz = freq; | ||
266 | } | ||
267 | static void set_pmu_cmdline_args_secure_mode_v1(struct nvgpu_pmu *pmu, u32 val) | ||
268 | { | ||
269 | pmu->args_v1.secure_mode = val; | ||
270 | } | ||
271 | |||
272 | static void set_pmu_cmdline_args_falctracesize_v1( | ||
273 | struct nvgpu_pmu *pmu, u32 size) | ||
274 | { | ||
275 | pmu->args_v1.falc_trace_size = size; | ||
276 | } | ||
277 | |||
278 | bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) | 48 | bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos) |
279 | { | 49 | { |
280 | u32 i = 0, j = strlen(strings); | 50 | u32 i = 0, j = strlen(strings); |
51 | |||
281 | for (; i < j; i++) { | 52 | for (; i < j; i++) { |
282 | if (strings[i] == '%') | 53 | if (strings[i] == '%') |
283 | if (strings[i + 1] == 'x' || strings[i + 1] == 'X') { | 54 | if (strings[i + 1] == 'x' || strings[i + 1] == 'X') { |
@@ -330,1885 +101,13 @@ static void printtrace(struct nvgpu_pmu *pmu) | |||
330 | l++; | 101 | l++; |
331 | m += k + 2; | 102 | m += k + 2; |
332 | } | 103 | } |
104 | |||
333 | scnprintf((buf + count), 0x40, "%s", (trace+i+20+m)); | 105 | scnprintf((buf + count), 0x40, "%s", (trace+i+20+m)); |
334 | nvgpu_err(g, "%s", buf); | 106 | nvgpu_err(g, "%s", buf); |
335 | } | 107 | } |
336 | nvgpu_kfree(g, tracebuffer); | 108 | nvgpu_kfree(g, tracebuffer); |
337 | } | 109 | } |
338 | 110 | ||
339 | static void set_pmu_cmdline_args_falctracedmabase_v1(struct nvgpu_pmu *pmu) | ||
340 | { | ||
341 | pmu->args_v1.falc_trace_dma_base = ((u32)pmu->trace_buf.gpu_va)/0x100; | ||
342 | } | ||
343 | |||
344 | static void set_pmu_cmdline_args_falctracedmaidx_v1( | ||
345 | struct nvgpu_pmu *pmu, u32 idx) | ||
346 | { | ||
347 | pmu->args_v1.falc_trace_dma_idx = idx; | ||
348 | } | ||
349 | |||
350 | static void set_pmu_cmdline_args_cpufreq_v0(struct nvgpu_pmu *pmu, u32 freq) | ||
351 | { | ||
352 | pmu->args_v0.cpu_freq_hz = freq; | ||
353 | } | ||
354 | |||
355 | static void *get_pmu_cmdline_args_ptr_v4(struct nvgpu_pmu *pmu) | ||
356 | { | ||
357 | return (void *)(&pmu->args_v4); | ||
358 | } | ||
359 | |||
360 | static void *get_pmu_cmdline_args_ptr_v3(struct nvgpu_pmu *pmu) | ||
361 | { | ||
362 | return (void *)(&pmu->args_v3); | ||
363 | } | ||
364 | |||
365 | static void *get_pmu_cmdline_args_ptr_v2(struct nvgpu_pmu *pmu) | ||
366 | { | ||
367 | return (void *)(&pmu->args_v2); | ||
368 | } | ||
369 | |||
370 | static void *get_pmu_cmdline_args_ptr_v5(struct nvgpu_pmu *pmu) | ||
371 | { | ||
372 | return (void *)(&pmu->args_v5); | ||
373 | } | ||
374 | static void *get_pmu_cmdline_args_ptr_v1(struct nvgpu_pmu *pmu) | ||
375 | { | ||
376 | return (void *)(&pmu->args_v1); | ||
377 | } | ||
378 | |||
379 | static void *get_pmu_cmdline_args_ptr_v0(struct nvgpu_pmu *pmu) | ||
380 | { | ||
381 | return (void *)(&pmu->args_v0); | ||
382 | } | ||
383 | |||
384 | static u32 get_pmu_allocation_size_v3(struct nvgpu_pmu *pmu) | ||
385 | { | ||
386 | return sizeof(struct pmu_allocation_v3); | ||
387 | } | ||
388 | |||
389 | static u32 get_pmu_allocation_size_v2(struct nvgpu_pmu *pmu) | ||
390 | { | ||
391 | return sizeof(struct pmu_allocation_v2); | ||
392 | } | ||
393 | |||
394 | static u32 get_pmu_allocation_size_v1(struct nvgpu_pmu *pmu) | ||
395 | { | ||
396 | return sizeof(struct pmu_allocation_v1); | ||
397 | } | ||
398 | |||
399 | static u32 get_pmu_allocation_size_v0(struct nvgpu_pmu *pmu) | ||
400 | { | ||
401 | return sizeof(struct pmu_allocation_v0); | ||
402 | } | ||
403 | |||
404 | static void set_pmu_allocation_ptr_v3(struct nvgpu_pmu *pmu, | ||
405 | void **pmu_alloc_ptr, void *assign_ptr) | ||
406 | { | ||
407 | struct pmu_allocation_v3 **pmu_a_ptr = | ||
408 | (struct pmu_allocation_v3 **)pmu_alloc_ptr; | ||
409 | *pmu_a_ptr = (struct pmu_allocation_v3 *)assign_ptr; | ||
410 | } | ||
411 | |||
412 | static void set_pmu_allocation_ptr_v2(struct nvgpu_pmu *pmu, | ||
413 | void **pmu_alloc_ptr, void *assign_ptr) | ||
414 | { | ||
415 | struct pmu_allocation_v2 **pmu_a_ptr = | ||
416 | (struct pmu_allocation_v2 **)pmu_alloc_ptr; | ||
417 | *pmu_a_ptr = (struct pmu_allocation_v2 *)assign_ptr; | ||
418 | } | ||
419 | |||
420 | static void set_pmu_allocation_ptr_v1(struct nvgpu_pmu *pmu, | ||
421 | void **pmu_alloc_ptr, void *assign_ptr) | ||
422 | { | ||
423 | struct pmu_allocation_v1 **pmu_a_ptr = | ||
424 | (struct pmu_allocation_v1 **)pmu_alloc_ptr; | ||
425 | *pmu_a_ptr = (struct pmu_allocation_v1 *)assign_ptr; | ||
426 | } | ||
427 | |||
428 | static void set_pmu_allocation_ptr_v0(struct nvgpu_pmu *pmu, | ||
429 | void **pmu_alloc_ptr, void *assign_ptr) | ||
430 | { | ||
431 | struct pmu_allocation_v0 **pmu_a_ptr = | ||
432 | (struct pmu_allocation_v0 **)pmu_alloc_ptr; | ||
433 | *pmu_a_ptr = (struct pmu_allocation_v0 *)assign_ptr; | ||
434 | } | ||
435 | |||
436 | static void pmu_allocation_set_dmem_size_v3(struct nvgpu_pmu *pmu, | ||
437 | void *pmu_alloc_ptr, u16 size) | ||
438 | { | ||
439 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
440 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
441 | pmu_a_ptr->alloc.dmem.size = size; | ||
442 | } | ||
443 | |||
444 | static void pmu_allocation_set_dmem_size_v2(struct nvgpu_pmu *pmu, | ||
445 | void *pmu_alloc_ptr, u16 size) | ||
446 | { | ||
447 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
448 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
449 | pmu_a_ptr->alloc.dmem.size = size; | ||
450 | } | ||
451 | |||
452 | static void pmu_allocation_set_dmem_size_v1(struct nvgpu_pmu *pmu, | ||
453 | void *pmu_alloc_ptr, u16 size) | ||
454 | { | ||
455 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
456 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
457 | pmu_a_ptr->alloc.dmem.size = size; | ||
458 | } | ||
459 | |||
460 | static void pmu_allocation_set_dmem_size_v0(struct nvgpu_pmu *pmu, | ||
461 | void *pmu_alloc_ptr, u16 size) | ||
462 | { | ||
463 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
464 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
465 | pmu_a_ptr->alloc.dmem.size = size; | ||
466 | } | ||
467 | |||
468 | static u16 pmu_allocation_get_dmem_size_v3(struct nvgpu_pmu *pmu, | ||
469 | void *pmu_alloc_ptr) | ||
470 | { | ||
471 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
472 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
473 | return pmu_a_ptr->alloc.dmem.size; | ||
474 | } | ||
475 | |||
476 | static u16 pmu_allocation_get_dmem_size_v2(struct nvgpu_pmu *pmu, | ||
477 | void *pmu_alloc_ptr) | ||
478 | { | ||
479 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
480 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
481 | return pmu_a_ptr->alloc.dmem.size; | ||
482 | } | ||
483 | |||
484 | static u16 pmu_allocation_get_dmem_size_v1(struct nvgpu_pmu *pmu, | ||
485 | void *pmu_alloc_ptr) | ||
486 | { | ||
487 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
488 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
489 | return pmu_a_ptr->alloc.dmem.size; | ||
490 | } | ||
491 | |||
492 | static u16 pmu_allocation_get_dmem_size_v0(struct nvgpu_pmu *pmu, | ||
493 | void *pmu_alloc_ptr) | ||
494 | { | ||
495 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
496 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
497 | return pmu_a_ptr->alloc.dmem.size; | ||
498 | } | ||
499 | |||
500 | static u32 pmu_allocation_get_dmem_offset_v3(struct nvgpu_pmu *pmu, | ||
501 | void *pmu_alloc_ptr) | ||
502 | { | ||
503 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
504 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
505 | return pmu_a_ptr->alloc.dmem.offset; | ||
506 | } | ||
507 | |||
508 | static u32 pmu_allocation_get_dmem_offset_v2(struct nvgpu_pmu *pmu, | ||
509 | void *pmu_alloc_ptr) | ||
510 | { | ||
511 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
512 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
513 | return pmu_a_ptr->alloc.dmem.offset; | ||
514 | } | ||
515 | |||
516 | static u32 pmu_allocation_get_dmem_offset_v1(struct nvgpu_pmu *pmu, | ||
517 | void *pmu_alloc_ptr) | ||
518 | { | ||
519 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
520 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
521 | return pmu_a_ptr->alloc.dmem.offset; | ||
522 | } | ||
523 | |||
524 | static u32 pmu_allocation_get_dmem_offset_v0(struct nvgpu_pmu *pmu, | ||
525 | void *pmu_alloc_ptr) | ||
526 | { | ||
527 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
528 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
529 | return pmu_a_ptr->alloc.dmem.offset; | ||
530 | } | ||
531 | |||
532 | static u32 *pmu_allocation_get_dmem_offset_addr_v3(struct nvgpu_pmu *pmu, | ||
533 | void *pmu_alloc_ptr) | ||
534 | { | ||
535 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
536 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
537 | return &pmu_a_ptr->alloc.dmem.offset; | ||
538 | } | ||
539 | |||
540 | static void *pmu_allocation_get_fb_addr_v3( | ||
541 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) | ||
542 | { | ||
543 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
544 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
545 | return (void *)&pmu_a_ptr->alloc.fb; | ||
546 | } | ||
547 | |||
548 | static u32 pmu_allocation_get_fb_size_v3( | ||
549 | struct nvgpu_pmu *pmu, void *pmu_alloc_ptr) | ||
550 | { | ||
551 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
552 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
553 | return sizeof(pmu_a_ptr->alloc.fb); | ||
554 | } | ||
555 | |||
556 | static u32 *pmu_allocation_get_dmem_offset_addr_v2(struct nvgpu_pmu *pmu, | ||
557 | void *pmu_alloc_ptr) | ||
558 | { | ||
559 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
560 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
561 | return &pmu_a_ptr->alloc.dmem.offset; | ||
562 | } | ||
563 | |||
564 | static u32 *pmu_allocation_get_dmem_offset_addr_v1(struct nvgpu_pmu *pmu, | ||
565 | void *pmu_alloc_ptr) | ||
566 | { | ||
567 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
568 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
569 | return &pmu_a_ptr->alloc.dmem.offset; | ||
570 | } | ||
571 | |||
572 | static u32 *pmu_allocation_get_dmem_offset_addr_v0(struct nvgpu_pmu *pmu, | ||
573 | void *pmu_alloc_ptr) | ||
574 | { | ||
575 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
576 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
577 | return &pmu_a_ptr->alloc.dmem.offset; | ||
578 | } | ||
579 | |||
580 | static void pmu_allocation_set_dmem_offset_v3(struct nvgpu_pmu *pmu, | ||
581 | void *pmu_alloc_ptr, u32 offset) | ||
582 | { | ||
583 | struct pmu_allocation_v3 *pmu_a_ptr = | ||
584 | (struct pmu_allocation_v3 *)pmu_alloc_ptr; | ||
585 | pmu_a_ptr->alloc.dmem.offset = offset; | ||
586 | } | ||
587 | |||
588 | static void pmu_allocation_set_dmem_offset_v2(struct nvgpu_pmu *pmu, | ||
589 | void *pmu_alloc_ptr, u32 offset) | ||
590 | { | ||
591 | struct pmu_allocation_v2 *pmu_a_ptr = | ||
592 | (struct pmu_allocation_v2 *)pmu_alloc_ptr; | ||
593 | pmu_a_ptr->alloc.dmem.offset = offset; | ||
594 | } | ||
595 | |||
596 | static void pmu_allocation_set_dmem_offset_v1(struct nvgpu_pmu *pmu, | ||
597 | void *pmu_alloc_ptr, u32 offset) | ||
598 | { | ||
599 | struct pmu_allocation_v1 *pmu_a_ptr = | ||
600 | (struct pmu_allocation_v1 *)pmu_alloc_ptr; | ||
601 | pmu_a_ptr->alloc.dmem.offset = offset; | ||
602 | } | ||
603 | |||
604 | static void pmu_allocation_set_dmem_offset_v0(struct nvgpu_pmu *pmu, | ||
605 | void *pmu_alloc_ptr, u32 offset) | ||
606 | { | ||
607 | struct pmu_allocation_v0 *pmu_a_ptr = | ||
608 | (struct pmu_allocation_v0 *)pmu_alloc_ptr; | ||
609 | pmu_a_ptr->alloc.dmem.offset = offset; | ||
610 | } | ||
611 | |||
612 | static void *get_pmu_msg_pmu_init_msg_ptr_v4(struct pmu_init_msg *init) | ||
613 | { | ||
614 | return (void *)(&(init->pmu_init_v4)); | ||
615 | } | ||
616 | |||
617 | static void *get_pmu_msg_pmu_init_msg_ptr_v3(struct pmu_init_msg *init) | ||
618 | { | ||
619 | return (void *)(&(init->pmu_init_v3)); | ||
620 | } | ||
621 | |||
622 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v4(union pmu_init_msg_pmu *init_msg) | ||
623 | { | ||
624 | struct pmu_init_msg_pmu_v4 *init = | ||
625 | (struct pmu_init_msg_pmu_v4 *)(&init_msg->v4); | ||
626 | |||
627 | return init->sw_managed_area_offset; | ||
628 | } | ||
629 | |||
630 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v3(union pmu_init_msg_pmu *init_msg) | ||
631 | { | ||
632 | struct pmu_init_msg_pmu_v3 *init = | ||
633 | (struct pmu_init_msg_pmu_v3 *)(&init_msg->v3); | ||
634 | |||
635 | return init->sw_managed_area_offset; | ||
636 | } | ||
637 | |||
638 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v4(union pmu_init_msg_pmu *init_msg) | ||
639 | { | ||
640 | struct pmu_init_msg_pmu_v4 *init = | ||
641 | (struct pmu_init_msg_pmu_v4 *)(&init_msg->v4); | ||
642 | |||
643 | return init->sw_managed_area_size; | ||
644 | } | ||
645 | |||
646 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v3(union pmu_init_msg_pmu *init_msg) | ||
647 | { | ||
648 | struct pmu_init_msg_pmu_v3 *init = | ||
649 | (struct pmu_init_msg_pmu_v3 *)(&init_msg->v3); | ||
650 | |||
651 | return init->sw_managed_area_size; | ||
652 | } | ||
653 | |||
654 | static void *get_pmu_msg_pmu_init_msg_ptr_v2(struct pmu_init_msg *init) | ||
655 | { | ||
656 | return (void *)(&(init->pmu_init_v2)); | ||
657 | } | ||
658 | |||
659 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v2(union pmu_init_msg_pmu *init_msg) | ||
660 | { | ||
661 | struct pmu_init_msg_pmu_v2 *init = | ||
662 | (struct pmu_init_msg_pmu_v2 *)(&init_msg->v1); | ||
663 | return init->sw_managed_area_offset; | ||
664 | } | ||
665 | |||
666 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v2(union pmu_init_msg_pmu *init_msg) | ||
667 | { | ||
668 | struct pmu_init_msg_pmu_v2 *init = | ||
669 | (struct pmu_init_msg_pmu_v2 *)(&init_msg->v1); | ||
670 | return init->sw_managed_area_size; | ||
671 | } | ||
672 | |||
673 | static void *get_pmu_msg_pmu_init_msg_ptr_v1(struct pmu_init_msg *init) | ||
674 | { | ||
675 | return (void *)(&(init->pmu_init_v1)); | ||
676 | } | ||
677 | |||
678 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v1(union pmu_init_msg_pmu *init_msg) | ||
679 | { | ||
680 | struct pmu_init_msg_pmu_v1 *init = | ||
681 | (struct pmu_init_msg_pmu_v1 *)(&init_msg->v1); | ||
682 | return init->sw_managed_area_offset; | ||
683 | } | ||
684 | |||
685 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v1(union pmu_init_msg_pmu *init_msg) | ||
686 | { | ||
687 | struct pmu_init_msg_pmu_v1 *init = | ||
688 | (struct pmu_init_msg_pmu_v1 *)(&init_msg->v1); | ||
689 | return init->sw_managed_area_size; | ||
690 | } | ||
691 | |||
692 | static void *get_pmu_msg_pmu_init_msg_ptr_v0(struct pmu_init_msg *init) | ||
693 | { | ||
694 | return (void *)(&(init->pmu_init_v0)); | ||
695 | } | ||
696 | |||
697 | static u16 get_pmu_init_msg_pmu_sw_mg_off_v0(union pmu_init_msg_pmu *init_msg) | ||
698 | { | ||
699 | struct pmu_init_msg_pmu_v0 *init = | ||
700 | (struct pmu_init_msg_pmu_v0 *)(&init_msg->v0); | ||
701 | return init->sw_managed_area_offset; | ||
702 | } | ||
703 | |||
704 | static u16 get_pmu_init_msg_pmu_sw_mg_size_v0(union pmu_init_msg_pmu *init_msg) | ||
705 | { | ||
706 | struct pmu_init_msg_pmu_v0 *init = | ||
707 | (struct pmu_init_msg_pmu_v0 *)(&init_msg->v0); | ||
708 | return init->sw_managed_area_size; | ||
709 | } | ||
710 | |||
711 | static u32 get_pmu_perfmon_cmd_start_size_v3(void) | ||
712 | { | ||
713 | return sizeof(struct pmu_perfmon_cmd_start_v3); | ||
714 | } | ||
715 | |||
716 | static u32 get_pmu_perfmon_cmd_start_size_v2(void) | ||
717 | { | ||
718 | return sizeof(struct pmu_perfmon_cmd_start_v2); | ||
719 | } | ||
720 | |||
721 | static u32 get_pmu_perfmon_cmd_start_size_v1(void) | ||
722 | { | ||
723 | return sizeof(struct pmu_perfmon_cmd_start_v1); | ||
724 | } | ||
725 | |||
726 | static u32 get_pmu_perfmon_cmd_start_size_v0(void) | ||
727 | { | ||
728 | return sizeof(struct pmu_perfmon_cmd_start_v0); | ||
729 | } | ||
730 | |||
731 | static int get_perfmon_cmd_start_offsetofvar_v3( | ||
732 | enum pmu_perfmon_cmd_start_fields field) | ||
733 | { | ||
734 | switch (field) { | ||
735 | case COUNTER_ALLOC: | ||
736 | return offsetof(struct pmu_perfmon_cmd_start_v3, | ||
737 | counter_alloc); | ||
738 | default: | ||
739 | return -EINVAL; | ||
740 | } | ||
741 | |||
742 | return 0; | ||
743 | } | ||
744 | |||
745 | static int get_perfmon_cmd_start_offsetofvar_v2( | ||
746 | enum pmu_perfmon_cmd_start_fields field) | ||
747 | { | ||
748 | switch (field) { | ||
749 | case COUNTER_ALLOC: | ||
750 | return offsetof(struct pmu_perfmon_cmd_start_v2, | ||
751 | counter_alloc); | ||
752 | default: | ||
753 | return -EINVAL; | ||
754 | } | ||
755 | |||
756 | return 0; | ||
757 | } | ||
758 | |||
759 | static int get_perfmon_cmd_start_offsetofvar_v1( | ||
760 | enum pmu_perfmon_cmd_start_fields field) | ||
761 | { | ||
762 | switch (field) { | ||
763 | case COUNTER_ALLOC: | ||
764 | return offsetof(struct pmu_perfmon_cmd_start_v1, | ||
765 | counter_alloc); | ||
766 | default: | ||
767 | return -EINVAL; | ||
768 | } | ||
769 | |||
770 | return 0; | ||
771 | } | ||
772 | |||
773 | static int get_perfmon_cmd_start_offsetofvar_v0( | ||
774 | enum pmu_perfmon_cmd_start_fields field) | ||
775 | { | ||
776 | switch (field) { | ||
777 | case COUNTER_ALLOC: | ||
778 | return offsetof(struct pmu_perfmon_cmd_start_v0, | ||
779 | counter_alloc); | ||
780 | default: | ||
781 | return -EINVAL; | ||
782 | break; | ||
783 | } | ||
784 | return 0; | ||
785 | } | ||
786 | |||
787 | static u32 get_pmu_perfmon_cmd_init_size_v3(void) | ||
788 | { | ||
789 | return sizeof(struct pmu_perfmon_cmd_init_v3); | ||
790 | } | ||
791 | |||
792 | static u32 get_pmu_perfmon_cmd_init_size_v2(void) | ||
793 | { | ||
794 | return sizeof(struct pmu_perfmon_cmd_init_v2); | ||
795 | } | ||
796 | |||
797 | static u32 get_pmu_perfmon_cmd_init_size_v1(void) | ||
798 | { | ||
799 | return sizeof(struct pmu_perfmon_cmd_init_v1); | ||
800 | } | ||
801 | |||
802 | static u32 get_pmu_perfmon_cmd_init_size_v0(void) | ||
803 | { | ||
804 | return sizeof(struct pmu_perfmon_cmd_init_v0); | ||
805 | } | ||
806 | |||
807 | static int get_perfmon_cmd_init_offsetofvar_v3( | ||
808 | enum pmu_perfmon_cmd_start_fields field) | ||
809 | { | ||
810 | switch (field) { | ||
811 | case COUNTER_ALLOC: | ||
812 | return offsetof(struct pmu_perfmon_cmd_init_v3, | ||
813 | counter_alloc); | ||
814 | default: | ||
815 | return -EINVAL; | ||
816 | } | ||
817 | return 0; | ||
818 | } | ||
819 | |||
820 | static int get_perfmon_cmd_init_offsetofvar_v2( | ||
821 | enum pmu_perfmon_cmd_start_fields field) | ||
822 | { | ||
823 | switch (field) { | ||
824 | case COUNTER_ALLOC: | ||
825 | return offsetof(struct pmu_perfmon_cmd_init_v2, | ||
826 | counter_alloc); | ||
827 | default: | ||
828 | return -EINVAL; | ||
829 | break; | ||
830 | } | ||
831 | return 0; | ||
832 | } | ||
833 | |||
834 | static int get_perfmon_cmd_init_offsetofvar_v1( | ||
835 | enum pmu_perfmon_cmd_start_fields field) | ||
836 | { | ||
837 | switch (field) { | ||
838 | case COUNTER_ALLOC: | ||
839 | return offsetof(struct pmu_perfmon_cmd_init_v1, | ||
840 | counter_alloc); | ||
841 | default: | ||
842 | return -EINVAL; | ||
843 | break; | ||
844 | } | ||
845 | return 0; | ||
846 | } | ||
847 | |||
848 | static int get_perfmon_cmd_init_offsetofvar_v0( | ||
849 | enum pmu_perfmon_cmd_start_fields field) | ||
850 | { | ||
851 | switch (field) { | ||
852 | case COUNTER_ALLOC: | ||
853 | return offsetof(struct pmu_perfmon_cmd_init_v0, | ||
854 | counter_alloc); | ||
855 | default: | ||
856 | return -EINVAL; | ||
857 | break; | ||
858 | } | ||
859 | return 0; | ||
860 | } | ||
861 | |||
862 | static void perfmon_start_set_cmd_type_v3(struct pmu_perfmon_cmd *pc, u8 value) | ||
863 | { | ||
864 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
865 | |||
866 | start->cmd_type = value; | ||
867 | } | ||
868 | |||
869 | static void perfmon_start_set_cmd_type_v2(struct pmu_perfmon_cmd *pc, u8 value) | ||
870 | { | ||
871 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
872 | start->cmd_type = value; | ||
873 | } | ||
874 | |||
875 | static void perfmon_start_set_cmd_type_v1(struct pmu_perfmon_cmd *pc, u8 value) | ||
876 | { | ||
877 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
878 | start->cmd_type = value; | ||
879 | } | ||
880 | |||
881 | static void perfmon_start_set_cmd_type_v0(struct pmu_perfmon_cmd *pc, u8 value) | ||
882 | { | ||
883 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
884 | start->cmd_type = value; | ||
885 | } | ||
886 | |||
887 | static void perfmon_start_set_group_id_v3(struct pmu_perfmon_cmd *pc, u8 value) | ||
888 | { | ||
889 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
890 | |||
891 | start->group_id = value; | ||
892 | } | ||
893 | |||
894 | static void perfmon_start_set_group_id_v2(struct pmu_perfmon_cmd *pc, u8 value) | ||
895 | { | ||
896 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
897 | start->group_id = value; | ||
898 | } | ||
899 | |||
900 | static void perfmon_start_set_group_id_v1(struct pmu_perfmon_cmd *pc, u8 value) | ||
901 | { | ||
902 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
903 | start->group_id = value; | ||
904 | } | ||
905 | |||
906 | static void perfmon_start_set_group_id_v0(struct pmu_perfmon_cmd *pc, u8 value) | ||
907 | { | ||
908 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
909 | start->group_id = value; | ||
910 | } | ||
911 | |||
912 | static void perfmon_start_set_state_id_v3(struct pmu_perfmon_cmd *pc, u8 value) | ||
913 | { | ||
914 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
915 | |||
916 | start->state_id = value; | ||
917 | } | ||
918 | |||
919 | static void perfmon_start_set_state_id_v2(struct pmu_perfmon_cmd *pc, u8 value) | ||
920 | { | ||
921 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
922 | start->state_id = value; | ||
923 | } | ||
924 | |||
925 | static void perfmon_start_set_state_id_v1(struct pmu_perfmon_cmd *pc, u8 value) | ||
926 | { | ||
927 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
928 | start->state_id = value; | ||
929 | } | ||
930 | |||
931 | static void perfmon_start_set_state_id_v0(struct pmu_perfmon_cmd *pc, u8 value) | ||
932 | { | ||
933 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
934 | start->state_id = value; | ||
935 | } | ||
936 | |||
937 | static void perfmon_start_set_flags_v3(struct pmu_perfmon_cmd *pc, u8 value) | ||
938 | { | ||
939 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
940 | |||
941 | start->flags = value; | ||
942 | } | ||
943 | |||
944 | static void perfmon_start_set_flags_v2(struct pmu_perfmon_cmd *pc, u8 value) | ||
945 | { | ||
946 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
947 | start->flags = value; | ||
948 | } | ||
949 | |||
950 | static void perfmon_start_set_flags_v1(struct pmu_perfmon_cmd *pc, u8 value) | ||
951 | { | ||
952 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
953 | start->flags = value; | ||
954 | } | ||
955 | |||
956 | static void perfmon_start_set_flags_v0(struct pmu_perfmon_cmd *pc, u8 value) | ||
957 | { | ||
958 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
959 | start->flags = value; | ||
960 | } | ||
961 | |||
962 | static u8 perfmon_start_get_flags_v3(struct pmu_perfmon_cmd *pc) | ||
963 | { | ||
964 | struct pmu_perfmon_cmd_start_v3 *start = &pc->start_v3; | ||
965 | |||
966 | return start->flags; | ||
967 | } | ||
968 | |||
969 | static u8 perfmon_start_get_flags_v2(struct pmu_perfmon_cmd *pc) | ||
970 | { | ||
971 | struct pmu_perfmon_cmd_start_v2 *start = &pc->start_v2; | ||
972 | return start->flags; | ||
973 | } | ||
974 | |||
975 | static u8 perfmon_start_get_flags_v1(struct pmu_perfmon_cmd *pc) | ||
976 | { | ||
977 | struct pmu_perfmon_cmd_start_v1 *start = &pc->start_v1; | ||
978 | return start->flags; | ||
979 | } | ||
980 | |||
981 | static u8 perfmon_start_get_flags_v0(struct pmu_perfmon_cmd *pc) | ||
982 | { | ||
983 | struct pmu_perfmon_cmd_start_v0 *start = &pc->start_v0; | ||
984 | return start->flags; | ||
985 | } | ||
986 | |||
987 | static void perfmon_cmd_init_set_sample_buffer_v3(struct pmu_perfmon_cmd *pc, | ||
988 | u16 value) | ||
989 | { | ||
990 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
991 | |||
992 | init->sample_buffer = value; | ||
993 | } | ||
994 | |||
995 | static void perfmon_cmd_init_set_sample_buffer_v2(struct pmu_perfmon_cmd *pc, | ||
996 | u16 value) | ||
997 | { | ||
998 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
999 | init->sample_buffer = value; | ||
1000 | } | ||
1001 | |||
1002 | |||
1003 | static void perfmon_cmd_init_set_sample_buffer_v1(struct pmu_perfmon_cmd *pc, | ||
1004 | u16 value) | ||
1005 | { | ||
1006 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1007 | init->sample_buffer = value; | ||
1008 | } | ||
1009 | |||
1010 | static void perfmon_cmd_init_set_sample_buffer_v0(struct pmu_perfmon_cmd *pc, | ||
1011 | u16 value) | ||
1012 | { | ||
1013 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1014 | init->sample_buffer = value; | ||
1015 | } | ||
1016 | |||
1017 | static void perfmon_cmd_init_set_dec_cnt_v3(struct pmu_perfmon_cmd *pc, | ||
1018 | u8 value) | ||
1019 | { | ||
1020 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1021 | |||
1022 | init->to_decrease_count = value; | ||
1023 | } | ||
1024 | |||
1025 | static void perfmon_cmd_init_set_dec_cnt_v2(struct pmu_perfmon_cmd *pc, | ||
1026 | u8 value) | ||
1027 | { | ||
1028 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1029 | init->to_decrease_count = value; | ||
1030 | } | ||
1031 | |||
1032 | static void perfmon_cmd_init_set_dec_cnt_v1(struct pmu_perfmon_cmd *pc, | ||
1033 | u8 value) | ||
1034 | { | ||
1035 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1036 | init->to_decrease_count = value; | ||
1037 | } | ||
1038 | |||
1039 | static void perfmon_cmd_init_set_dec_cnt_v0(struct pmu_perfmon_cmd *pc, | ||
1040 | u8 value) | ||
1041 | { | ||
1042 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1043 | init->to_decrease_count = value; | ||
1044 | } | ||
1045 | |||
1046 | static void perfmon_cmd_init_set_base_cnt_id_v3(struct pmu_perfmon_cmd *pc, | ||
1047 | u8 value) | ||
1048 | { | ||
1049 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1050 | |||
1051 | init->base_counter_id = value; | ||
1052 | } | ||
1053 | |||
1054 | static void perfmon_cmd_init_set_base_cnt_id_v2(struct pmu_perfmon_cmd *pc, | ||
1055 | u8 value) | ||
1056 | { | ||
1057 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1058 | init->base_counter_id = value; | ||
1059 | } | ||
1060 | |||
1061 | static void perfmon_cmd_init_set_base_cnt_id_v1(struct pmu_perfmon_cmd *pc, | ||
1062 | u8 value) | ||
1063 | { | ||
1064 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1065 | init->base_counter_id = value; | ||
1066 | } | ||
1067 | |||
1068 | static void perfmon_cmd_init_set_base_cnt_id_v0(struct pmu_perfmon_cmd *pc, | ||
1069 | u8 value) | ||
1070 | { | ||
1071 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1072 | init->base_counter_id = value; | ||
1073 | } | ||
1074 | |||
1075 | static void perfmon_cmd_init_set_samp_period_us_v3(struct pmu_perfmon_cmd *pc, | ||
1076 | u32 value) | ||
1077 | { | ||
1078 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1079 | |||
1080 | init->sample_period_us = value; | ||
1081 | } | ||
1082 | |||
1083 | static void perfmon_cmd_init_set_samp_period_us_v2(struct pmu_perfmon_cmd *pc, | ||
1084 | u32 value) | ||
1085 | { | ||
1086 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1087 | init->sample_period_us = value; | ||
1088 | } | ||
1089 | |||
1090 | static void perfmon_cmd_init_set_samp_period_us_v1(struct pmu_perfmon_cmd *pc, | ||
1091 | u32 value) | ||
1092 | { | ||
1093 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1094 | init->sample_period_us = value; | ||
1095 | } | ||
1096 | |||
1097 | static void perfmon_cmd_init_set_samp_period_us_v0(struct pmu_perfmon_cmd *pc, | ||
1098 | u32 value) | ||
1099 | { | ||
1100 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1101 | init->sample_period_us = value; | ||
1102 | } | ||
1103 | |||
1104 | static void perfmon_cmd_init_set_num_cnt_v3(struct pmu_perfmon_cmd *pc, | ||
1105 | u8 value) | ||
1106 | { | ||
1107 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1108 | |||
1109 | init->num_counters = value; | ||
1110 | } | ||
1111 | |||
1112 | static void perfmon_cmd_init_set_num_cnt_v2(struct pmu_perfmon_cmd *pc, | ||
1113 | u8 value) | ||
1114 | { | ||
1115 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1116 | init->num_counters = value; | ||
1117 | } | ||
1118 | |||
1119 | static void perfmon_cmd_init_set_num_cnt_v1(struct pmu_perfmon_cmd *pc, | ||
1120 | u8 value) | ||
1121 | { | ||
1122 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1123 | init->num_counters = value; | ||
1124 | } | ||
1125 | |||
1126 | static void perfmon_cmd_init_set_num_cnt_v0(struct pmu_perfmon_cmd *pc, | ||
1127 | u8 value) | ||
1128 | { | ||
1129 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1130 | init->num_counters = value; | ||
1131 | } | ||
1132 | |||
1133 | static void perfmon_cmd_init_set_mov_avg_v3(struct pmu_perfmon_cmd *pc, | ||
1134 | u8 value) | ||
1135 | { | ||
1136 | struct pmu_perfmon_cmd_init_v3 *init = &pc->init_v3; | ||
1137 | |||
1138 | init->samples_in_moving_avg = value; | ||
1139 | } | ||
1140 | |||
1141 | static void perfmon_cmd_init_set_mov_avg_v2(struct pmu_perfmon_cmd *pc, | ||
1142 | u8 value) | ||
1143 | { | ||
1144 | struct pmu_perfmon_cmd_init_v2 *init = &pc->init_v2; | ||
1145 | init->samples_in_moving_avg = value; | ||
1146 | } | ||
1147 | |||
1148 | static void perfmon_cmd_init_set_mov_avg_v1(struct pmu_perfmon_cmd *pc, | ||
1149 | u8 value) | ||
1150 | { | ||
1151 | struct pmu_perfmon_cmd_init_v1 *init = &pc->init_v1; | ||
1152 | init->samples_in_moving_avg = value; | ||
1153 | } | ||
1154 | |||
1155 | static void perfmon_cmd_init_set_mov_avg_v0(struct pmu_perfmon_cmd *pc, | ||
1156 | u8 value) | ||
1157 | { | ||
1158 | struct pmu_perfmon_cmd_init_v0 *init = &pc->init_v0; | ||
1159 | init->samples_in_moving_avg = value; | ||
1160 | } | ||
1161 | |||
1162 | static void get_pmu_init_msg_pmu_queue_params_v0(struct pmu_queue *queue, | ||
1163 | u32 id, void *pmu_init_msg) | ||
1164 | { | ||
1165 | struct pmu_init_msg_pmu_v0 *init = | ||
1166 | (struct pmu_init_msg_pmu_v0 *)pmu_init_msg; | ||
1167 | queue->index = init->queue_info[id].index; | ||
1168 | queue->offset = init->queue_info[id].offset; | ||
1169 | queue->size = init->queue_info[id].size; | ||
1170 | } | ||
1171 | |||
1172 | static void get_pmu_init_msg_pmu_queue_params_v1(struct pmu_queue *queue, | ||
1173 | u32 id, void *pmu_init_msg) | ||
1174 | { | ||
1175 | struct pmu_init_msg_pmu_v1 *init = | ||
1176 | (struct pmu_init_msg_pmu_v1 *)pmu_init_msg; | ||
1177 | queue->index = init->queue_info[id].index; | ||
1178 | queue->offset = init->queue_info[id].offset; | ||
1179 | queue->size = init->queue_info[id].size; | ||
1180 | } | ||
1181 | |||
1182 | static void get_pmu_init_msg_pmu_queue_params_v2(struct pmu_queue *queue, | ||
1183 | u32 id, void *pmu_init_msg) | ||
1184 | { | ||
1185 | struct pmu_init_msg_pmu_v2 *init = | ||
1186 | (struct pmu_init_msg_pmu_v2 *)pmu_init_msg; | ||
1187 | queue->index = init->queue_info[id].index; | ||
1188 | queue->offset = init->queue_info[id].offset; | ||
1189 | queue->size = init->queue_info[id].size; | ||
1190 | } | ||
1191 | |||
1192 | static void get_pmu_init_msg_pmu_queue_params_v4(struct pmu_queue *queue, | ||
1193 | u32 id, void *pmu_init_msg) | ||
1194 | { | ||
1195 | struct pmu_init_msg_pmu_v4 *init = pmu_init_msg; | ||
1196 | u32 current_ptr = 0; | ||
1197 | u8 i; | ||
1198 | u8 tmp_id = id; | ||
1199 | |||
1200 | if (tmp_id == PMU_COMMAND_QUEUE_HPQ) | ||
1201 | tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3; | ||
1202 | else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) | ||
1203 | tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3; | ||
1204 | else if (tmp_id == PMU_MESSAGE_QUEUE) | ||
1205 | tmp_id = PMU_QUEUE_MSG_IDX_FOR_V3; | ||
1206 | else | ||
1207 | return; | ||
1208 | |||
1209 | queue->index = init->queue_index[tmp_id]; | ||
1210 | queue->size = init->queue_size[tmp_id]; | ||
1211 | if (tmp_id != 0) { | ||
1212 | for (i = 0 ; i < tmp_id; i++) | ||
1213 | current_ptr += init->queue_size[i]; | ||
1214 | } | ||
1215 | queue->offset = init->queue_offset + current_ptr; | ||
1216 | } | ||
1217 | static void get_pmu_init_msg_pmu_queue_params_v3(struct pmu_queue *queue, | ||
1218 | u32 id, void *pmu_init_msg) | ||
1219 | { | ||
1220 | struct pmu_init_msg_pmu_v3 *init = | ||
1221 | (struct pmu_init_msg_pmu_v3 *)pmu_init_msg; | ||
1222 | u32 current_ptr = 0; | ||
1223 | u8 i; | ||
1224 | u8 tmp_id = id; | ||
1225 | |||
1226 | if (tmp_id == PMU_COMMAND_QUEUE_HPQ) | ||
1227 | tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3; | ||
1228 | else if (tmp_id == PMU_COMMAND_QUEUE_LPQ) | ||
1229 | tmp_id = PMU_QUEUE_LPQ_IDX_FOR_V3; | ||
1230 | else if (tmp_id == PMU_MESSAGE_QUEUE) | ||
1231 | tmp_id = PMU_QUEUE_MSG_IDX_FOR_V3; | ||
1232 | else | ||
1233 | return; | ||
1234 | queue->index = init->queue_index[tmp_id]; | ||
1235 | queue->size = init->queue_size[tmp_id]; | ||
1236 | if (tmp_id != 0) { | ||
1237 | for (i = 0 ; i < tmp_id; i++) | ||
1238 | current_ptr += init->queue_size[i]; | ||
1239 | } | ||
1240 | queue->offset = init->queue_offset + current_ptr; | ||
1241 | } | ||
1242 | |||
1243 | static void *get_pmu_sequence_in_alloc_ptr_v3(struct pmu_sequence *seq) | ||
1244 | { | ||
1245 | return (void *)(&seq->in_v3); | ||
1246 | } | ||
1247 | |||
1248 | static void *get_pmu_sequence_in_alloc_ptr_v1(struct pmu_sequence *seq) | ||
1249 | { | ||
1250 | return (void *)(&seq->in_v1); | ||
1251 | } | ||
1252 | |||
1253 | static void *get_pmu_sequence_in_alloc_ptr_v0(struct pmu_sequence *seq) | ||
1254 | { | ||
1255 | return (void *)(&seq->in_v0); | ||
1256 | } | ||
1257 | |||
1258 | static void *get_pmu_sequence_out_alloc_ptr_v3(struct pmu_sequence *seq) | ||
1259 | { | ||
1260 | return (void *)(&seq->out_v3); | ||
1261 | } | ||
1262 | |||
1263 | static void *get_pmu_sequence_out_alloc_ptr_v1(struct pmu_sequence *seq) | ||
1264 | { | ||
1265 | return (void *)(&seq->out_v1); | ||
1266 | } | ||
1267 | |||
1268 | static void *get_pmu_sequence_out_alloc_ptr_v0(struct pmu_sequence *seq) | ||
1269 | { | ||
1270 | return (void *)(&seq->out_v0); | ||
1271 | } | ||
1272 | |||
1273 | static u8 pg_cmd_eng_buf_load_size_v0(struct pmu_pg_cmd *pg) | ||
1274 | { | ||
1275 | return sizeof(pg->eng_buf_load_v0); | ||
1276 | } | ||
1277 | |||
1278 | static u8 pg_cmd_eng_buf_load_size_v1(struct pmu_pg_cmd *pg) | ||
1279 | { | ||
1280 | return sizeof(pg->eng_buf_load_v1); | ||
1281 | } | ||
1282 | |||
1283 | static u8 pg_cmd_eng_buf_load_size_v2(struct pmu_pg_cmd *pg) | ||
1284 | { | ||
1285 | return sizeof(pg->eng_buf_load_v2); | ||
1286 | } | ||
1287 | |||
/*
 * Write the cmd_type byte of a PG engine-buffer-load command; one
 * setter per command-layout version (v0..v2).
 */
static void pg_cmd_eng_buf_load_set_cmd_type_v0(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v0.cmd_type = value;
}

static void pg_cmd_eng_buf_load_set_cmd_type_v1(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v1.cmd_type = value;
}

static void pg_cmd_eng_buf_load_set_cmd_type_v2(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v2.cmd_type = value;
}
1305 | |||
/*
 * Write the engine_id byte of a PG engine-buffer-load command; one
 * setter per command-layout version (v0..v2).
 */
static void pg_cmd_eng_buf_load_set_engine_id_v0(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v0.engine_id = value;
}

static void pg_cmd_eng_buf_load_set_engine_id_v1(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v1.engine_id = value;
}

static void pg_cmd_eng_buf_load_set_engine_id_v2(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v2.engine_id = value;
}
/*
 * Write the buf_idx byte of a PG engine-buffer-load command; one
 * setter per command-layout version (v0..v2).
 */
static void pg_cmd_eng_buf_load_set_buf_idx_v0(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v0.buf_idx = value;
}

static void pg_cmd_eng_buf_load_set_buf_idx_v1(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v1.buf_idx = value;
}

static void pg_cmd_eng_buf_load_set_buf_idx_v2(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v2.buf_idx = value;
}
1336 | |||
/*
 * Write the pad byte of a PG engine-buffer-load command; one setter
 * per command-layout version (v0..v2).
 */
static void pg_cmd_eng_buf_load_set_pad_v0(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v0.pad = value;
}

static void pg_cmd_eng_buf_load_set_pad_v1(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v1.pad = value;
}

static void pg_cmd_eng_buf_load_set_pad_v2(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v2.pad = value;
}
1352 | |||
/*
 * Write the engine buffer size into a PG engine-buffer-load command.
 * The destination field differs per layout version:
 *   v0 - dedicated buf_size field,
 *   v1 - dma_size inside the DMA descriptor,
 *   v2 - the descriptor's params word (overwritten here; the DMA idx
 *        setter below ORs into bits 31:24 of the same word, so size
 *        must be set before the idx).
 */
static void pg_cmd_eng_buf_load_set_buf_size_v0(struct pmu_pg_cmd *pg,
	u16 value)
{
	pg->eng_buf_load_v0.buf_size = value;
}

static void pg_cmd_eng_buf_load_set_buf_size_v1(struct pmu_pg_cmd *pg,
	u16 value)
{
	pg->eng_buf_load_v1.dma_desc.dma_size = value;
}

static void pg_cmd_eng_buf_load_set_buf_size_v2(struct pmu_pg_cmd *pg,
	u16 value)
{
	pg->eng_buf_load_v2.dma_desc.params = value;
}
1368 | |||
/*
 * Set the DMA base address of the PG engine buffer, v0 layout.
 * v0 stores the address right-shifted by 8 bits.
 */
static void pg_cmd_eng_buf_load_set_dma_base_v0(struct pmu_pg_cmd *pg,
	u32 value)
{
	pg->eng_buf_load_v0.dma_base = (value >> 8);
}
/*
 * v1 layout: lo/hi address pair in the DMA descriptor.
 * NOTE(review): uses |= rather than =, so it assumes the descriptor
 * was zero-initialized beforehand -- confirm at the call sites.
 */
static void pg_cmd_eng_buf_load_set_dma_base_v1(struct pmu_pg_cmd *pg,
	u32 value)
{
	pg->eng_buf_load_v1.dma_desc.dma_addr.lo |= u64_lo32(value);
	pg->eng_buf_load_v1.dma_desc.dma_addr.hi |= u64_hi32(value);
}
/*
 * v2 layout.
 * NOTE(review): .hi is assigned u64_lo32(value) -- the same word as
 * .lo -- which looks like a typo for u64_hi32 (always 0 for a u32
 * input). Left byte-identical; verify against the PMU ucode
 * interface before changing.
 */
static void pg_cmd_eng_buf_load_set_dma_base_v2(struct pmu_pg_cmd *pg,
	u32 value)
{
	pg->eng_buf_load_v2.dma_desc.address.lo = u64_lo32(value);
	pg->eng_buf_load_v2.dma_desc.address.hi = u64_lo32(value);
}
1386 | |||
/* Set the DMA offset of the PG engine buffer, v0 layout. */
static void pg_cmd_eng_buf_load_set_dma_offset_v0(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v0.dma_offset = value;
}
/*
 * v1 layout: OR the byte offset into the low word of the DMA address
 * set earlier by the v1 dma_base setter.
 */
static void pg_cmd_eng_buf_load_set_dma_offset_v1(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v1.dma_desc.dma_addr.lo |= value;
}
/*
 * v2 layout.
 * NOTE(review): ORing the offset into .hi as well as .lo looks
 * suspicious (it mirrors the u64_lo32 oddity in the v2 dma_base
 * setter). Left byte-identical; confirm against the PMU ucode
 * interface.
 */
static void pg_cmd_eng_buf_load_set_dma_offset_v2(struct pmu_pg_cmd *pg,
	u8 value)
{
	pg->eng_buf_load_v2.dma_desc.address.lo |= u64_lo32(value);
	pg->eng_buf_load_v2.dma_desc.address.hi |= u64_lo32(value);
}
1403 | |||
1404 | static void pg_cmd_eng_buf_load_set_dma_idx_v0(struct pmu_pg_cmd *pg, | ||
1405 | u8 value) | ||
1406 | { | ||
1407 | pg->eng_buf_load_v0.dma_idx = value; | ||
1408 | } | ||
1409 | static void pg_cmd_eng_buf_load_set_dma_idx_v1(struct pmu_pg_cmd *pg, | ||
1410 | u8 value) | ||
1411 | { | ||
1412 | pg->eng_buf_load_v1.dma_desc.dma_idx = value; | ||
1413 | } | ||
1414 | static void pg_cmd_eng_buf_load_set_dma_idx_v2(struct pmu_pg_cmd *pg, | ||
1415 | u8 value) | ||
1416 | { | ||
1417 | pg->eng_buf_load_v2.dma_desc.params |= (value << 24); | ||
1418 | } | ||
1419 | |||
1420 | int gk20a_init_pmu(struct nvgpu_pmu *pmu) | ||
1421 | { | ||
1422 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
1423 | struct pmu_v *pv = &g->ops.pmu_ver; | ||
1424 | int err; | ||
1425 | |||
1426 | err = nvgpu_mutex_init(&pmu->elpg_mutex); | ||
1427 | if (err) | ||
1428 | return err; | ||
1429 | |||
1430 | err = nvgpu_mutex_init(&pmu->pg_mutex); | ||
1431 | if (err) | ||
1432 | goto fail_elpg; | ||
1433 | |||
1434 | err = nvgpu_mutex_init(&pmu->isr_mutex); | ||
1435 | if (err) | ||
1436 | goto fail_pg; | ||
1437 | |||
1438 | err = nvgpu_mutex_init(&pmu->pmu_copy_lock); | ||
1439 | if (err) | ||
1440 | goto fail_isr; | ||
1441 | |||
1442 | err = nvgpu_mutex_init(&pmu->pmu_seq_lock); | ||
1443 | if (err) | ||
1444 | goto fail_pmu_copy; | ||
1445 | |||
1446 | pmu->remove_support = gk20a_remove_pmu_support; | ||
1447 | |||
1448 | switch (pmu->desc->app_version) { | ||
1449 | case APP_VERSION_NC_2: | ||
1450 | case APP_VERSION_NC_1: | ||
1451 | case APP_VERSION_NC_0: | ||
1452 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1453 | pg_cmd_eng_buf_load_size_v1; | ||
1454 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1455 | pg_cmd_eng_buf_load_set_cmd_type_v1; | ||
1456 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1457 | pg_cmd_eng_buf_load_set_engine_id_v1; | ||
1458 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1459 | pg_cmd_eng_buf_load_set_buf_idx_v1; | ||
1460 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1461 | pg_cmd_eng_buf_load_set_pad_v1; | ||
1462 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1463 | pg_cmd_eng_buf_load_set_buf_size_v1; | ||
1464 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1465 | pg_cmd_eng_buf_load_set_dma_base_v1; | ||
1466 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1467 | pg_cmd_eng_buf_load_set_dma_offset_v1; | ||
1468 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1469 | pg_cmd_eng_buf_load_set_dma_idx_v1; | ||
1470 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1471 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1472 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1473 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1474 | set_perfmon_cntr_valid_v2; | ||
1475 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1476 | set_perfmon_cntr_index_v2; | ||
1477 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1478 | set_perfmon_cntr_group_id_v2; | ||
1479 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1480 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1481 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1482 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1483 | pmu_cmdline_size_v4; | ||
1484 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1485 | set_pmu_cmdline_args_cpufreq_v4; | ||
1486 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1487 | set_pmu_cmdline_args_secure_mode_v4; | ||
1488 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1489 | set_pmu_cmdline_args_falctracesize_v4; | ||
1490 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1491 | set_pmu_cmdline_args_falctracedmabase_v4; | ||
1492 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1493 | set_pmu_cmdline_args_falctracedmaidx_v4; | ||
1494 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1495 | get_pmu_cmdline_args_ptr_v4; | ||
1496 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1497 | get_pmu_allocation_size_v2; | ||
1498 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1499 | set_pmu_allocation_ptr_v2; | ||
1500 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1501 | pmu_allocation_set_dmem_size_v2; | ||
1502 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1503 | pmu_allocation_get_dmem_size_v2; | ||
1504 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1505 | pmu_allocation_get_dmem_offset_v2; | ||
1506 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1507 | pmu_allocation_get_dmem_offset_addr_v2; | ||
1508 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1509 | pmu_allocation_set_dmem_offset_v2; | ||
1510 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1511 | get_pmu_init_msg_pmu_queue_params_v1; | ||
1512 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1513 | get_pmu_msg_pmu_init_msg_ptr_v1; | ||
1514 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1515 | get_pmu_init_msg_pmu_sw_mg_off_v1; | ||
1516 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1517 | get_pmu_init_msg_pmu_sw_mg_size_v1; | ||
1518 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1519 | get_pmu_perfmon_cmd_start_size_v2; | ||
1520 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1521 | get_perfmon_cmd_start_offsetofvar_v2; | ||
1522 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1523 | perfmon_start_set_cmd_type_v2; | ||
1524 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1525 | perfmon_start_set_group_id_v2; | ||
1526 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1527 | perfmon_start_set_state_id_v2; | ||
1528 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1529 | perfmon_start_set_flags_v2; | ||
1530 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1531 | perfmon_start_get_flags_v2; | ||
1532 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1533 | get_pmu_perfmon_cmd_init_size_v2; | ||
1534 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1535 | get_perfmon_cmd_init_offsetofvar_v2; | ||
1536 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1537 | perfmon_cmd_init_set_sample_buffer_v2; | ||
1538 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1539 | perfmon_cmd_init_set_dec_cnt_v2; | ||
1540 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1541 | perfmon_cmd_init_set_base_cnt_id_v2; | ||
1542 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1543 | perfmon_cmd_init_set_samp_period_us_v2; | ||
1544 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1545 | perfmon_cmd_init_set_num_cnt_v2; | ||
1546 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1547 | perfmon_cmd_init_set_mov_avg_v2; | ||
1548 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1549 | get_pmu_sequence_in_alloc_ptr_v1; | ||
1550 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1551 | get_pmu_sequence_out_alloc_ptr_v1; | ||
1552 | break; | ||
1553 | case APP_VERSION_NC_3: | ||
1554 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1555 | pg_cmd_eng_buf_load_size_v2; | ||
1556 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1557 | pg_cmd_eng_buf_load_set_cmd_type_v2; | ||
1558 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1559 | pg_cmd_eng_buf_load_set_engine_id_v2; | ||
1560 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1561 | pg_cmd_eng_buf_load_set_buf_idx_v2; | ||
1562 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1563 | pg_cmd_eng_buf_load_set_pad_v2; | ||
1564 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1565 | pg_cmd_eng_buf_load_set_buf_size_v2; | ||
1566 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1567 | pg_cmd_eng_buf_load_set_dma_base_v2; | ||
1568 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1569 | pg_cmd_eng_buf_load_set_dma_offset_v2; | ||
1570 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1571 | pg_cmd_eng_buf_load_set_dma_idx_v2; | ||
1572 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1573 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1574 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1575 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1576 | set_perfmon_cntr_valid_v2; | ||
1577 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1578 | set_perfmon_cntr_index_v2; | ||
1579 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1580 | set_perfmon_cntr_group_id_v2; | ||
1581 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1582 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1583 | g->ops.pmu_ver.is_pmu_zbc_save_supported = false; | ||
1584 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1585 | pmu_cmdline_size_v6; | ||
1586 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1587 | set_pmu_cmdline_args_cpufreq_v5; | ||
1588 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1589 | set_pmu_cmdline_args_secure_mode_v5; | ||
1590 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1591 | set_pmu_cmdline_args_falctracesize_v5; | ||
1592 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1593 | set_pmu_cmdline_args_falctracedmabase_v5; | ||
1594 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1595 | set_pmu_cmdline_args_falctracedmaidx_v5; | ||
1596 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1597 | get_pmu_cmdline_args_ptr_v5; | ||
1598 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1599 | get_pmu_allocation_size_v3; | ||
1600 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1601 | set_pmu_allocation_ptr_v3; | ||
1602 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1603 | pmu_allocation_set_dmem_size_v3; | ||
1604 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1605 | pmu_allocation_get_dmem_size_v3; | ||
1606 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1607 | pmu_allocation_get_dmem_offset_v3; | ||
1608 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1609 | pmu_allocation_get_dmem_offset_addr_v3; | ||
1610 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1611 | pmu_allocation_set_dmem_offset_v3; | ||
1612 | g->ops.pmu_ver.pmu_allocation_get_fb_addr = | ||
1613 | pmu_allocation_get_fb_addr_v3; | ||
1614 | g->ops.pmu_ver.pmu_allocation_get_fb_size = | ||
1615 | pmu_allocation_get_fb_size_v3; | ||
1616 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1617 | get_pmu_init_msg_pmu_queue_params_v4; | ||
1618 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1619 | get_pmu_msg_pmu_init_msg_ptr_v4; | ||
1620 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1621 | get_pmu_init_msg_pmu_sw_mg_off_v4; | ||
1622 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1623 | get_pmu_init_msg_pmu_sw_mg_size_v4; | ||
1624 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1625 | get_pmu_perfmon_cmd_start_size_v3; | ||
1626 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1627 | get_perfmon_cmd_start_offsetofvar_v3; | ||
1628 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1629 | perfmon_start_set_cmd_type_v3; | ||
1630 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1631 | perfmon_start_set_group_id_v3; | ||
1632 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1633 | perfmon_start_set_state_id_v3; | ||
1634 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1635 | perfmon_start_set_flags_v3; | ||
1636 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1637 | perfmon_start_get_flags_v3; | ||
1638 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1639 | get_pmu_perfmon_cmd_init_size_v3; | ||
1640 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1641 | get_perfmon_cmd_init_offsetofvar_v3; | ||
1642 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1643 | perfmon_cmd_init_set_sample_buffer_v3; | ||
1644 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1645 | perfmon_cmd_init_set_dec_cnt_v3; | ||
1646 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1647 | perfmon_cmd_init_set_base_cnt_id_v3; | ||
1648 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1649 | perfmon_cmd_init_set_samp_period_us_v3; | ||
1650 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1651 | perfmon_cmd_init_set_num_cnt_v3; | ||
1652 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1653 | perfmon_cmd_init_set_mov_avg_v3; | ||
1654 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1655 | get_pmu_sequence_in_alloc_ptr_v3; | ||
1656 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1657 | get_pmu_sequence_out_alloc_ptr_v3; | ||
1658 | break; | ||
1659 | case APP_VERSION_GM206: | ||
1660 | case APP_VERSION_NV_GPU: | ||
1661 | case APP_VERSION_NV_GPU_1: | ||
1662 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1663 | pg_cmd_eng_buf_load_size_v2; | ||
1664 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1665 | pg_cmd_eng_buf_load_set_cmd_type_v2; | ||
1666 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1667 | pg_cmd_eng_buf_load_set_engine_id_v2; | ||
1668 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1669 | pg_cmd_eng_buf_load_set_buf_idx_v2; | ||
1670 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1671 | pg_cmd_eng_buf_load_set_pad_v2; | ||
1672 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1673 | pg_cmd_eng_buf_load_set_buf_size_v2; | ||
1674 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1675 | pg_cmd_eng_buf_load_set_dma_base_v2; | ||
1676 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1677 | pg_cmd_eng_buf_load_set_dma_offset_v2; | ||
1678 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1679 | pg_cmd_eng_buf_load_set_dma_idx_v2; | ||
1680 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1681 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1682 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1683 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1684 | set_perfmon_cntr_valid_v2; | ||
1685 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1686 | set_perfmon_cntr_index_v2; | ||
1687 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1688 | set_perfmon_cntr_group_id_v2; | ||
1689 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1690 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1691 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1692 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1693 | pmu_cmdline_size_v5; | ||
1694 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1695 | set_pmu_cmdline_args_cpufreq_v5; | ||
1696 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1697 | set_pmu_cmdline_args_secure_mode_v5; | ||
1698 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1699 | set_pmu_cmdline_args_falctracesize_v5; | ||
1700 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1701 | set_pmu_cmdline_args_falctracedmabase_v5; | ||
1702 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1703 | set_pmu_cmdline_args_falctracedmaidx_v5; | ||
1704 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1705 | get_pmu_cmdline_args_ptr_v5; | ||
1706 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1707 | get_pmu_allocation_size_v3; | ||
1708 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1709 | set_pmu_allocation_ptr_v3; | ||
1710 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1711 | pmu_allocation_set_dmem_size_v3; | ||
1712 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1713 | pmu_allocation_get_dmem_size_v3; | ||
1714 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1715 | pmu_allocation_get_dmem_offset_v3; | ||
1716 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1717 | pmu_allocation_get_dmem_offset_addr_v3; | ||
1718 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1719 | pmu_allocation_set_dmem_offset_v3; | ||
1720 | g->ops.pmu_ver.pmu_allocation_get_fb_addr = | ||
1721 | pmu_allocation_get_fb_addr_v3; | ||
1722 | g->ops.pmu_ver.pmu_allocation_get_fb_size = | ||
1723 | pmu_allocation_get_fb_size_v3; | ||
1724 | if(pmu->desc->app_version != APP_VERSION_NV_GPU && | ||
1725 | pmu->desc->app_version != APP_VERSION_NV_GPU_1) { | ||
1726 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1727 | get_pmu_init_msg_pmu_queue_params_v2; | ||
1728 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1729 | get_pmu_msg_pmu_init_msg_ptr_v2; | ||
1730 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1731 | get_pmu_init_msg_pmu_sw_mg_off_v2; | ||
1732 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1733 | get_pmu_init_msg_pmu_sw_mg_size_v2; | ||
1734 | } | ||
1735 | else | ||
1736 | { | ||
1737 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1738 | get_pmu_init_msg_pmu_queue_params_v3; | ||
1739 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1740 | get_pmu_msg_pmu_init_msg_ptr_v3; | ||
1741 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1742 | get_pmu_init_msg_pmu_sw_mg_off_v3; | ||
1743 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1744 | get_pmu_init_msg_pmu_sw_mg_size_v3; | ||
1745 | } | ||
1746 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1747 | get_pmu_perfmon_cmd_start_size_v3; | ||
1748 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1749 | get_perfmon_cmd_start_offsetofvar_v3; | ||
1750 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1751 | perfmon_start_set_cmd_type_v3; | ||
1752 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1753 | perfmon_start_set_group_id_v3; | ||
1754 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1755 | perfmon_start_set_state_id_v3; | ||
1756 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1757 | perfmon_start_set_flags_v3; | ||
1758 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1759 | perfmon_start_get_flags_v3; | ||
1760 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1761 | get_pmu_perfmon_cmd_init_size_v3; | ||
1762 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1763 | get_perfmon_cmd_init_offsetofvar_v3; | ||
1764 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1765 | perfmon_cmd_init_set_sample_buffer_v3; | ||
1766 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1767 | perfmon_cmd_init_set_dec_cnt_v3; | ||
1768 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1769 | perfmon_cmd_init_set_base_cnt_id_v3; | ||
1770 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1771 | perfmon_cmd_init_set_samp_period_us_v3; | ||
1772 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1773 | perfmon_cmd_init_set_num_cnt_v3; | ||
1774 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1775 | perfmon_cmd_init_set_mov_avg_v3; | ||
1776 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1777 | get_pmu_sequence_in_alloc_ptr_v3; | ||
1778 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1779 | get_pmu_sequence_out_alloc_ptr_v3; | ||
1780 | break; | ||
1781 | case APP_VERSION_GM20B_5: | ||
1782 | case APP_VERSION_GM20B_4: | ||
1783 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1784 | pg_cmd_eng_buf_load_size_v0; | ||
1785 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1786 | pg_cmd_eng_buf_load_set_cmd_type_v0; | ||
1787 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1788 | pg_cmd_eng_buf_load_set_engine_id_v0; | ||
1789 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1790 | pg_cmd_eng_buf_load_set_buf_idx_v0; | ||
1791 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1792 | pg_cmd_eng_buf_load_set_pad_v0; | ||
1793 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1794 | pg_cmd_eng_buf_load_set_buf_size_v0; | ||
1795 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1796 | pg_cmd_eng_buf_load_set_dma_base_v0; | ||
1797 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1798 | pg_cmd_eng_buf_load_set_dma_offset_v0; | ||
1799 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1800 | pg_cmd_eng_buf_load_set_dma_idx_v0; | ||
1801 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1802 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1803 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1804 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1805 | set_perfmon_cntr_valid_v2; | ||
1806 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1807 | set_perfmon_cntr_index_v2; | ||
1808 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1809 | set_perfmon_cntr_group_id_v2; | ||
1810 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1811 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1812 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1813 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1814 | pmu_cmdline_size_v3; | ||
1815 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1816 | set_pmu_cmdline_args_cpufreq_v3; | ||
1817 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1818 | set_pmu_cmdline_args_secure_mode_v3; | ||
1819 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1820 | set_pmu_cmdline_args_falctracesize_v3; | ||
1821 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1822 | set_pmu_cmdline_args_falctracedmabase_v3; | ||
1823 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1824 | set_pmu_cmdline_args_falctracedmaidx_v3; | ||
1825 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1826 | get_pmu_cmdline_args_ptr_v3; | ||
1827 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1828 | get_pmu_allocation_size_v1; | ||
1829 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1830 | set_pmu_allocation_ptr_v1; | ||
1831 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1832 | pmu_allocation_set_dmem_size_v1; | ||
1833 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1834 | pmu_allocation_get_dmem_size_v1; | ||
1835 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1836 | pmu_allocation_get_dmem_offset_v1; | ||
1837 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1838 | pmu_allocation_get_dmem_offset_addr_v1; | ||
1839 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1840 | pmu_allocation_set_dmem_offset_v1; | ||
1841 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1842 | get_pmu_init_msg_pmu_queue_params_v1; | ||
1843 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1844 | get_pmu_msg_pmu_init_msg_ptr_v1; | ||
1845 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1846 | get_pmu_init_msg_pmu_sw_mg_off_v1; | ||
1847 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1848 | get_pmu_init_msg_pmu_sw_mg_size_v1; | ||
1849 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1850 | get_pmu_perfmon_cmd_start_size_v1; | ||
1851 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1852 | get_perfmon_cmd_start_offsetofvar_v1; | ||
1853 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1854 | perfmon_start_set_cmd_type_v1; | ||
1855 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1856 | perfmon_start_set_group_id_v1; | ||
1857 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1858 | perfmon_start_set_state_id_v1; | ||
1859 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1860 | perfmon_start_set_flags_v1; | ||
1861 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1862 | perfmon_start_get_flags_v1; | ||
1863 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1864 | get_pmu_perfmon_cmd_init_size_v1; | ||
1865 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1866 | get_perfmon_cmd_init_offsetofvar_v1; | ||
1867 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1868 | perfmon_cmd_init_set_sample_buffer_v1; | ||
1869 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1870 | perfmon_cmd_init_set_dec_cnt_v1; | ||
1871 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1872 | perfmon_cmd_init_set_base_cnt_id_v1; | ||
1873 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1874 | perfmon_cmd_init_set_samp_period_us_v1; | ||
1875 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1876 | perfmon_cmd_init_set_num_cnt_v1; | ||
1877 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1878 | perfmon_cmd_init_set_mov_avg_v1; | ||
1879 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1880 | get_pmu_sequence_in_alloc_ptr_v1; | ||
1881 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1882 | get_pmu_sequence_out_alloc_ptr_v1; | ||
1883 | break; | ||
1884 | case APP_VERSION_GM20B_3: | ||
1885 | case APP_VERSION_GM20B_2: | ||
1886 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1887 | pg_cmd_eng_buf_load_size_v0; | ||
1888 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1889 | pg_cmd_eng_buf_load_set_cmd_type_v0; | ||
1890 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1891 | pg_cmd_eng_buf_load_set_engine_id_v0; | ||
1892 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1893 | pg_cmd_eng_buf_load_set_buf_idx_v0; | ||
1894 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
1895 | pg_cmd_eng_buf_load_set_pad_v0; | ||
1896 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
1897 | pg_cmd_eng_buf_load_set_buf_size_v0; | ||
1898 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
1899 | pg_cmd_eng_buf_load_set_dma_base_v0; | ||
1900 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
1901 | pg_cmd_eng_buf_load_set_dma_offset_v0; | ||
1902 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
1903 | pg_cmd_eng_buf_load_set_dma_idx_v0; | ||
1904 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v2; | ||
1905 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v2; | ||
1906 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v2; | ||
1907 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
1908 | set_perfmon_cntr_valid_v2; | ||
1909 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
1910 | set_perfmon_cntr_index_v2; | ||
1911 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
1912 | set_perfmon_cntr_group_id_v2; | ||
1913 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v2; | ||
1914 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
1915 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
1916 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
1917 | pmu_cmdline_size_v2; | ||
1918 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
1919 | set_pmu_cmdline_args_cpufreq_v2; | ||
1920 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
1921 | set_pmu_cmdline_args_secure_mode_v2; | ||
1922 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
1923 | set_pmu_cmdline_args_falctracesize_v2; | ||
1924 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
1925 | set_pmu_cmdline_args_falctracedmabase_v2; | ||
1926 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
1927 | set_pmu_cmdline_args_falctracedmaidx_v2; | ||
1928 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
1929 | get_pmu_cmdline_args_ptr_v2; | ||
1930 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
1931 | get_pmu_allocation_size_v1; | ||
1932 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
1933 | set_pmu_allocation_ptr_v1; | ||
1934 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
1935 | pmu_allocation_set_dmem_size_v1; | ||
1936 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
1937 | pmu_allocation_get_dmem_size_v1; | ||
1938 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
1939 | pmu_allocation_get_dmem_offset_v1; | ||
1940 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
1941 | pmu_allocation_get_dmem_offset_addr_v1; | ||
1942 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
1943 | pmu_allocation_set_dmem_offset_v1; | ||
1944 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
1945 | get_pmu_init_msg_pmu_queue_params_v1; | ||
1946 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
1947 | get_pmu_msg_pmu_init_msg_ptr_v1; | ||
1948 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
1949 | get_pmu_init_msg_pmu_sw_mg_off_v1; | ||
1950 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
1951 | get_pmu_init_msg_pmu_sw_mg_size_v1; | ||
1952 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
1953 | get_pmu_perfmon_cmd_start_size_v1; | ||
1954 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
1955 | get_perfmon_cmd_start_offsetofvar_v1; | ||
1956 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
1957 | perfmon_start_set_cmd_type_v1; | ||
1958 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
1959 | perfmon_start_set_group_id_v1; | ||
1960 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
1961 | perfmon_start_set_state_id_v1; | ||
1962 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
1963 | perfmon_start_set_flags_v1; | ||
1964 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
1965 | perfmon_start_get_flags_v1; | ||
1966 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
1967 | get_pmu_perfmon_cmd_init_size_v1; | ||
1968 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
1969 | get_perfmon_cmd_init_offsetofvar_v1; | ||
1970 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
1971 | perfmon_cmd_init_set_sample_buffer_v1; | ||
1972 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
1973 | perfmon_cmd_init_set_dec_cnt_v1; | ||
1974 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
1975 | perfmon_cmd_init_set_base_cnt_id_v1; | ||
1976 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
1977 | perfmon_cmd_init_set_samp_period_us_v1; | ||
1978 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
1979 | perfmon_cmd_init_set_num_cnt_v1; | ||
1980 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
1981 | perfmon_cmd_init_set_mov_avg_v1; | ||
1982 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
1983 | get_pmu_sequence_in_alloc_ptr_v1; | ||
1984 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
1985 | get_pmu_sequence_out_alloc_ptr_v1; | ||
1986 | break; | ||
1987 | case APP_VERSION_GM20B_1: | ||
1988 | case APP_VERSION_GM20B: | ||
1989 | case APP_VERSION_1: | ||
1990 | case APP_VERSION_2: | ||
1991 | case APP_VERSION_3: | ||
1992 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
1993 | pg_cmd_eng_buf_load_size_v0; | ||
1994 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
1995 | pg_cmd_eng_buf_load_set_cmd_type_v0; | ||
1996 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
1997 | pg_cmd_eng_buf_load_set_engine_id_v0; | ||
1998 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
1999 | pg_cmd_eng_buf_load_set_buf_idx_v0; | ||
2000 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
2001 | pg_cmd_eng_buf_load_set_pad_v0; | ||
2002 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
2003 | pg_cmd_eng_buf_load_set_buf_size_v0; | ||
2004 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
2005 | pg_cmd_eng_buf_load_set_dma_base_v0; | ||
2006 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
2007 | pg_cmd_eng_buf_load_set_dma_offset_v0; | ||
2008 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
2009 | pg_cmd_eng_buf_load_set_dma_idx_v0; | ||
2010 | g->ops.pmu_ver.cmd_id_zbc_table_update = 16; | ||
2011 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
2012 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v0; | ||
2013 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v0; | ||
2014 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v0; | ||
2015 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
2016 | set_perfmon_cntr_valid_v0; | ||
2017 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
2018 | set_perfmon_cntr_index_v0; | ||
2019 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
2020 | set_perfmon_cntr_group_id_v0; | ||
2021 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v0; | ||
2022 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
2023 | pmu_cmdline_size_v1; | ||
2024 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
2025 | set_pmu_cmdline_args_cpufreq_v1; | ||
2026 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
2027 | set_pmu_cmdline_args_secure_mode_v1; | ||
2028 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_size = | ||
2029 | set_pmu_cmdline_args_falctracesize_v1; | ||
2030 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base = | ||
2031 | set_pmu_cmdline_args_falctracedmabase_v1; | ||
2032 | g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx = | ||
2033 | set_pmu_cmdline_args_falctracedmaidx_v1; | ||
2034 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
2035 | get_pmu_cmdline_args_ptr_v1; | ||
2036 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
2037 | get_pmu_allocation_size_v1; | ||
2038 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
2039 | set_pmu_allocation_ptr_v1; | ||
2040 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
2041 | pmu_allocation_set_dmem_size_v1; | ||
2042 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
2043 | pmu_allocation_get_dmem_size_v1; | ||
2044 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
2045 | pmu_allocation_get_dmem_offset_v1; | ||
2046 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
2047 | pmu_allocation_get_dmem_offset_addr_v1; | ||
2048 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
2049 | pmu_allocation_set_dmem_offset_v1; | ||
2050 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
2051 | get_pmu_init_msg_pmu_queue_params_v1; | ||
2052 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
2053 | get_pmu_msg_pmu_init_msg_ptr_v1; | ||
2054 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
2055 | get_pmu_init_msg_pmu_sw_mg_off_v1; | ||
2056 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
2057 | get_pmu_init_msg_pmu_sw_mg_size_v1; | ||
2058 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
2059 | get_pmu_perfmon_cmd_start_size_v1; | ||
2060 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
2061 | get_perfmon_cmd_start_offsetofvar_v1; | ||
2062 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
2063 | perfmon_start_set_cmd_type_v1; | ||
2064 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
2065 | perfmon_start_set_group_id_v1; | ||
2066 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
2067 | perfmon_start_set_state_id_v1; | ||
2068 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
2069 | perfmon_start_set_flags_v1; | ||
2070 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
2071 | perfmon_start_get_flags_v1; | ||
2072 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
2073 | get_pmu_perfmon_cmd_init_size_v1; | ||
2074 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
2075 | get_perfmon_cmd_init_offsetofvar_v1; | ||
2076 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
2077 | perfmon_cmd_init_set_sample_buffer_v1; | ||
2078 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
2079 | perfmon_cmd_init_set_dec_cnt_v1; | ||
2080 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
2081 | perfmon_cmd_init_set_base_cnt_id_v1; | ||
2082 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
2083 | perfmon_cmd_init_set_samp_period_us_v1; | ||
2084 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
2085 | perfmon_cmd_init_set_num_cnt_v1; | ||
2086 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
2087 | perfmon_cmd_init_set_mov_avg_v1; | ||
2088 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
2089 | get_pmu_sequence_in_alloc_ptr_v1; | ||
2090 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
2091 | get_pmu_sequence_out_alloc_ptr_v1; | ||
2092 | break; | ||
2093 | case APP_VERSION_0: | ||
2094 | g->ops.pmu_ver.pg_cmd_eng_buf_load_size = | ||
2095 | pg_cmd_eng_buf_load_size_v0; | ||
2096 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_cmd_type = | ||
2097 | pg_cmd_eng_buf_load_set_cmd_type_v0; | ||
2098 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_engine_id = | ||
2099 | pg_cmd_eng_buf_load_set_engine_id_v0; | ||
2100 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_idx = | ||
2101 | pg_cmd_eng_buf_load_set_buf_idx_v0; | ||
2102 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_pad = | ||
2103 | pg_cmd_eng_buf_load_set_pad_v0; | ||
2104 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_buf_size = | ||
2105 | pg_cmd_eng_buf_load_set_buf_size_v0; | ||
2106 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_base = | ||
2107 | pg_cmd_eng_buf_load_set_dma_base_v0; | ||
2108 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_offset = | ||
2109 | pg_cmd_eng_buf_load_set_dma_offset_v0; | ||
2110 | g->ops.pmu_ver.pg_cmd_eng_buf_load_set_dma_idx = | ||
2111 | pg_cmd_eng_buf_load_set_dma_idx_v0; | ||
2112 | g->ops.pmu_ver.cmd_id_zbc_table_update = 14; | ||
2113 | g->ops.pmu_ver.is_pmu_zbc_save_supported = true; | ||
2114 | g->ops.pmu_ver.get_perfmon_cntr_ptr = get_perfmon_cntr_ptr_v0; | ||
2115 | g->ops.pmu_ver.set_perfmon_cntr_ut = set_perfmon_cntr_ut_v0; | ||
2116 | g->ops.pmu_ver.set_perfmon_cntr_lt = set_perfmon_cntr_lt_v0; | ||
2117 | g->ops.pmu_ver.set_perfmon_cntr_valid = | ||
2118 | set_perfmon_cntr_valid_v0; | ||
2119 | g->ops.pmu_ver.set_perfmon_cntr_index = | ||
2120 | set_perfmon_cntr_index_v0; | ||
2121 | g->ops.pmu_ver.set_perfmon_cntr_group_id = | ||
2122 | set_perfmon_cntr_group_id_v0; | ||
2123 | g->ops.pmu_ver.get_perfmon_cntr_sz = pmu_perfmon_cntr_sz_v0; | ||
2124 | g->ops.pmu_ver.get_pmu_cmdline_args_size = | ||
2125 | pmu_cmdline_size_v0; | ||
2126 | g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq = | ||
2127 | set_pmu_cmdline_args_cpufreq_v0; | ||
2128 | g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode = | ||
2129 | NULL; | ||
2130 | g->ops.pmu_ver.get_pmu_cmdline_args_ptr = | ||
2131 | get_pmu_cmdline_args_ptr_v0; | ||
2132 | g->ops.pmu_ver.get_pmu_allocation_struct_size = | ||
2133 | get_pmu_allocation_size_v0; | ||
2134 | g->ops.pmu_ver.set_pmu_allocation_ptr = | ||
2135 | set_pmu_allocation_ptr_v0; | ||
2136 | g->ops.pmu_ver.pmu_allocation_set_dmem_size = | ||
2137 | pmu_allocation_set_dmem_size_v0; | ||
2138 | g->ops.pmu_ver.pmu_allocation_get_dmem_size = | ||
2139 | pmu_allocation_get_dmem_size_v0; | ||
2140 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset = | ||
2141 | pmu_allocation_get_dmem_offset_v0; | ||
2142 | g->ops.pmu_ver.pmu_allocation_get_dmem_offset_addr = | ||
2143 | pmu_allocation_get_dmem_offset_addr_v0; | ||
2144 | g->ops.pmu_ver.pmu_allocation_set_dmem_offset = | ||
2145 | pmu_allocation_set_dmem_offset_v0; | ||
2146 | g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params = | ||
2147 | get_pmu_init_msg_pmu_queue_params_v0; | ||
2148 | g->ops.pmu_ver.get_pmu_msg_pmu_init_msg_ptr = | ||
2149 | get_pmu_msg_pmu_init_msg_ptr_v0; | ||
2150 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_off = | ||
2151 | get_pmu_init_msg_pmu_sw_mg_off_v0; | ||
2152 | g->ops.pmu_ver.get_pmu_init_msg_pmu_sw_mg_size = | ||
2153 | get_pmu_init_msg_pmu_sw_mg_size_v0; | ||
2154 | g->ops.pmu_ver.get_pmu_perfmon_cmd_start_size = | ||
2155 | get_pmu_perfmon_cmd_start_size_v0; | ||
2156 | g->ops.pmu_ver.get_perfmon_cmd_start_offsetofvar = | ||
2157 | get_perfmon_cmd_start_offsetofvar_v0; | ||
2158 | g->ops.pmu_ver.perfmon_start_set_cmd_type = | ||
2159 | perfmon_start_set_cmd_type_v0; | ||
2160 | g->ops.pmu_ver.perfmon_start_set_group_id = | ||
2161 | perfmon_start_set_group_id_v0; | ||
2162 | g->ops.pmu_ver.perfmon_start_set_state_id = | ||
2163 | perfmon_start_set_state_id_v0; | ||
2164 | g->ops.pmu_ver.perfmon_start_set_flags = | ||
2165 | perfmon_start_set_flags_v0; | ||
2166 | g->ops.pmu_ver.perfmon_start_get_flags = | ||
2167 | perfmon_start_get_flags_v0; | ||
2168 | g->ops.pmu_ver.get_pmu_perfmon_cmd_init_size = | ||
2169 | get_pmu_perfmon_cmd_init_size_v0; | ||
2170 | g->ops.pmu_ver.get_perfmon_cmd_init_offsetofvar = | ||
2171 | get_perfmon_cmd_init_offsetofvar_v0; | ||
2172 | g->ops.pmu_ver.perfmon_cmd_init_set_sample_buffer = | ||
2173 | perfmon_cmd_init_set_sample_buffer_v0; | ||
2174 | g->ops.pmu_ver.perfmon_cmd_init_set_dec_cnt = | ||
2175 | perfmon_cmd_init_set_dec_cnt_v0; | ||
2176 | g->ops.pmu_ver.perfmon_cmd_init_set_base_cnt_id = | ||
2177 | perfmon_cmd_init_set_base_cnt_id_v0; | ||
2178 | g->ops.pmu_ver.perfmon_cmd_init_set_samp_period_us = | ||
2179 | perfmon_cmd_init_set_samp_period_us_v0; | ||
2180 | g->ops.pmu_ver.perfmon_cmd_init_set_num_cnt = | ||
2181 | perfmon_cmd_init_set_num_cnt_v0; | ||
2182 | g->ops.pmu_ver.perfmon_cmd_init_set_mov_avg = | ||
2183 | perfmon_cmd_init_set_mov_avg_v0; | ||
2184 | g->ops.pmu_ver.get_pmu_seq_in_a_ptr = | ||
2185 | get_pmu_sequence_in_alloc_ptr_v0; | ||
2186 | g->ops.pmu_ver.get_pmu_seq_out_a_ptr = | ||
2187 | get_pmu_sequence_out_alloc_ptr_v0; | ||
2188 | break; | ||
2189 | default: | ||
2190 | nvgpu_err(g, "PMU code version not supported version: %d", | ||
2191 | pmu->desc->app_version); | ||
2192 | err = -EINVAL; | ||
2193 | goto fail_pmu_seq; | ||
2194 | } | ||
2195 | pv->set_perfmon_cntr_index(pmu, 3); /* GR & CE2 */ | ||
2196 | pv->set_perfmon_cntr_group_id(pmu, PMU_DOMAIN_GROUP_PSTATE); | ||
2197 | |||
2198 | return 0; | ||
2199 | |||
2200 | fail_pmu_seq: | ||
2201 | nvgpu_mutex_destroy(&pmu->pmu_seq_lock); | ||
2202 | fail_pmu_copy: | ||
2203 | nvgpu_mutex_destroy(&pmu->pmu_copy_lock); | ||
2204 | fail_isr: | ||
2205 | nvgpu_mutex_destroy(&pmu->isr_mutex); | ||
2206 | fail_pg: | ||
2207 | nvgpu_mutex_destroy(&pmu->pg_mutex); | ||
2208 | fail_elpg: | ||
2209 | nvgpu_mutex_destroy(&pmu->elpg_mutex); | ||
2210 | return err; | ||
2211 | } | ||
2212 | 111 | ||
2213 | void pmu_copy_from_dmem(struct nvgpu_pmu *pmu, | 112 | void pmu_copy_from_dmem(struct nvgpu_pmu *pmu, |
2214 | u32 src, u8 *dst, u32 size, u8 port) | 113 | u32 src, u8 *dst, u32 size, u8 port) |
@@ -2793,63 +692,6 @@ void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set) | |||
2793 | pwr_pmu_msgq_tail_val_f(*tail)); | 692 | pwr_pmu_msgq_tail_val_f(*tail)); |
2794 | } | 693 | } |
2795 | 694 | ||
2796 | void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu) | ||
2797 | { | ||
2798 | struct gk20a *g = gk20a_from_pmu(pmu); | ||
2799 | |||
2800 | gk20a_dbg_fn(""); | ||
2801 | |||
2802 | if (nvgpu_alloc_initialized(&pmu->dmem)) | ||
2803 | nvgpu_alloc_destroy(&pmu->dmem); | ||
2804 | |||
2805 | nvgpu_release_firmware(g, pmu->fw); | ||
2806 | |||
2807 | nvgpu_mutex_destroy(&pmu->elpg_mutex); | ||
2808 | nvgpu_mutex_destroy(&pmu->pg_mutex); | ||
2809 | nvgpu_mutex_destroy(&pmu->isr_mutex); | ||
2810 | nvgpu_mutex_destroy(&pmu->pmu_copy_lock); | ||
2811 | nvgpu_mutex_destroy(&pmu->pmu_seq_lock); | ||
2812 | } | ||
2813 | |||
2814 | static int gk20a_prepare_ucode(struct gk20a *g) | ||
2815 | { | ||
2816 | struct nvgpu_pmu *pmu = &g->pmu; | ||
2817 | int err = 0; | ||
2818 | struct mm_gk20a *mm = &g->mm; | ||
2819 | struct vm_gk20a *vm = mm->pmu.vm; | ||
2820 | |||
2821 | if (pmu->fw) | ||
2822 | return gk20a_init_pmu(pmu); | ||
2823 | |||
2824 | pmu->fw = nvgpu_request_firmware(g, GK20A_PMU_UCODE_IMAGE, 0); | ||
2825 | if (!pmu->fw) { | ||
2826 | nvgpu_err(g, "failed to load pmu ucode!!"); | ||
2827 | return err; | ||
2828 | } | ||
2829 | |||
2830 | gk20a_dbg_fn("firmware loaded"); | ||
2831 | |||
2832 | pmu->desc = (struct pmu_ucode_desc *)pmu->fw->data; | ||
2833 | pmu->ucode_image = (u32 *)((u8 *)pmu->desc + | ||
2834 | pmu->desc->descriptor_size); | ||
2835 | |||
2836 | err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX, | ||
2837 | &pmu->ucode); | ||
2838 | if (err) | ||
2839 | goto err_release_fw; | ||
2840 | |||
2841 | nvgpu_mem_wr_n(g, &pmu->ucode, 0, pmu->ucode_image, | ||
2842 | pmu->desc->app_start_offset + pmu->desc->app_size); | ||
2843 | |||
2844 | return gk20a_init_pmu(pmu); | ||
2845 | |||
2846 | err_release_fw: | ||
2847 | nvgpu_release_firmware(g, pmu->fw); | ||
2848 | pmu->fw = NULL; | ||
2849 | |||
2850 | return err; | ||
2851 | } | ||
2852 | |||
2853 | static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, | 695 | static void pmu_handle_pg_buf_config_msg(struct gk20a *g, struct pmu_msg *msg, |
2854 | void *param, u32 handle, u32 status) | 696 | void *param, u32 handle, u32 status) |
2855 | { | 697 | { |
@@ -3018,7 +860,7 @@ u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id) | |||
3018 | void gk20a_init_pmu_ops(struct gpu_ops *gops) | 860 | void gk20a_init_pmu_ops(struct gpu_ops *gops) |
3019 | { | 861 | { |
3020 | gops->pmu.is_pmu_supported = gk20a_is_pmu_supported; | 862 | gops->pmu.is_pmu_supported = gk20a_is_pmu_supported; |
3021 | gops->pmu.prepare_ucode = gk20a_prepare_ucode; | 863 | gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob; |
3022 | gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1; | 864 | gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1; |
3023 | gops->pmu.pmu_nsbootstrap = pmu_bootstrap; | 865 | gops->pmu.pmu_nsbootstrap = pmu_bootstrap; |
3024 | gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r; | 866 | gops->pmu.pmu_get_queue_head = pwr_pmu_queue_head_r; |
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h index 1d2e20e6..b5038bd4 100644 --- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.h | |||
@@ -30,24 +30,6 @@ struct nvgpu_firmware; | |||
30 | 30 | ||
31 | #define ZBC_MASK(i) (~(~(0) << ((i)+1)) & 0xfffe) | 31 | #define ZBC_MASK(i) (~(~(0) << ((i)+1)) & 0xfffe) |
32 | 32 | ||
33 | #define APP_VERSION_NC_3 22204331 | ||
34 | #define APP_VERSION_NC_2 20429989 | ||
35 | #define APP_VERSION_NC_1 20313802 | ||
36 | #define APP_VERSION_NC_0 20360931 | ||
37 | #define APP_VERSION_GM206 20652057 | ||
38 | #define APP_VERSION_NV_GPU 21307569 | ||
39 | #define APP_VERSION_NV_GPU_1 21308030 | ||
40 | #define APP_VERSION_GM20B_5 20490253 | ||
41 | #define APP_VERSION_GM20B_4 19008461 | ||
42 | #define APP_VERSION_GM20B_3 18935575 | ||
43 | #define APP_VERSION_GM20B_2 18694072 | ||
44 | #define APP_VERSION_GM20B_1 18547257 | ||
45 | #define APP_VERSION_GM20B 17615280 | ||
46 | #define APP_VERSION_3 18357968 | ||
47 | #define APP_VERSION_2 18542378 | ||
48 | #define APP_VERSION_1 17997577 /*Obsolete this once 18357968 gets in*/ | ||
49 | #define APP_VERSION_0 16856675 | ||
50 | |||
51 | /*Fuse defines*/ | 33 | /*Fuse defines*/ |
52 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) | 34 | #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) |
53 | #define FUSE_GCPLEX_CONFIG_FUSE_0 0x2C8 | 35 | #define FUSE_GCPLEX_CONFIG_FUSE_0 0x2C8 |
@@ -127,11 +109,7 @@ void pmu_copy_from_dmem(struct nvgpu_pmu *pmu, | |||
127 | u32 src, u8 *dst, u32 size, u8 port); | 109 | u32 src, u8 *dst, u32 size, u8 port); |
128 | int pmu_reset(struct nvgpu_pmu *pmu); | 110 | int pmu_reset(struct nvgpu_pmu *pmu); |
129 | int pmu_bootstrap(struct nvgpu_pmu *pmu); | 111 | int pmu_bootstrap(struct nvgpu_pmu *pmu); |
130 | int gk20a_init_pmu(struct nvgpu_pmu *pmu); | ||
131 | void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu); | 112 | void pmu_dump_falcon_stats(struct nvgpu_pmu *pmu); |
132 | void gk20a_remove_pmu_support(struct nvgpu_pmu *pmu); | ||
133 | |||
134 | int gk20a_init_pmu(struct nvgpu_pmu *pmu); | ||
135 | 113 | ||
136 | int gk20a_pmu_ap_send_command(struct gk20a *g, | 114 | int gk20a_pmu_ap_send_command(struct gk20a *g, |
137 | union pmu_ap_cmd *p_ap_cmd, bool b_block); | 115 | union pmu_ap_cmd *p_ap_cmd, bool b_block); |
diff --git a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c index 580ba5e5..02da3de9 100644 --- a/drivers/gpu/nvgpu/gm20b/acr_gm20b.c +++ b/drivers/gpu/nvgpu/gm20b/acr_gm20b.c | |||
@@ -161,7 +161,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img *p_img) | |||
161 | pmu->ucode_image = (u32 *)pmu_fw->data; | 161 | pmu->ucode_image = (u32 *)pmu_fw->data; |
162 | g->acr.pmu_desc = pmu_desc; | 162 | g->acr.pmu_desc = pmu_desc; |
163 | 163 | ||
164 | err = gk20a_init_pmu(pmu); | 164 | err = nvgpu_init_pmu_fw_support(pmu); |
165 | if (err) { | 165 | if (err) { |
166 | gm20b_dbg_pmu("failed to set function pointers\n"); | 166 | gm20b_dbg_pmu("failed to set function pointers\n"); |
167 | goto release_sig; | 167 | goto release_sig; |
@@ -395,7 +395,7 @@ int prepare_ucode_blob(struct gk20a *g) | |||
395 | if (g->acr.ucode_blob.cpu_va) { | 395 | if (g->acr.ucode_blob.cpu_va) { |
396 | /*Recovery case, we do not need to form | 396 | /*Recovery case, we do not need to form |
397 | non WPR blob of ucodes*/ | 397 | non WPR blob of ucodes*/ |
398 | err = gk20a_init_pmu(pmu); | 398 | err = nvgpu_init_pmu_fw_support(pmu); |
399 | if (err) { | 399 | if (err) { |
400 | gm20b_dbg_pmu("failed to set function pointers\n"); | 400 | gm20b_dbg_pmu("failed to set function pointers\n"); |
401 | return err; | 401 | return err; |
diff --git a/drivers/gpu/nvgpu/gp106/acr_gp106.c b/drivers/gpu/nvgpu/gp106/acr_gp106.c index 46bc5055..076c8829 100644 --- a/drivers/gpu/nvgpu/gp106/acr_gp106.c +++ b/drivers/gpu/nvgpu/gp106/acr_gp106.c | |||
@@ -174,7 +174,7 @@ static int pmu_ucode_details(struct gk20a *g, struct flcn_ucode_img_v1 *p_img) | |||
174 | pmu->ucode_image = (u32 *)pmu_fw->data; | 174 | pmu->ucode_image = (u32 *)pmu_fw->data; |
175 | g->acr.pmu_desc = pmu_desc; | 175 | g->acr.pmu_desc = pmu_desc; |
176 | 176 | ||
177 | err = gk20a_init_pmu(pmu); | 177 | err = nvgpu_init_pmu_fw_support(pmu); |
178 | if (err) { | 178 | if (err) { |
179 | nvgpu_err(g, "failed to set function pointers"); | 179 | nvgpu_err(g, "failed to set function pointers"); |
180 | goto release_sig; | 180 | goto release_sig; |
@@ -389,7 +389,7 @@ static int gp106_prepare_ucode_blob(struct gk20a *g) | |||
389 | if (g->acr.ucode_blob.cpu_va) { | 389 | if (g->acr.ucode_blob.cpu_va) { |
390 | /*Recovery case, we do not need to form | 390 | /*Recovery case, we do not need to form |
391 | non WPR blob of ucodes*/ | 391 | non WPR blob of ucodes*/ |
392 | err = gk20a_init_pmu(pmu); | 392 | err = nvgpu_init_pmu_fw_support(pmu); |
393 | if (err) { | 393 | if (err) { |
394 | gp106_dbg_pmu("failed to set function pointers\n"); | 394 | gp106_dbg_pmu("failed to set function pointers\n"); |
395 | return err; | 395 | return err; |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/pmu.h b/drivers/gpu/nvgpu/include/nvgpu/pmu.h index 169d8b98..15f37bda 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/pmu.h +++ b/drivers/gpu/nvgpu/include/nvgpu/pmu.h | |||
@@ -401,4 +401,8 @@ int nvgpu_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, | |||
401 | int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, | 401 | int nvgpu_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem, |
402 | u32 size); | 402 | u32 size); |
403 | 403 | ||
404 | /* PMU F/W support */ | ||
405 | int nvgpu_init_pmu_fw_support(struct nvgpu_pmu *pmu); | ||
406 | int nvgpu_pmu_prepare_ns_ucode_blob(struct gk20a *g); | ||
407 | |||
404 | #endif /* __NVGPU_PMU_H__ */ | 408 | #endif /* __NVGPU_PMU_H__ */ |