author    Joshua Bakita <bakitajoshua@gmail.com>    2024-09-25 16:09:09 -0400
committer Joshua Bakita <bakitajoshua@gmail.com>    2024-09-25 16:09:09 -0400
commit    f347fde22f1297e4f022600d201780d5ead78114 (patch)
tree      76be305d6187003a1e0486ff6e91efb1062ae118 /include/gk20a/pmu_gk20a.c
parent    8340d234d78a7d0f46c11a584de538148b78b7cb (diff)
Delete no-longer-needed nvgpu headers (HEAD, master, jbakita-wip)
The dependency on these was removed in commit 8340d234.
Diffstat (limited to 'include/gk20a/pmu_gk20a.c')
-rw-r--r--  include/gk20a/pmu_gk20a.c  879
1 file changed, 0 insertions(+), 879 deletions(-)
diff --git a/include/gk20a/pmu_gk20a.c b/include/gk20a/pmu_gk20a.c
deleted file mode 100644
index 63a32f0..0000000
--- a/include/gk20a/pmu_gk20a.c
+++ /dev/null
@@ -1,879 +0,0 @@
/*
 * GK20A PMU (aka. gPMU outside gk20a context)
 *
 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/nvgpu_common.h>
#include <nvgpu/timers.h>
#include <nvgpu/kmem.h>
#include <nvgpu/dma.h>
#include <nvgpu/log.h>
#include <nvgpu/bug.h>
#include <nvgpu/firmware.h>
#include <nvgpu/falcon.h>
#include <nvgpu/mm.h>
#include <nvgpu/io.h>
#include <nvgpu/clk_arb.h>
#include <nvgpu/utils.h>
#include <nvgpu/unit.h>

#include "gk20a.h"
#include "gr_gk20a.h"
#include "pmu_gk20a.h"

#include <nvgpu/hw/gk20a/hw_mc_gk20a.h>
#include <nvgpu/hw/gk20a/hw_pwr_gk20a.h>
#include <nvgpu/hw/gk20a/hw_top_gk20a.h>

#define gk20a_dbg_pmu(g, fmt, arg...) \
	nvgpu_log(g, gpu_dbg_pmu, fmt, ##arg)

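/*
 * Scan a PMU trace format string for the first "%x" or "%X" conversion
 * specifier. Returns true with the specifier's offset stored in *hex_pos,
 * or false with *hex_pos set to (u32)-1 when none is found.
 */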
bool nvgpu_find_hex_in_string(char *strings, struct gk20a *g, u32 *hex_pos)
{
	u32 i = 0, j = strlen(strings);

	for (; i < j; i++) {
		if (strings[i] == '%') {
			if (strings[i + 1] == 'x' || strings[i + 1] == 'X') {
				*hex_pos = i;
				return true;
			}
		}
	}
	*hex_pos = -1;
	return false;
}

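/*
 * Dump the PMU firmware trace buffer to the kernel log. Each 0x40-byte
 * record appears to hold a header word, a few u32 arguments, and a format
 * string starting at byte offset 20; every "%x"/"%X" in the string is
 * substituted with the next recorded argument. An all-zero record ends
 * the dump.
 */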
static void print_pmu_trace(struct nvgpu_pmu *pmu)
{
	struct gk20a *g = pmu->g;
	u32 i = 0, j = 0, k, l, m, count;
	char part_str[40], buf[0x40];
	void *tracebuffer;
	char *trace;
	u32 *trace1;

	/* allocate system memory to copy pmu trace buffer */
	tracebuffer = nvgpu_kzalloc(g, GK20A_PMU_TRACE_BUFSIZE);
	if (tracebuffer == NULL) {
		return;
	}

	/* read pmu traces into system memory buffer */
	nvgpu_mem_rd_n(g, &pmu->trace_buf, 0, tracebuffer,
		GK20A_PMU_TRACE_BUFSIZE);

	trace = (char *)tracebuffer;
	trace1 = (u32 *)tracebuffer;

	nvgpu_err(g, "dump PMU trace buffer");
	for (i = 0; i < GK20A_PMU_TRACE_BUFSIZE; i += 0x40) {
		for (j = 0; j < 0x40; j++) {
			if (trace1[(i / 4) + j]) {
				break;
			}
		}
		if (j == 0x40) {
			break;
		}
		count = scnprintf(buf, 0x40, "Index %x: ", trace1[(i / 4)]);
		l = 0;
		m = 0;
		while (nvgpu_find_hex_in_string((trace+i+20+m), g, &k)) {
			if (k >= 40) {
				break;
			}
			strncpy(part_str, (trace+i+20+m), k);
			part_str[k] = '\0';
			/* bound each write by the space left in buf, not by
			 * its full size, so buf cannot overflow */
			count += scnprintf((buf + count), 0x40 - count,
					"%s0x%x", part_str,
					trace1[(i / 4) + 1 + l]);
			l++;
			m += k + 2;
		}

		scnprintf((buf + count), 0x40 - count, "%s", (trace+i+20+m));
		nvgpu_err(g, "%s", buf);
	}

	nvgpu_kfree(g, tracebuffer);
}

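/*
 * Assemble the PWR falcon IRQDEST value: the watchdog, halt, SWGEN0, and
 * external interrupt lines are routed to the host, everything else stays
 * with the falcon; on the host side the general-purpose timer and the
 * external lines are steered to irq1, the rest to irq0.
 */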
u32 gk20a_pmu_get_irqdest(struct gk20a *g)
{
	u32 intr_dest;

	/* dest 0=falcon, 1=host; level 0=irq0, 1=irq1 */
	intr_dest = pwr_falcon_irqdest_host_gptmr_f(0) |
		pwr_falcon_irqdest_host_wdtmr_f(1) |
		pwr_falcon_irqdest_host_mthd_f(0) |
		pwr_falcon_irqdest_host_ctxsw_f(0) |
		pwr_falcon_irqdest_host_halt_f(1) |
		pwr_falcon_irqdest_host_exterr_f(0) |
		pwr_falcon_irqdest_host_swgen0_f(1) |
		pwr_falcon_irqdest_host_swgen1_f(0) |
		pwr_falcon_irqdest_host_ext_f(0xff) |
		pwr_falcon_irqdest_target_gptmr_f(1) |
		pwr_falcon_irqdest_target_wdtmr_f(0) |
		pwr_falcon_irqdest_target_mthd_f(0) |
		pwr_falcon_irqdest_target_ctxsw_f(0) |
		pwr_falcon_irqdest_target_halt_f(0) |
		pwr_falcon_irqdest_target_exterr_f(0) |
		pwr_falcon_irqdest_target_swgen0_f(0) |
		pwr_falcon_irqdest_target_swgen1_f(0) |
		pwr_falcon_irqdest_target_ext_f(0xff);

	return intr_dest;
}

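/*
 * Enable or disable PMU interrupt delivery. Delivery is first cut off at
 * both the MC and falcon levels; on enable, the falcon mask and destination
 * registers are reprogrammed before the MC-level interrupt is turned back
 * on, presumably so no interrupt fires with stale routing in place.
 */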
void gk20a_pmu_enable_irq(struct nvgpu_pmu *pmu, bool enable)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	u32 intr_mask;
	u32 intr_dest;

	nvgpu_log_fn(g, " ");

	g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, true,
		mc_intr_mask_0_pmu_enabled_f());
	g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_DISABLE, false,
		mc_intr_mask_1_pmu_enabled_f());

	nvgpu_flcn_set_irq(pmu->flcn, false, 0x0, 0x0);

	if (enable) {
		intr_dest = g->ops.pmu.get_irqdest(g);
		/* 0=disable, 1=enable */
		intr_mask = pwr_falcon_irqmset_gptmr_f(1) |
			pwr_falcon_irqmset_wdtmr_f(1) |
			pwr_falcon_irqmset_mthd_f(0) |
			pwr_falcon_irqmset_ctxsw_f(0) |
			pwr_falcon_irqmset_halt_f(1) |
			pwr_falcon_irqmset_exterr_f(1) |
			pwr_falcon_irqmset_swgen0_f(1) |
			pwr_falcon_irqmset_swgen1_f(1);

		nvgpu_flcn_set_irq(pmu->flcn, true, intr_mask, intr_dest);

		g->ops.mc.intr_unit_config(g, MC_INTR_UNIT_ENABLE, true,
			mc_intr_mask_0_pmu_enabled_f());
	}

	nvgpu_log_fn(g, "done");
}

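/*
 * Load and launch the non-secure PMU firmware: enable the falcon's context
 * interface, bind the PMU instance block, place the command-line arguments
 * at the top of DMEM, push the application's DMA descriptors (addresses in
 * 256-byte units) through the DMEM port, DMA the bootloader into IMEM one
 * 256-byte block at a time, and start the falcon at the bootloader entry
 * point.
 */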
int pmu_bootstrap(struct nvgpu_pmu *pmu)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct mm_gk20a *mm = &g->mm;
	struct pmu_ucode_desc *desc = pmu->desc;
	u64 addr_code, addr_data, addr_load;
	u32 i, blocks, addr_args;

	nvgpu_log_fn(g, " ");

	gk20a_writel(g, pwr_falcon_itfen_r(),
		gk20a_readl(g, pwr_falcon_itfen_r()) |
		pwr_falcon_itfen_ctxen_enable_f());
	gk20a_writel(g, pwr_pmu_new_instblk_r(),
		pwr_pmu_new_instblk_ptr_f(
			nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
		pwr_pmu_new_instblk_valid_f(1) |
		pwr_pmu_new_instblk_target_sys_coh_f());

	/* TBD: load all other surfaces */
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(
		pmu, GK20A_PMU_TRACE_BUFSIZE);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu);
	g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx(
		pmu, GK20A_PMU_DMAIDX_VIRT);

	g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu,
		g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK));

	addr_args = (pwr_falcon_hwcfg_dmem_size_v(
		gk20a_readl(g, pwr_falcon_hwcfg_r()))
		<< GK20A_PMU_DMEM_BLKSIZE2) -
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu);

	nvgpu_flcn_copy_to_dmem(pmu->flcn, addr_args,
		(u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)),
		g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0);

	gk20a_writel(g, pwr_falcon_dmemc_r(0),
		pwr_falcon_dmemc_offs_f(0) |
		pwr_falcon_dmemc_blk_f(0) |
		pwr_falcon_dmemc_aincw_f(1));

	addr_code = u64_lo32((pmu->ucode.gpu_va +
			desc->app_start_offset +
			desc->app_resident_code_offset) >> 8);
	addr_data = u64_lo32((pmu->ucode.gpu_va +
			desc->app_start_offset +
			desc->app_resident_data_offset) >> 8);
	addr_load = u64_lo32((pmu->ucode.gpu_va +
			desc->bootloader_start_offset) >> 8);

	gk20a_writel(g, pwr_falcon_dmemd_r(0), GK20A_PMU_DMAIDX_UCODE);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_size);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_code_size);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_imem_entry);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_data);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_data_size);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x1);
	gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_args);

	g->ops.pmu.write_dmatrfbase(g,
		addr_load - (desc->bootloader_imem_offset >> 8));

	blocks = ((desc->bootloader_size + 0xFF) & ~0xFF) >> 8;

	for (i = 0; i < blocks; i++) {
		gk20a_writel(g, pwr_falcon_dmatrfmoffs_r(),
			desc->bootloader_imem_offset + (i << 8));
		gk20a_writel(g, pwr_falcon_dmatrffboffs_r(),
			desc->bootloader_imem_offset + (i << 8));
		gk20a_writel(g, pwr_falcon_dmatrfcmd_r(),
			pwr_falcon_dmatrfcmd_imem_f(1) |
			pwr_falcon_dmatrfcmd_write_f(0) |
			pwr_falcon_dmatrfcmd_size_f(6) |
			pwr_falcon_dmatrfcmd_ctxdma_f(GK20A_PMU_DMAIDX_UCODE));
	}

	nvgpu_flcn_bootstrap(g->pmu.flcn, desc->bootloader_entry_point);

	gk20a_writel(g, pwr_falcon_os_r(), desc->app_version);

	return 0;
}

void gk20a_pmu_pg_idle_counter_config(struct gk20a *g, u32 pg_engine_id)
{
	gk20a_writel(g, pwr_pmu_pg_idlefilth_r(pg_engine_id),
		PMU_PG_IDLE_THRESHOLD);
	gk20a_writel(g, pwr_pmu_pg_ppuidlefilth_r(pg_engine_id),
		PMU_PG_POST_POWERUP_IDLE_THRESHOLD);
}

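/*
 * Acquire a PMU hardware mutex. A fresh token is generated from the
 * MUTEX_ID register and ownership is claimed by writing it to the mutex
 * register and reading it back. On contention the token is returned and
 * the acquire retried, up to 40 times with a 20-40 us back-off.
 * Re-acquisition by the current owner just bumps the reference count.
 */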
int gk20a_pmu_mutex_acquire(struct nvgpu_pmu *pmu, u32 id, u32 *token)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct pmu_mutex *mutex;
	u32 data, owner, max_retry;

	if (!pmu->initialized) {
		return -EINVAL;
	}

	BUG_ON(!token);
	BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
	BUG_ON(id > pmu->mutex_cnt);

	mutex = &pmu->mutex[id];

	owner = pwr_pmu_mutex_value_v(
		gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));

	if (*token != PMU_INVALID_MUTEX_OWNER_ID && *token == owner) {
		BUG_ON(mutex->ref_cnt == 0);
		gk20a_dbg_pmu(g, "already acquired by owner : 0x%08x", *token);
		mutex->ref_cnt++;
		return 0;
	}

	max_retry = 40;
	do {
		data = pwr_pmu_mutex_id_value_v(
			gk20a_readl(g, pwr_pmu_mutex_id_r()));
		if (data == pwr_pmu_mutex_id_value_init_v() ||
		    data == pwr_pmu_mutex_id_value_not_avail_v()) {
			nvgpu_warn(g,
				"fail to generate mutex token: val 0x%08x",
				owner);
			nvgpu_usleep_range(20, 40);
			continue;
		}

		owner = data;
		gk20a_writel(g, pwr_pmu_mutex_r(mutex->index),
			pwr_pmu_mutex_value_f(owner));

		data = pwr_pmu_mutex_value_v(
			gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));

		if (owner == data) {
			mutex->ref_cnt = 1;
			/* store the token before logging it so the debug
			 * print reports the token actually handed back */
			*token = owner;
			gk20a_dbg_pmu(g, "mutex acquired: id=%d, token=0x%x",
				mutex->index, *token);
			return 0;
		} else {
			nvgpu_log_info(g, "fail to acquire mutex idx=0x%08x",
				mutex->index);

			data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
			data = set_field(data,
				pwr_pmu_mutex_id_release_value_m(),
				pwr_pmu_mutex_id_release_value_f(owner));
			gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);

			nvgpu_usleep_range(20, 40);
			continue;
		}
	} while (max_retry-- > 0);

	return -EBUSY;
}

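/*
 * Release a PMU hardware mutex taken with gk20a_pmu_mutex_acquire().
 * Drops a nested reference first (returning -EBUSY while references
 * remain), then restores the unlocked register value and returns the
 * token to the MUTEX_ID pool.
 */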
int gk20a_pmu_mutex_release(struct nvgpu_pmu *pmu, u32 id, u32 *token)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	struct pmu_mutex *mutex;
	u32 owner, data;

	if (!pmu->initialized) {
		return -EINVAL;
	}

	BUG_ON(!token);
	BUG_ON(!PMU_MUTEX_ID_IS_VALID(id));
	BUG_ON(id > pmu->mutex_cnt);

	mutex = &pmu->mutex[id];

	owner = pwr_pmu_mutex_value_v(
		gk20a_readl(g, pwr_pmu_mutex_r(mutex->index)));

	if (*token != owner) {
		nvgpu_err(g, "requester 0x%08x NOT match owner 0x%08x",
			*token, owner);
		return -EINVAL;
	}

	if (--mutex->ref_cnt > 0) {
		return -EBUSY;
	}

	gk20a_writel(g, pwr_pmu_mutex_r(mutex->index),
		pwr_pmu_mutex_value_initial_lock_f());

	data = gk20a_readl(g, pwr_pmu_mutex_id_release_r());
	data = set_field(data, pwr_pmu_mutex_id_release_value_m(),
		pwr_pmu_mutex_id_release_value_f(owner));
	gk20a_writel(g, pwr_pmu_mutex_id_release_r(), data);

	gk20a_dbg_pmu(g, "mutex released: id=%d, token=0x%x",
		mutex->index, *token);

	return 0;
}

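/*
 * Read (set == false) or write (set == true) the head pointer of a PMU
 * queue. Command queues use the per-queue QUEUE_HEAD registers, while the
 * message queue lives behind the single MSGQ_HEAD register.
 */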
int gk20a_pmu_queue_head(struct gk20a *g, struct nvgpu_falcon_queue *queue,
	u32 *head, bool set)
{
	u32 queue_head_size = 0;

	if (g->ops.pmu.pmu_get_queue_head_size) {
		queue_head_size = g->ops.pmu.pmu_get_queue_head_size();
	}

	BUG_ON(!head || !queue_head_size);

	if (PMU_IS_COMMAND_QUEUE(queue->id)) {

		if (queue->index >= queue_head_size) {
			return -EINVAL;
		}

		if (!set) {
			*head = pwr_pmu_queue_head_address_v(
				gk20a_readl(g,
				g->ops.pmu.pmu_get_queue_head(queue->index)));
		} else {
			gk20a_writel(g,
				g->ops.pmu.pmu_get_queue_head(queue->index),
				pwr_pmu_queue_head_address_f(*head));
		}
	} else {
		if (!set) {
			*head = pwr_pmu_msgq_head_val_v(
				gk20a_readl(g, pwr_pmu_msgq_head_r()));
		} else {
			gk20a_writel(g,
				pwr_pmu_msgq_head_r(),
				pwr_pmu_msgq_head_val_f(*head));
		}
	}

	return 0;
}

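/*
 * Tail-pointer twin of gk20a_pmu_queue_head(): accesses QUEUE_TAIL for
 * command queues and MSGQ_TAIL for the message queue.
 */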
int gk20a_pmu_queue_tail(struct gk20a *g, struct nvgpu_falcon_queue *queue,
	u32 *tail, bool set)
{
	u32 queue_tail_size = 0;

	if (g->ops.pmu.pmu_get_queue_tail_size) {
		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
	}

	BUG_ON(!tail || !queue_tail_size);

	if (PMU_IS_COMMAND_QUEUE(queue->id)) {

		if (queue->index >= queue_tail_size) {
			return -EINVAL;
		}

		if (!set) {
			*tail = pwr_pmu_queue_tail_address_v(gk20a_readl(g,
				g->ops.pmu.pmu_get_queue_tail(queue->index)));
		} else {
			gk20a_writel(g,
				g->ops.pmu.pmu_get_queue_tail(queue->index),
				pwr_pmu_queue_tail_address_f(*tail));
		}

	} else {
		if (!set) {
			*tail = pwr_pmu_msgq_tail_val_v(
				gk20a_readl(g, pwr_pmu_msgq_tail_r()));
		} else {
			gk20a_writel(g,
				pwr_pmu_msgq_tail_r(),
				pwr_pmu_msgq_tail_val_f(*tail));
		}
	}

	return 0;
}

void gk20a_pmu_msgq_tail(struct nvgpu_pmu *pmu, u32 *tail, bool set)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	u32 queue_tail_size = 0;

	if (g->ops.pmu.pmu_get_queue_tail_size) {
		queue_tail_size = g->ops.pmu.pmu_get_queue_tail_size();
	}

	BUG_ON(!tail || !queue_tail_size);

	if (!set) {
		*tail = pwr_pmu_msgq_tail_val_v(
			gk20a_readl(g, pwr_pmu_msgq_tail_r()));
	} else {
		gk20a_writel(g,
			pwr_pmu_msgq_tail_r(),
			pwr_pmu_msgq_tail_val_f(*tail));
	}
}

void gk20a_write_dmatrfbase(struct gk20a *g, u32 addr)
{
	gk20a_writel(g, pwr_falcon_dmatrfbase_r(), addr);
}

bool gk20a_pmu_is_engine_in_reset(struct gk20a *g)
{
	bool status = false;

	status = g->ops.mc.is_enabled(g, NVGPU_UNIT_PWR);

	return status;
}

int gk20a_pmu_engine_reset(struct gk20a *g, bool do_reset)
{
	u32 reset_mask = g->ops.mc.reset_mask(g, NVGPU_UNIT_PWR);

	if (do_reset) {
		g->ops.mc.enable(g, reset_mask);
	} else {
		g->ops.mc.disable(g, reset_mask);
	}

	return 0;
}

bool gk20a_is_pmu_supported(struct gk20a *g)
{
	return true;
}

u32 gk20a_pmu_pg_engines_list(struct gk20a *g)
{
	return BIT(PMU_PG_ELPG_ENGINE_ID_GRAPHICS);
}

u32 gk20a_pmu_pg_feature_list(struct gk20a *g, u32 pg_engine_id)
{
	if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) {
		return NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING;
	}

	return 0;
}

static void pmu_handle_zbc_msg(struct gk20a *g, struct pmu_msg *msg,
			void *param, u32 handle, u32 status)
{
	struct nvgpu_pmu *pmu = param;
	gk20a_dbg_pmu(g, "reply ZBC_TABLE_UPDATE");
	pmu->zbc_save_done = 1;
}

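/*
 * Ask the PMU to re-save the first `entries` ZBC table entries, then poll
 * (up to the GR idle timeout) for the acknowledgement delivered through
 * pmu_handle_zbc_msg().
 */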
void gk20a_pmu_save_zbc(struct gk20a *g, u32 entries)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_cmd cmd;
	u32 seq;

	if (!pmu->pmu_ready || !entries || !pmu->zbc_ready) {
		return;
	}

	memset(&cmd, 0, sizeof(struct pmu_cmd));
	cmd.hdr.unit_id = PMU_UNIT_PG;
	cmd.hdr.size = PMU_CMD_HDR_SIZE + sizeof(struct pmu_zbc_cmd);
	cmd.cmd.zbc.cmd_type = g->pmu_ver_cmd_id_zbc_table_update;
	cmd.cmd.zbc.entry_mask = ZBC_MASK(entries);

	pmu->zbc_save_done = 0;

	gk20a_dbg_pmu(g, "cmd post ZBC_TABLE_UPDATE");
	nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ,
			   pmu_handle_zbc_msg, pmu, &seq, ~0);
	pmu_wait_message_cond(pmu, gk20a_get_gr_idle_timeout(g),
			      &pmu->zbc_save_done, 1);
	if (!pmu->zbc_save_done) {
		nvgpu_err(g, "ZBC save timeout");
	}
}

int nvgpu_pmu_handle_therm_event(struct nvgpu_pmu *pmu,
			struct nv_pmu_therm_msg *msg)
{
	struct gk20a *g = gk20a_from_pmu(pmu);

	nvgpu_log_fn(g, " ");

	switch (msg->msg_type) {
	case NV_PMU_THERM_MSG_ID_EVENT_HW_SLOWDOWN_NOTIFICATION:
		if (msg->hw_slct_msg.mask == BIT(NV_PMU_THERM_EVENT_THERMAL_1)) {
			nvgpu_clk_arb_send_thermal_alarm(pmu->g);
		} else {
			gk20a_dbg_pmu(g, "Unwanted/Unregistered thermal event received %d",
				msg->hw_slct_msg.mask);
		}
		break;
	default:
		gk20a_dbg_pmu(g, "unknown therm event received %d", msg->msg_type);
		break;
	}

	return 0;
}

void gk20a_pmu_dump_elpg_stats(struct nvgpu_pmu *pmu)
{
	struct gk20a *g = gk20a_from_pmu(pmu);

	gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_supp_r(3): 0x%08x",
		gk20a_readl(g, pwr_pmu_idle_mask_supp_r(3)));
	gk20a_dbg_pmu(g, "pwr_pmu_idle_mask_1_supp_r(3): 0x%08x",
		gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(3)));
	gk20a_dbg_pmu(g, "pwr_pmu_idle_ctrl_supp_r(3): 0x%08x",
		gk20a_readl(g, pwr_pmu_idle_ctrl_supp_r(3)));
	gk20a_dbg_pmu(g, "pwr_pmu_pg_idle_cnt_r(0): 0x%08x",
		gk20a_readl(g, pwr_pmu_pg_idle_cnt_r(0)));
	gk20a_dbg_pmu(g, "pwr_pmu_pg_intren_r(0): 0x%08x",
		gk20a_readl(g, pwr_pmu_pg_intren_r(0)));

	gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(3): 0x%08x",
		gk20a_readl(g, pwr_pmu_idle_count_r(3)));
	gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(4): 0x%08x",
		gk20a_readl(g, pwr_pmu_idle_count_r(4)));
	gk20a_dbg_pmu(g, "pwr_pmu_idle_count_r(7): 0x%08x",
		gk20a_readl(g, pwr_pmu_idle_count_r(7)));
}

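/*
 * Post-mortem dump for PMU faults: log the mailbox and debug registers,
 * any latched BAR0 and external-error state, and finally the firmware
 * trace buffer.
 */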
void gk20a_pmu_dump_falcon_stats(struct nvgpu_pmu *pmu)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	unsigned int i;

	for (i = 0; i < pwr_pmu_mailbox__size_1_v(); i++) {
		nvgpu_err(g, "pwr_pmu_mailbox_r(%d) : 0x%x",
			i, gk20a_readl(g, pwr_pmu_mailbox_r(i)));
	}

	for (i = 0; i < pwr_pmu_debug__size_1_v(); i++) {
		nvgpu_err(g, "pwr_pmu_debug_r(%d) : 0x%x",
			i, gk20a_readl(g, pwr_pmu_debug_r(i)));
	}

	i = gk20a_readl(g, pwr_pmu_bar0_error_status_r());
	nvgpu_err(g, "pwr_pmu_bar0_error_status_r : 0x%x", i);
	if (i != 0) {
		nvgpu_err(g, "pwr_pmu_bar0_addr_r : 0x%x",
			gk20a_readl(g, pwr_pmu_bar0_addr_r()));
		nvgpu_err(g, "pwr_pmu_bar0_data_r : 0x%x",
			gk20a_readl(g, pwr_pmu_bar0_data_r()));
		nvgpu_err(g, "pwr_pmu_bar0_timeout_r : 0x%x",
			gk20a_readl(g, pwr_pmu_bar0_timeout_r()));
		nvgpu_err(g, "pwr_pmu_bar0_ctl_r : 0x%x",
			gk20a_readl(g, pwr_pmu_bar0_ctl_r()));
	}

	i = gk20a_readl(g, pwr_pmu_bar0_fecs_error_r());
	nvgpu_err(g, "pwr_pmu_bar0_fecs_error_r : 0x%x", i);

	i = gk20a_readl(g, pwr_falcon_exterrstat_r());
	nvgpu_err(g, "pwr_falcon_exterrstat_r : 0x%x", i);
	if (pwr_falcon_exterrstat_valid_v(i) ==
			pwr_falcon_exterrstat_valid_true_v()) {
		nvgpu_err(g, "pwr_falcon_exterraddr_r : 0x%x",
			gk20a_readl(g, pwr_falcon_exterraddr_r()));
	}

	/* Print PMU F/W debug prints */
	print_pmu_trace(pmu);
}

bool gk20a_pmu_is_interrupted(struct nvgpu_pmu *pmu)
{
	struct gk20a *g = gk20a_from_pmu(pmu);
	u32 servicedpmuint;

	servicedpmuint = pwr_falcon_irqstat_halt_true_f() |
			pwr_falcon_irqstat_exterr_true_f() |
			pwr_falcon_irqstat_swgen0_true_f();

	if (gk20a_readl(g, pwr_falcon_irqstat_r()) & servicedpmuint) {
		return true;
	}

	return false;
}

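/*
 * PMU interrupt service routine. Under isr_mutex, services halt,
 * external-error, and SWGEN0 (message queue) interrupts. If more messages
 * arrived while the queue was being drained, SWGEN0 is re-asserted through
 * IRQSSET so the handler runs again.
 */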
void gk20a_pmu_isr(struct gk20a *g)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct nvgpu_falcon_queue *queue;
	u32 intr, mask;
	bool recheck = false;

	nvgpu_log_fn(g, " ");

	nvgpu_mutex_acquire(&pmu->isr_mutex);
	if (!pmu->isr_enabled) {
		nvgpu_mutex_release(&pmu->isr_mutex);
		return;
	}

	mask = gk20a_readl(g, pwr_falcon_irqmask_r()) &
		gk20a_readl(g, pwr_falcon_irqdest_r());

	intr = gk20a_readl(g, pwr_falcon_irqstat_r());

	gk20a_dbg_pmu(g, "received falcon interrupt: 0x%08x", intr);

	intr = gk20a_readl(g, pwr_falcon_irqstat_r()) & mask;
	if (!intr || pmu->pmu_state == PMU_STATE_OFF) {
		gk20a_writel(g, pwr_falcon_irqsclr_r(), intr);
		nvgpu_mutex_release(&pmu->isr_mutex);
		return;
	}

	if (intr & pwr_falcon_irqstat_halt_true_f()) {
		nvgpu_err(g, "pmu halt intr not implemented");
		nvgpu_pmu_dump_falcon_stats(pmu);
		if (gk20a_readl(g, pwr_pmu_mailbox_r
				(PMU_MODE_MISMATCH_STATUS_MAILBOX_R)) ==
				PMU_MODE_MISMATCH_STATUS_VAL) {
			if (g->ops.pmu.dump_secure_fuses) {
				g->ops.pmu.dump_secure_fuses(g);
			}
		}
	}
	if (intr & pwr_falcon_irqstat_exterr_true_f()) {
		nvgpu_err(g,
			"pmu exterr intr not implemented. Clearing interrupt.");
		nvgpu_pmu_dump_falcon_stats(pmu);

		gk20a_writel(g, pwr_falcon_exterrstat_r(),
			gk20a_readl(g, pwr_falcon_exterrstat_r()) &
				~pwr_falcon_exterrstat_valid_m());
	}

	if (g->ops.pmu.handle_ext_irq) {
		g->ops.pmu.handle_ext_irq(g, intr);
	}

	if (intr & pwr_falcon_irqstat_swgen0_true_f()) {
		nvgpu_pmu_process_message(pmu);
		recheck = true;
	}

	gk20a_writel(g, pwr_falcon_irqsclr_r(), intr);

	if (recheck) {
		queue = &pmu->queue[PMU_MESSAGE_QUEUE];
		if (!nvgpu_flcn_queue_is_empty(pmu->flcn, queue)) {
			gk20a_writel(g, pwr_falcon_irqsset_r(),
				pwr_falcon_irqsset_swgen0_set_f());
		}
	}

	nvgpu_mutex_release(&pmu->isr_mutex);
}

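/*
 * Configure the PMU idle counters: #3 (GR/CE2 busy) and #6 (total cycles)
 * back perfmon, #1/#2 mirror them so raw readings can be exposed without
 * disturbing perfmon, and #4 (busy) plus #0 (total, with a 0x7FFFFFFF
 * interrupt threshold) are used for perfmon logging.
 */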
void gk20a_pmu_init_perfmon_counter(struct gk20a *g)
{
	u32 data;

	/* use counter #3 for GR && CE2 busy cycles */
	gk20a_writel(g, pwr_pmu_idle_mask_r(3),
		pwr_pmu_idle_mask_gr_enabled_f() |
		pwr_pmu_idle_mask_ce_2_enabled_f());

	/* assign same mask setting from GR ELPG to counter #3 */
	data = gk20a_readl(g, pwr_pmu_idle_mask_1_supp_r(0));
	gk20a_writel(g, pwr_pmu_idle_mask_1_r(3), data);

	/* disable idle filtering for counters 3 and 6 */
	data = gk20a_readl(g, pwr_pmu_idle_ctrl_r(3));
	data = set_field(data, pwr_pmu_idle_ctrl_value_m() |
		pwr_pmu_idle_ctrl_filter_m(),
		pwr_pmu_idle_ctrl_value_busy_f() |
		pwr_pmu_idle_ctrl_filter_disabled_f());
	gk20a_writel(g, pwr_pmu_idle_ctrl_r(3), data);

	/* use counter #6 for total cycles */
	data = gk20a_readl(g, pwr_pmu_idle_ctrl_r(6));
	data = set_field(data, pwr_pmu_idle_ctrl_value_m() |
		pwr_pmu_idle_ctrl_filter_m(),
		pwr_pmu_idle_ctrl_value_always_f() |
		pwr_pmu_idle_ctrl_filter_disabled_f());
	gk20a_writel(g, pwr_pmu_idle_ctrl_r(6), data);

	/*
	 * We don't want to disturb counters #3 and #6, which are used by
	 * perfmon, so we add wiring also to counters #1 and #2 for
	 * exposing raw counter readings.
	 */
	gk20a_writel(g, pwr_pmu_idle_mask_r(1),
		pwr_pmu_idle_mask_gr_enabled_f() |
		pwr_pmu_idle_mask_ce_2_enabled_f());

	data = gk20a_readl(g, pwr_pmu_idle_ctrl_r(1));
	data = set_field(data, pwr_pmu_idle_ctrl_value_m() |
		pwr_pmu_idle_ctrl_filter_m(),
		pwr_pmu_idle_ctrl_value_busy_f() |
		pwr_pmu_idle_ctrl_filter_disabled_f());
	gk20a_writel(g, pwr_pmu_idle_ctrl_r(1), data);

	data = gk20a_readl(g, pwr_pmu_idle_ctrl_r(2));
	data = set_field(data, pwr_pmu_idle_ctrl_value_m() |
		pwr_pmu_idle_ctrl_filter_m(),
		pwr_pmu_idle_ctrl_value_always_f() |
		pwr_pmu_idle_ctrl_filter_disabled_f());
	gk20a_writel(g, pwr_pmu_idle_ctrl_r(2), data);

	/*
	 * use counters 4 and 0 for perfmon to log busy cycles and total cycles
	 * counter #0 overflow sets pmu idle intr status bit
	 */
	gk20a_writel(g, pwr_pmu_idle_intr_r(),
		pwr_pmu_idle_intr_en_f(0));

	gk20a_writel(g, pwr_pmu_idle_threshold_r(0),
		pwr_pmu_idle_threshold_value_f(0x7FFFFFFF));

	data = gk20a_readl(g, pwr_pmu_idle_ctrl_r(0));
	data = set_field(data, pwr_pmu_idle_ctrl_value_m() |
		pwr_pmu_idle_ctrl_filter_m(),
		pwr_pmu_idle_ctrl_value_always_f() |
		pwr_pmu_idle_ctrl_filter_disabled_f());
	gk20a_writel(g, pwr_pmu_idle_ctrl_r(0), data);

	gk20a_writel(g, pwr_pmu_idle_mask_r(4),
		pwr_pmu_idle_mask_gr_enabled_f() |
		pwr_pmu_idle_mask_ce_2_enabled_f());

	data = gk20a_readl(g, pwr_pmu_idle_ctrl_r(4));
	data = set_field(data, pwr_pmu_idle_ctrl_value_m() |
		pwr_pmu_idle_ctrl_filter_m(),
		pwr_pmu_idle_ctrl_value_busy_f() |
		pwr_pmu_idle_ctrl_filter_disabled_f());
	gk20a_writel(g, pwr_pmu_idle_ctrl_r(4), data);

	gk20a_writel(g, pwr_pmu_idle_count_r(0), pwr_pmu_idle_count_reset_f(1));
	gk20a_writel(g, pwr_pmu_idle_count_r(4), pwr_pmu_idle_count_reset_f(1));
	gk20a_writel(g, pwr_pmu_idle_intr_status_r(),
		pwr_pmu_idle_intr_status_intr_f(1));
}

u32 gk20a_pmu_read_idle_counter(struct gk20a *g, u32 counter_id)
{
	return pwr_pmu_idle_count_value_v(
		gk20a_readl(g, pwr_pmu_idle_count_r(counter_id)));
}

void gk20a_pmu_reset_idle_counter(struct gk20a *g, u32 counter_id)
{
	gk20a_writel(g, pwr_pmu_idle_count_r(counter_id),
		pwr_pmu_idle_count_reset_f(1));
}

u32 gk20a_pmu_read_idle_intr_status(struct gk20a *g)
{
	return pwr_pmu_idle_intr_status_intr_v(
		gk20a_readl(g, pwr_pmu_idle_intr_status_r()));
}

void gk20a_pmu_clear_idle_intr_status(struct gk20a *g)
{
	gk20a_writel(g, pwr_pmu_idle_intr_status_r(),
		pwr_pmu_idle_intr_status_intr_f(1));
}

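/*
 * Fetch the PMU's power-gating statistics for one engine from DMEM and
 * repack them into the generic pmu_pg_stats_data layout.
 */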
void gk20a_pmu_elpg_statistics(struct gk20a *g, u32 pg_engine_id,
		struct pmu_pg_stats_data *pg_stat_data)
{
	struct nvgpu_pmu *pmu = &g->pmu;
	struct pmu_pg_stats stats;

	nvgpu_flcn_copy_from_dmem(pmu->flcn,
		pmu->stat_dmem_offset[pg_engine_id],
		(u8 *)&stats, sizeof(struct pmu_pg_stats), 0);

	pg_stat_data->ingating_time = stats.pg_ingating_time_us;
	pg_stat_data->ungating_time = stats.pg_ungating_time_us;
	pg_stat_data->gating_cnt = stats.pg_gating_cnt;
	pg_stat_data->avg_entry_latency_us = stats.pg_avg_entry_time_us;
	pg_stat_data->avg_exit_latency_us = stats.pg_avg_exit_time_us;
}