Diffstat (limited to 'include/clk/clk.c')
-rw-r--r--	include/clk/clk.c	942
1 file changed, 942 insertions, 0 deletions
diff --git a/include/clk/clk.c b/include/clk/clk.c
new file mode 100644
index 0000000..d8e30c4
--- /dev/null
+++ b/include/clk/clk.c
@@ -0,0 +1,942 @@
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/pmu.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>
#include <nvgpu/gk20a.h>

#include "clk.h"
#include "ctrl/ctrlclk.h"
#include "ctrl/ctrlvolt.h"
#include "volt/volt.h"

#define BOOT_GPC2CLK_MHZ	2581
#define BOOT_MCLK_MHZ		3003

struct clkrpc_pmucmdhandler_params {
	struct nv_pmu_clk_rpc *prpccall;
	u32 success;
};

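/*
 * Completion handler shared by all of the CLK RPC submissions below:
 * the PMU echoes the RPC structure back, and the call is treated as
 * successful once b_supported is set in the returned copy.
 */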
static void clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 handle, u32 status)
{
	struct clkrpc_pmucmdhandler_params *phandlerparams =
		(struct clkrpc_pmucmdhandler_params *)param;

	nvgpu_log_info(g, " ");

	if (msg->msg.clk.msg_type != NV_PMU_CLK_MSG_ID_RPC) {
		nvgpu_err(g, "unsupported msg for CLK RPC %x",
			msg->msg.clk.msg_type);
		return;
	}

	if (phandlerparams->prpccall->b_supported) {
		phandlerparams->success = 1;
	}
}

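/*
 * Enable or disable the PMU's effective-average frequency sampling via
 * a CLK LOAD RPC: bload == true starts the sampling callback, false
 * stops it.
 */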
int clk_pmu_freq_effective_avg_load(struct gk20a *g, bool bload)
{
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	u32 status;
	u32 seqdesc;
	struct nv_pmu_clk_rpc rpccall;
	struct clkrpc_pmucmdhandler_params handler;
	struct nv_pmu_clk_load *clkload;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));
	memset(&cmd, 0, sizeof(struct pmu_cmd));

	rpccall.function = NV_PMU_CLK_RPC_ID_LOAD;
	clkload = &rpccall.params.clk_load;
	clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_EFFECTIVE_AVG;
	clkload->action_mask = bload ?
		NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_YES :
		NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_EFFECTIVE_AVG_CALLBACK_NO;

	cmd.hdr.unit_id = PMU_UNIT_CLK;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
			(u32)sizeof(struct pmu_hdr);

	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;

	payload.in.buf = (u8 *)&rpccall;
	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;

	payload.out.buf = (u8 *)&rpccall;
	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;

	handler.prpccall = &rpccall;
	handler.success = 0;

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			clkrpc_pmucmdhandler, (void *)&handler,
			&seqdesc, ~0);
	if (status) {
		nvgpu_err(g, "unable to post clk RPC cmd %x",
			cmd.cmd.clk.cmd_type);
		goto done;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handler.success, 1);
	if (handler.success == 0) {
		nvgpu_err(g, "rpc call to load effective avg clk domain freq failed");
		status = -EINVAL;
	}

done:
	return status;
}

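/*
 * Query the effective average frequency (in kHz) from the PMU. Note
 * that the result is read from freqkHz[] indexed by the raw mask
 * value, so a single-domain mask is the expected input, and that on
 * failure the (u32) return value carries the error status instead.
 *
 * Usage sketch, assuming sampling has already been loaded:
 *	clk_pmu_freq_effective_avg_load(g, true);
 *	freqkhz = clk_freq_effective_avg(g, CTRL_CLK_DOMAIN_GPCCLK);
 */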
u32 clk_freq_effective_avg(struct gk20a *g, u32 clkDomainMask)
{
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	u32 status;
	u32 seqdesc;
	struct nv_pmu_clk_rpc rpccall;
	struct clkrpc_pmucmdhandler_params handler;
	struct nv_pmu_clk_freq_effective_avg *clk_freq_effective_avg;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));
	memset(&cmd, 0, sizeof(struct pmu_cmd));

	rpccall.function = NV_PMU_CLK_RPC_ID_CLK_FREQ_EFF_AVG;
	clk_freq_effective_avg = &rpccall.params.clk_freq_effective_avg;
	clk_freq_effective_avg->clkDomainMask = clkDomainMask;

	cmd.hdr.unit_id = PMU_UNIT_CLK;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
			(u32)sizeof(struct pmu_hdr);

	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;

	payload.in.buf = (u8 *)&rpccall;
	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;

	payload.out.buf = (u8 *)&rpccall;
	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;

	handler.prpccall = &rpccall;
	handler.success = 0;

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			clkrpc_pmucmdhandler, (void *)&handler,
			&seqdesc, ~0);
	if (status) {
		nvgpu_err(g, "unable to post clk RPC cmd %x",
			cmd.cmd.clk.cmd_type);
		goto done;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handler.success, 1);
	if (handler.success == 0) {
		nvgpu_err(g, "rpc call to get clk frequency average failed");
		status = -EINVAL;
		goto done;
	}

	return rpccall.params.clk_freq_effective_avg.freqkHz[clkDomainMask];

done:
	return status;
}

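/*
 * Load or unload closed-loop frequency controllers (CFCs) on the PMU.
 * bit_idx selects one controller, or CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL
 * to apply the cached load mask; the driver-side load mask is updated
 * to mirror the request.
 */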
int clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx)
{
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	u32 status;
	u32 seqdesc;
	struct nv_pmu_clk_rpc rpccall;
	struct clkrpc_pmucmdhandler_params handler;
	struct nv_pmu_clk_load *clkload;
	struct clk_freq_controllers *pclk_freq_controllers;
	struct ctrl_boardobjgrp_mask_e32 *load_mask;
	struct boardobjgrpmask_e32 isolate_cfc_mask;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));
	memset(&cmd, 0, sizeof(struct pmu_cmd));

	pclk_freq_controllers = &g->clk_pmu.clk_freq_controllers;
	rpccall.function = NV_PMU_CLK_RPC_ID_LOAD;
	clkload = &rpccall.params.clk_load;
	clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_CONTROLLER;
	clkload->action_mask = bload ?
		NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_CONTROLLER_CALLBACK_YES :
		NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_CONTROLLER_CALLBACK_NO;

	load_mask = &rpccall.params.clk_load.payload.freq_controllers.load_mask;

	status = boardobjgrpmask_e32_init(&isolate_cfc_mask, NULL);

	if (bit_idx == CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL) {
		status = boardobjgrpmask_export(
			&pclk_freq_controllers->freq_ctrl_load_mask.super,
			pclk_freq_controllers->freq_ctrl_load_mask.super.bitcount,
			&load_mask->super);
	} else {
		status = boardobjgrpmask_bitset(&isolate_cfc_mask.super,
				bit_idx);
		status = boardobjgrpmask_export(&isolate_cfc_mask.super,
				isolate_cfc_mask.super.bitcount,
				&load_mask->super);
		if (bload) {
			status = boardobjgrpmask_bitset(
				&pclk_freq_controllers->freq_ctrl_load_mask.super,
				bit_idx);
		} else {
			status = boardobjgrpmask_bitclr(
				&pclk_freq_controllers->freq_ctrl_load_mask.super,
				bit_idx);
		}
	}

	if (status) {
		nvgpu_err(g, "Error in generating mask used to select CFC");
		goto done;
	}

	cmd.hdr.unit_id = PMU_UNIT_CLK;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
			(u32)sizeof(struct pmu_hdr);

	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;

	payload.in.buf = (u8 *)&rpccall;
	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;

	payload.out.buf = (u8 *)&rpccall;
	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;

	handler.prpccall = &rpccall;
	handler.success = 0;
	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			clkrpc_pmucmdhandler, (void *)&handler,
			&seqdesc, ~0);

	if (status) {
		nvgpu_err(g, "unable to post clk RPC cmd %x",
			cmd.cmd.clk.cmd_type);
		goto done;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handler.success, 1);

	if (handler.success == 0) {
		nvgpu_err(g, "rpc call to load freq controller failed");
		status = -EINVAL;
	}

done:
	return status;
}

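/*
 * Ask the PMU to load the VIN (voltage sensing) devices and program
 * their hardware calibration.
 */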
u32 clk_pmu_vin_load(struct gk20a *g)
{
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	u32 status;
	u32 seqdesc;
	struct nv_pmu_clk_rpc rpccall;
	struct clkrpc_pmucmdhandler_params handler;
	struct nv_pmu_clk_load *clkload;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));
	memset(&cmd, 0, sizeof(struct pmu_cmd));

	rpccall.function = NV_PMU_CLK_RPC_ID_LOAD;
	clkload = &rpccall.params.clk_load;
	clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_VIN;
	clkload->action_mask =
		NV_NV_PMU_CLK_LOAD_ACTION_MASK_VIN_HW_CAL_PROGRAM_YES << 4;

	cmd.hdr.unit_id = PMU_UNIT_CLK;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
			(u32)sizeof(struct pmu_hdr);

	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;
	cmd.cmd.clk.generic.b_perf_daemon_cmd = false;

	payload.in.buf = (u8 *)&rpccall;
	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;

	payload.out.buf = (u8 *)&rpccall;
	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;

	handler.prpccall = &rpccall;
	handler.success = 0;
	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			clkrpc_pmucmdhandler, (void *)&handler,
			&seqdesc, ~0);

	if (status) {
		nvgpu_err(g, "unable to post clk RPC cmd %x",
			cmd.cmd.clk.cmd_type);
		goto done;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handler.success, 1);

	if (handler.success == 0) {
		nvgpu_err(g, "rpc call to load vin cal failed");
		status = -EINVAL;
	}

done:
	return status;
}

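/*
 * Fill a VF change-inject RPC for gp10x: the three 2x-rate domains
 * (GPC2CLK, XBAR2CLK, SYS2CLK) with explicit regime transitions, plus
 * a single logic voltage rail.
 */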
u32 nvgpu_clk_vf_change_inject_data_fill_gp10x(struct gk20a *g,
		struct nv_pmu_clk_rpc *rpccall,
		struct set_fll_clk *setfllclk)
{
	struct nv_pmu_clk_vf_change_inject *vfchange;

	vfchange = &rpccall->params.clk_vf_change_inject;
	vfchange->flags = 0;
	vfchange->clk_list.num_domains = 3;
	vfchange->clk_list.clk_domains[0].clk_domain = CTRL_CLK_DOMAIN_GPC2CLK;
	vfchange->clk_list.clk_domains[0].clk_freq_khz =
		setfllclk->gpc2clkmhz * 1000;
	vfchange->clk_list.clk_domains[0].clk_flags = 0;
	vfchange->clk_list.clk_domains[0].current_regime_id =
		setfllclk->current_regime_id_gpc;
	vfchange->clk_list.clk_domains[0].target_regime_id =
		setfllclk->target_regime_id_gpc;
	vfchange->clk_list.clk_domains[1].clk_domain = CTRL_CLK_DOMAIN_XBAR2CLK;
	vfchange->clk_list.clk_domains[1].clk_freq_khz =
		setfllclk->xbar2clkmhz * 1000;
	vfchange->clk_list.clk_domains[1].clk_flags = 0;
	vfchange->clk_list.clk_domains[1].current_regime_id =
		setfllclk->current_regime_id_xbar;
	vfchange->clk_list.clk_domains[1].target_regime_id =
		setfllclk->target_regime_id_xbar;
	vfchange->clk_list.clk_domains[2].clk_domain = CTRL_CLK_DOMAIN_SYS2CLK;
	vfchange->clk_list.clk_domains[2].clk_freq_khz =
		setfllclk->sys2clkmhz * 1000;
	vfchange->clk_list.clk_domains[2].clk_flags = 0;
	vfchange->clk_list.clk_domains[2].current_regime_id =
		setfllclk->current_regime_id_sys;
	vfchange->clk_list.clk_domains[2].target_regime_id =
		setfllclk->target_regime_id_sys;
	vfchange->volt_list.num_rails = 1;
	vfchange->volt_list.rails[0].volt_domain = CTRL_VOLT_DOMAIN_LOGIC;
	vfchange->volt_list.rails[0].voltage_uv = setfllclk->voltuv;
	vfchange->volt_list.rails[0].voltage_min_noise_unaware_uv =
		setfllclk->voltuv;

	return 0;
}

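/*
 * gv10x variant of the VF change-inject fill: four 1x-rate domains,
 * with NVDCLK pinned at a fixed 855 MHz; regime fields are not
 * programmed here.
 */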
u32 nvgpu_clk_vf_change_inject_data_fill_gv10x(struct gk20a *g,
		struct nv_pmu_clk_rpc *rpccall,
		struct set_fll_clk *setfllclk)
{
	struct nv_pmu_clk_vf_change_inject_v1 *vfchange;

	vfchange = &rpccall->params.clk_vf_change_inject_v1;
	vfchange->flags = 0;
	vfchange->clk_list.num_domains = 4;
	vfchange->clk_list.clk_domains[0].clk_domain = CTRL_CLK_DOMAIN_GPCCLK;
	vfchange->clk_list.clk_domains[0].clk_freq_khz =
		setfllclk->gpc2clkmhz * 1000;

	vfchange->clk_list.clk_domains[1].clk_domain = CTRL_CLK_DOMAIN_XBARCLK;
	vfchange->clk_list.clk_domains[1].clk_freq_khz =
		setfllclk->xbar2clkmhz * 1000;

	vfchange->clk_list.clk_domains[2].clk_domain = CTRL_CLK_DOMAIN_SYSCLK;
	vfchange->clk_list.clk_domains[2].clk_freq_khz =
		setfllclk->sys2clkmhz * 1000;

	vfchange->clk_list.clk_domains[3].clk_domain = CTRL_CLK_DOMAIN_NVDCLK;
	vfchange->clk_list.clk_domains[3].clk_freq_khz = 855 * 1000;

	vfchange->volt_list.num_rails = 1;
	vfchange->volt_list.rails[0].rail_idx = 0;
	vfchange->volt_list.rails[0].voltage_uv = setfllclk->voltuv;
	vfchange->volt_list.rails[0].voltage_min_noise_unaware_uv =
		setfllclk->voltuv;

	return 0;
}

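/*
 * Submit a VF change-inject RPC built by the chip-specific fill
 * routine, then block until the PMU acknowledges it.
 */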
static u32 clk_pmu_vf_inject(struct gk20a *g, struct set_fll_clk *setfllclk)
{
	struct pmu_cmd cmd;
	struct pmu_payload payload;
	u32 status;
	u32 seqdesc;
	struct nv_pmu_clk_rpc rpccall;
	struct clkrpc_pmucmdhandler_params handler;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));
	memset(&cmd, 0, sizeof(struct pmu_cmd));

	if ((setfllclk->gpc2clkmhz == 0) || (setfllclk->xbar2clkmhz == 0) ||
	    (setfllclk->sys2clkmhz == 0) || (setfllclk->voltuv == 0)) {
		return -EINVAL;
	}

	if ((setfllclk->target_regime_id_gpc > CTRL_CLK_FLL_REGIME_ID_FR) ||
	    (setfllclk->target_regime_id_sys > CTRL_CLK_FLL_REGIME_ID_FR) ||
	    (setfllclk->target_regime_id_xbar > CTRL_CLK_FLL_REGIME_ID_FR)) {
		return -EINVAL;
	}

	rpccall.function = NV_PMU_CLK_RPC_ID_CLK_VF_CHANGE_INJECT;

	g->ops.pmu_ver.clk.clk_vf_change_inject_data_fill(g,
			&rpccall, setfllclk);

	cmd.hdr.unit_id = PMU_UNIT_CLK;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
			(u32)sizeof(struct pmu_hdr);

	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;

	payload.in.buf = (u8 *)&rpccall;
	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;

	payload.out.buf = (u8 *)&rpccall;
	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;

	handler.prpccall = &rpccall;
	handler.success = 0;

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			clkrpc_pmucmdhandler, (void *)&handler,
			&seqdesc, ~0);

	if (status) {
		nvgpu_err(g, "unable to post clk RPC cmd %x",
			cmd.cmd.clk.cmd_type);
		goto done;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handler.success, 1);

	if (handler.success == 0) {
		nvgpu_err(g, "rpc call to inject clock failed");
		status = -EINVAL;
	}
done:
	return status;
}

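/*
 * Pick the target FLL regime for a domain: FFR (fixed-frequency
 * regime) if the requested frequency fits under the device's
 * fixed-frequency limit, FR otherwise.
 */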
static u32 find_regime_id(struct gk20a *g, u32 domain, u16 clkmhz)
{
	struct fll_device *pflldev;
	u8 j;
	struct clk_pmupstate *pclk = &g->clk_pmu;

	BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super),
			struct fll_device *, pflldev, j) {
		if (pflldev->clk_domain == domain) {
			if (pflldev->regime_desc.fixed_freq_regime_limit_mhz >=
					clkmhz) {
				return CTRL_CLK_FLL_REGIME_ID_FFR;
			} else {
				return CTRL_CLK_FLL_REGIME_ID_FR;
			}
		}
	}
	return CTRL_CLK_FLL_REGIME_ID_INVALID;
}

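/* Record a regime ID on the FLL device that drives this domain. */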
static int set_regime_id(struct gk20a *g, u32 domain, u32 regimeid)
{
	struct fll_device *pflldev;
	u8 j;
	struct clk_pmupstate *pclk = &g->clk_pmu;

	BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super),
			struct fll_device *, pflldev, j) {
		if (pflldev->clk_domain == domain) {
			pflldev->regime_desc.regime_id = regimeid;
			return 0;
		}
	}
	return -EINVAL;
}

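/* Read back the regime ID from the FLL device for this domain. */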
static int get_regime_id(struct gk20a *g, u32 domain, u32 *regimeid)
{
	struct fll_device *pflldev;
	u8 j;
	struct clk_pmupstate *pclk = &g->clk_pmu;

	BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super),
			struct fll_device *, pflldev, j) {
		if (pflldev->clk_domain == domain) {
			*regimeid = pflldev->regime_desc.regime_id;
			return 0;
		}
	}
	return -EINVAL;
}

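/*
 * gp10x path: resolve the current and target regimes for the 2x-rate
 * domains, inject the VF change, and cache the new regime IDs.
 */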
int clk_set_fll_clks(struct gk20a *g, struct set_fll_clk *setfllclk)
{
	int status = -EINVAL;

	/* get the current and target regime ids */
	status = get_regime_id(g, CTRL_CLK_DOMAIN_GPC2CLK,
			&setfllclk->current_regime_id_gpc);
	if (status) {
		goto done;
	}

	setfllclk->target_regime_id_gpc = find_regime_id(g,
			CTRL_CLK_DOMAIN_GPC2CLK, setfllclk->gpc2clkmhz);

	status = get_regime_id(g, CTRL_CLK_DOMAIN_SYS2CLK,
			&setfllclk->current_regime_id_sys);
	if (status) {
		goto done;
	}

	setfllclk->target_regime_id_sys = find_regime_id(g,
			CTRL_CLK_DOMAIN_SYS2CLK, setfllclk->sys2clkmhz);

	status = get_regime_id(g, CTRL_CLK_DOMAIN_XBAR2CLK,
			&setfllclk->current_regime_id_xbar);
	if (status) {
		goto done;
	}

	setfllclk->target_regime_id_xbar = find_regime_id(g,
			CTRL_CLK_DOMAIN_XBAR2CLK, setfllclk->xbar2clkmhz);

	status = clk_pmu_vf_inject(g, setfllclk);

	if (status) {
		nvgpu_err(g, "vf inject to change clk failed");
	}

	/* save the target regime ids */
	status = set_regime_id(g, CTRL_CLK_DOMAIN_XBAR2CLK,
			setfllclk->target_regime_id_xbar);
	if (status) {
		goto done;
	}

	status = set_regime_id(g, CTRL_CLK_DOMAIN_GPC2CLK,
			setfllclk->target_regime_id_gpc);
	if (status) {
		goto done;
	}

	status = set_regime_id(g, CTRL_CLK_DOMAIN_SYS2CLK,
			setfllclk->target_regime_id_sys);
	if (status) {
		goto done;
	}
done:
	return status;
}

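/*
 * Derive the slave-domain frequencies (XBAR2CLK, SYS2CLK) implied by
 * the requested GPC2CLK master frequency from the clock-programming
 * tables.
 */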
int clk_get_fll_clks(struct gk20a *g, struct set_fll_clk *setfllclk)
{
	int status = -EINVAL;
	struct clk_domain *pdomain;
	u8 i;
	struct clk_pmupstate *pclk = &g->clk_pmu;
	u16 clkmhz = 0;
	struct clk_domain_3x_master *p3xmaster;
	struct clk_domain_3x_slave *p3xslave;
	unsigned long slaveidxmask;

	if (setfllclk->gpc2clkmhz == 0) {
		return -EINVAL;
	}

	BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
			struct clk_domain *, pdomain, i) {
		if (pdomain->api_domain == CTRL_CLK_DOMAIN_GPC2CLK) {
			if (!pdomain->super.implements(g, &pdomain->super,
					CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER)) {
				status = -EINVAL;
				goto done;
			}
			p3xmaster = (struct clk_domain_3x_master *)pdomain;
			slaveidxmask = p3xmaster->slave_idxs_mask;
			for_each_set_bit(i, &slaveidxmask, 32) {
				p3xslave = (struct clk_domain_3x_slave *)
					CLK_CLK_DOMAIN_GET(pclk, i);
				if ((p3xslave->super.super.super.api_domain !=
						CTRL_CLK_DOMAIN_XBAR2CLK) &&
				    (p3xslave->super.super.super.api_domain !=
						CTRL_CLK_DOMAIN_SYS2CLK)) {
					continue;
				}
				clkmhz = 0;
				status = p3xslave->clkdomainclkgetslaveclk(g,
						pclk,
						(struct clk_domain *)p3xslave,
						&clkmhz,
						setfllclk->gpc2clkmhz);
				if (status) {
					status = -EINVAL;
					goto done;
				}
				if (p3xslave->super.super.super.api_domain ==
						CTRL_CLK_DOMAIN_XBAR2CLK) {
					setfllclk->xbar2clkmhz = clkmhz;
				}
				if (p3xslave->super.super.super.api_domain ==
						CTRL_CLK_DOMAIN_SYS2CLK) {
					setfllclk->sys2clkmhz = clkmhz;
				}
			}
		}
	}
done:
	return status;
}

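/*
 * Walk the VF curve of a domain for both the logic and SRAM rail
 * entries. Despite the name, this only performs the lookups; only the
 * status of the final search is returned.
 */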
u32 clk_domain_print_vf_table(struct gk20a *g, u32 clkapidomain)
{
	u32 status = -EINVAL;
	struct clk_domain *pdomain;
	u8 i;
	struct clk_pmupstate *pclk = &g->clk_pmu;
	u16 clkmhz = 0;
	u32 volt = 0;

	BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
			struct clk_domain *, pdomain, i) {
		if (pdomain->api_domain == clkapidomain) {
			status = pdomain->clkdomainclkvfsearch(g, pclk,
					pdomain, &clkmhz, &volt,
					CLK_PROG_VFE_ENTRY_LOGIC);
			status = pdomain->clkdomainclkvfsearch(g, pclk,
					pdomain, &clkmhz, &volt,
					CLK_PROG_VFE_ENTRY_SRAM);
		}
	}
	return status;
}

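/*
 * gv10x path: given a target GPCCLK and voltage, derive the slave
 * XBARCLK/SYSCLK frequencies, resolve the FLL regimes, and inject the
 * VF change through the PMU.
 */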
static int clk_program_fllclks(struct gk20a *g, struct change_fll_clk *fllclk)
{
	int status = -EINVAL;
	struct clk_domain *pdomain;
	u8 i;
	struct clk_pmupstate *pclk = &g->clk_pmu;
	u16 clkmhz = 0;
	struct clk_domain_3x_master *p3xmaster;
	struct clk_domain_3x_slave *p3xslave;
	unsigned long slaveidxmask;
	struct set_fll_clk setfllclk;

	if (fllclk->api_clk_domain != CTRL_CLK_DOMAIN_GPCCLK) {
		return -EINVAL;
	}
	if (fllclk->voltuv == 0) {
		return -EINVAL;
	}
	if (fllclk->clkmhz == 0) {
		return -EINVAL;
	}

	setfllclk.voltuv = fllclk->voltuv;
	setfllclk.gpc2clkmhz = fllclk->clkmhz;

	BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
			struct clk_domain *, pdomain, i) {
		if (pdomain->api_domain == fllclk->api_clk_domain) {
			if (!pdomain->super.implements(g, &pdomain->super,
					CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER)) {
				status = -EINVAL;
				goto done;
			}
			p3xmaster = (struct clk_domain_3x_master *)pdomain;
			slaveidxmask = p3xmaster->slave_idxs_mask;
			for_each_set_bit(i, &slaveidxmask, 32) {
				p3xslave = (struct clk_domain_3x_slave *)
					CLK_CLK_DOMAIN_GET(pclk, i);
				if ((p3xslave->super.super.super.api_domain !=
						CTRL_CLK_DOMAIN_XBARCLK) &&
				    (p3xslave->super.super.super.api_domain !=
						CTRL_CLK_DOMAIN_SYSCLK)) {
					continue;
				}
				clkmhz = 0;
				status = p3xslave->clkdomainclkgetslaveclk(g,
						pclk,
						(struct clk_domain *)p3xslave,
						&clkmhz,
						fllclk->clkmhz);
				if (status) {
					status = -EINVAL;
					goto done;
				}
				if (p3xslave->super.super.super.api_domain ==
						CTRL_CLK_DOMAIN_XBARCLK) {
					setfllclk.xbar2clkmhz = clkmhz;
				}
				if (p3xslave->super.super.super.api_domain ==
						CTRL_CLK_DOMAIN_SYSCLK) {
					setfllclk.sys2clkmhz = clkmhz;
				}
			}
		}
	}

	/* get the current and target regime ids */
	status = get_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK,
			&setfllclk.current_regime_id_gpc);
	if (status) {
		goto done;
	}

	setfllclk.target_regime_id_gpc = find_regime_id(g,
			CTRL_CLK_DOMAIN_GPCCLK, setfllclk.gpc2clkmhz);

	status = get_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK,
			&setfllclk.current_regime_id_sys);
	if (status) {
		goto done;
	}

	setfllclk.target_regime_id_sys = find_regime_id(g,
			CTRL_CLK_DOMAIN_SYSCLK, setfllclk.sys2clkmhz);

	status = get_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK,
			&setfllclk.current_regime_id_xbar);
	if (status) {
		goto done;
	}

	setfllclk.target_regime_id_xbar = find_regime_id(g,
			CTRL_CLK_DOMAIN_XBARCLK, setfllclk.xbar2clkmhz);

	status = clk_pmu_vf_inject(g, &setfllclk);

	if (status) {
		nvgpu_err(g, "vf inject to change clk failed");
	}

	/* save the target regime ids */
	status = set_regime_id(g, CTRL_CLK_DOMAIN_XBARCLK,
			setfllclk.target_regime_id_xbar);
	if (status) {
		goto done;
	}

	status = set_regime_id(g, CTRL_CLK_DOMAIN_GPCCLK,
			setfllclk.target_regime_id_gpc);
	if (status) {
		goto done;
	}

	status = set_regime_id(g, CTRL_CLK_DOMAIN_SYSCLK,
			setfllclk.target_regime_id_sys);
	if (status) {
		goto done;
	}
done:
	return status;
}

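/*
 * Bring the gv10x FLL domains to the boot operating point: cache the
 * VF points, look up the voltage needed for BOOT_GPCCLK_MHZ, program
 * the rail, set the clocks, and start effective-average sampling.
 */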
u32 nvgpu_clk_set_boot_fll_clk_gv10x(struct gk20a *g)
{
	int status;
	struct change_fll_clk bootfllclk;
	u16 gpcclk_clkmhz = BOOT_GPCCLK_MHZ;
	u32 gpcclk_voltuv = 0;
	u32 voltuv = 0;

	status = clk_vf_point_cache(g);
	if (status) {
		nvgpu_err(g, "caching failed");
		return status;
	}

	status = clk_domain_get_f_or_v(g, CTRL_CLK_DOMAIN_GPCCLK,
			&gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC);
	if (status) {
		return status;
	}

	voltuv = gpcclk_voltuv;

	status = volt_set_voltage(g, voltuv, 0);
	if (status) {
		nvgpu_err(g, "attempt to set boot voltage failed %d",
			voltuv);
	}

	bootfllclk.api_clk_domain = CTRL_CLK_DOMAIN_GPCCLK;
	bootfllclk.clkmhz = gpcclk_clkmhz;
	bootfllclk.voltuv = voltuv;
	status = clk_program_fllclks(g, &bootfllclk);
	if (status) {
		nvgpu_err(g, "attempt to set boot gpcclk failed");
	}

	status = clk_pmu_freq_effective_avg_load(g, true);

	/*
	 * Read clocks after some delay with the method below and extract
	 * the clock data from the returned buffer:
	 * clk_freq_effective_avg(g, CTRL_CLK_DOMAIN_GPCCLK |
	 *		CTRL_CLK_DOMAIN_XBARCLK |
	 *		CTRL_CLK_DOMAIN_SYSCLK |
	 *		CTRL_CLK_DOMAIN_NVDCLK)
	 */

	return status;
}

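/*
 * Same flow as the boot-time variant above, minus the effective
 * average sampling load; reapplies the max/boot GPCCLK point.
 */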
int nvgpu_clk_set_fll_clk_gv10x(struct gk20a *g)
{
	int status;
	struct change_fll_clk bootfllclk;
	u16 gpcclk_clkmhz = BOOT_GPCCLK_MHZ;
	u32 gpcclk_voltuv = 0U;
	u32 voltuv = 0U;

	status = clk_vf_point_cache(g);
	if (status != 0) {
		nvgpu_err(g, "caching failed");
		return status;
	}

	status = clk_domain_get_f_or_v(g, CTRL_CLK_DOMAIN_GPCCLK,
			&gpcclk_clkmhz, &gpcclk_voltuv, CTRL_VOLT_DOMAIN_LOGIC);
	if (status != 0) {
		return status;
	}

	voltuv = gpcclk_voltuv;

	status = volt_set_voltage(g, voltuv, 0U);
	if (status != 0) {
		nvgpu_err(g, "attempt to set max voltage failed %d", voltuv);
	}

	bootfllclk.api_clk_domain = CTRL_CLK_DOMAIN_GPCCLK;
	bootfllclk.clkmhz = gpcclk_clkmhz;
	bootfllclk.voltuv = voltuv;
	status = clk_program_fllclks(g, &bootfllclk);
	if (status != 0) {
		nvgpu_err(g, "attempt to set max gpcclk failed");
	}
	return status;
}

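/*
 * VF curve lookup for a clock domain: hands pclkmhz/pvoltuv to the
 * domain's clkdomainclkvfsearch() for the selected rail, which fills
 * in the missing value of the frequency/voltage pair.
 */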
u32 clk_domain_get_f_or_v(struct gk20a *g, u32 clkapidomain,
		u16 *pclkmhz, u32 *pvoltuv, u8 railidx)
{
	u32 status = -EINVAL;
	struct clk_domain *pdomain;
	u8 i;
	struct clk_pmupstate *pclk = &g->clk_pmu;
	u8 rail;

	if ((pclkmhz == NULL) || (pvoltuv == NULL)) {
		return -EINVAL;
	}

	if (railidx == CTRL_VOLT_DOMAIN_LOGIC) {
		rail = CLK_PROG_VFE_ENTRY_LOGIC;
	} else if (railidx == CTRL_VOLT_DOMAIN_SRAM) {
		rail = CLK_PROG_VFE_ENTRY_SRAM;
	} else {
		return -EINVAL;
	}

	BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
			struct clk_domain *, pdomain, i) {
		if (pdomain->api_domain == clkapidomain) {
			status = pdomain->clkdomainclkvfsearch(g, pclk,
					pdomain, pclkmhz, pvoltuv, rail);
			return status;
		}
	}
	return status;
}