Diffstat (limited to 'drivers/gpu/nvgpu/clk/clk.c')
-rw-r--r--  drivers/gpu/nvgpu/clk/clk.c  575
1 file changed, 575 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/clk/clk.c b/drivers/gpu/nvgpu/clk/clk.c
new file mode 100644
index 00000000..3906be48
--- /dev/null
+++ b/drivers/gpu/nvgpu/clk/clk.c
@@ -0,0 +1,575 @@
/*
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/pmu.h>
#include <nvgpu/pmuif/nvgpu_gpmu_cmdif.h>

#include "gk20a/gk20a.h"
#include "clk.h"
#include "ctrl/ctrlclk.h"
#include "ctrl/ctrlvolt.h"
#include "volt/volt.h"

#define BOOT_GPC2CLK_MHZ 2581
#define BOOT_MCLK_MHZ 3003

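/* Completion state shared between the RPC submitter and clkrpc_pmucmdhandler. */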
struct clkrpc_pmucmdhandler_params {
	struct nv_pmu_clk_rpc *prpccall;
	u32 success;
};

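/*
 * PMU message callback for CLK RPC commands: checks that the reply is an
 * RPC message and marks the call successful once the PMU has echoed the
 * RPC back with b_supported set.
 */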
static void clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
		void *param, u32 handle, u32 status)
{
	struct clkrpc_pmucmdhandler_params *phandlerparams =
		(struct clkrpc_pmucmdhandler_params *)param;

	gk20a_dbg_info("");

	if (msg->msg.clk.msg_type != NV_PMU_CLK_MSG_ID_RPC) {
		nvgpu_err(g, "unsupported msg for CLK RPC %x",
			msg->msg.clk.msg_type);
		return;
	}

	if (phandlerparams->prpccall->b_supported)
		phandlerparams->success = 1;
}

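/*
 * Load or unload clock frequency controllers (CFCs) through the CLK LOAD
 * RPC. bit_idx selects one controller, or every currently loaded one when
 * CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL is passed; the cached
 * freq_ctrl_load_mask is updated to match the request.
 */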
int clk_pmu_freq_controller_load(struct gk20a *g, bool bload, u8 bit_idx)
{
	struct pmu_cmd cmd;
	struct pmu_msg msg;
	struct pmu_payload payload;
	u32 status;
	u32 seqdesc;
	struct nv_pmu_clk_rpc rpccall;
	struct clkrpc_pmucmdhandler_params handler;
	struct nv_pmu_clk_load *clkload;
	struct clk_freq_controllers *pclk_freq_controllers;
	struct ctrl_boardobjgrp_mask_e32 *load_mask;
	struct boardobjgrpmask_e32 isolate_cfc_mask;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));

	pclk_freq_controllers = &g->clk_pmu.clk_freq_controllers;
	rpccall.function = NV_PMU_CLK_RPC_ID_LOAD;
	clkload = &rpccall.params.clk_load;
	clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_CONTROLLER;
	clkload->action_mask = bload ?
		NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_CONTROLLER_CALLBACK_YES :
		NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_CONTROLLER_CALLBACK_NO;

	load_mask = &rpccall.params.clk_load.payload.freq_controllers.load_mask;

	status = boardobjgrpmask_e32_init(&isolate_cfc_mask, NULL);

	if (bit_idx == CTRL_CLK_CLK_FREQ_CONTROLLER_ID_ALL) {
		status = boardobjgrpmask_export(
				&pclk_freq_controllers->
					freq_ctrl_load_mask.super,
				pclk_freq_controllers->
					freq_ctrl_load_mask.super.bitcount,
				&load_mask->super);
	} else {
		status = boardobjgrpmask_bitset(&isolate_cfc_mask.super,
				bit_idx);
		status = boardobjgrpmask_export(&isolate_cfc_mask.super,
				isolate_cfc_mask.super.bitcount,
				&load_mask->super);
		if (bload)
			status = boardobjgrpmask_bitset(
					&pclk_freq_controllers->
						freq_ctrl_load_mask.super,
					bit_idx);
		else
			status = boardobjgrpmask_bitclr(
					&pclk_freq_controllers->
						freq_ctrl_load_mask.super,
					bit_idx);
	}

	if (status) {
		nvgpu_err(g, "Error in generating mask used to select CFC");
		goto done;
	}

	cmd.hdr.unit_id = PMU_UNIT_CLK;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
			(u32)sizeof(struct pmu_hdr);

	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;
	msg.hdr.size = sizeof(struct pmu_msg);

	payload.in.buf = (u8 *)&rpccall;
	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;

	payload.out.buf = (u8 *)&rpccall;
	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;

	handler.prpccall = &rpccall;
	handler.success = 0;
	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			clkrpc_pmucmdhandler, (void *)&handler,
			&seqdesc, ~0);

	if (status) {
		nvgpu_err(g, "unable to post clk RPC cmd %x",
			cmd.cmd.clk.cmd_type);
		goto done;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handler.success, 1);

	if (handler.success == 0) {
		nvgpu_err(g, "rpc call to load freq controller failed");
		status = -EINVAL;
	}

done:
	return status;
}

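/*
 * Ask the PMU to load the VIN (voltage sensing) feature and program its
 * hardware calibration, again through the CLK LOAD RPC.
 */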
u32 clk_pmu_vin_load(struct gk20a *g)
{
	struct pmu_cmd cmd;
	struct pmu_msg msg;
	struct pmu_payload payload;
	u32 status;
	u32 seqdesc;
	struct nv_pmu_clk_rpc rpccall;
	struct clkrpc_pmucmdhandler_params handler;
	struct nv_pmu_clk_load *clkload;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));

	rpccall.function = NV_PMU_CLK_RPC_ID_LOAD;
	clkload = &rpccall.params.clk_load;
	clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_VIN;
	clkload->action_mask =
		NV_NV_PMU_CLK_LOAD_ACTION_MASK_VIN_HW_CAL_PROGRAM_YES << 4;

	cmd.hdr.unit_id = PMU_UNIT_CLK;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
			(u32)sizeof(struct pmu_hdr);

	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;
	msg.hdr.size = sizeof(struct pmu_msg);

	payload.in.buf = (u8 *)&rpccall;
	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;

	payload.out.buf = (u8 *)&rpccall;
	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;

	handler.prpccall = &rpccall;
	handler.success = 0;
	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			clkrpc_pmucmdhandler, (void *)&handler,
			&seqdesc, ~0);

	if (status) {
		nvgpu_err(g, "unable to post clk RPC cmd %x",
			cmd.cmd.clk.cmd_type);
		goto done;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handler.success, 1);

	if (handler.success == 0) {
		nvgpu_err(g, "rpc call to load vin cal failed");
		status = -EINVAL;
	}

done:
	return status;
}

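/*
 * Send a VF change inject RPC: the PMU receives the target frequencies and
 * FLL regimes for the GPC2, XBAR2 and SYS2 clock domains together with the
 * logic-rail voltage, and carries out the actual switch.
 */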
static u32 clk_pmu_vf_inject(struct gk20a *g, struct set_fll_clk *setfllclk)
{
	struct pmu_cmd cmd;
	struct pmu_msg msg;
	struct pmu_payload payload;
	u32 status;
	u32 seqdesc;
	struct nv_pmu_clk_rpc rpccall;
	struct clkrpc_pmucmdhandler_params handler;
	struct nv_pmu_clk_vf_change_inject *vfchange;

	memset(&payload, 0, sizeof(struct pmu_payload));
	memset(&rpccall, 0, sizeof(struct nv_pmu_clk_rpc));
	memset(&handler, 0, sizeof(struct clkrpc_pmucmdhandler_params));

	if ((setfllclk->gpc2clkmhz == 0) || (setfllclk->xbar2clkmhz == 0) ||
		(setfllclk->sys2clkmhz == 0) || (setfllclk->voltuv == 0))
		return -EINVAL;

	if ((setfllclk->target_regime_id_gpc > CTRL_CLK_FLL_REGIME_ID_FR) ||
		(setfllclk->target_regime_id_sys > CTRL_CLK_FLL_REGIME_ID_FR) ||
		(setfllclk->target_regime_id_xbar > CTRL_CLK_FLL_REGIME_ID_FR))
		return -EINVAL;

	rpccall.function = NV_PMU_CLK_RPC_ID_CLK_VF_CHANGE_INJECT;
	vfchange = &rpccall.params.clk_vf_change_inject;
	vfchange->flags = 0;
	vfchange->clk_list.num_domains = 3;
	vfchange->clk_list.clk_domains[0].clk_domain = CTRL_CLK_DOMAIN_GPC2CLK;
	vfchange->clk_list.clk_domains[0].clk_freq_khz =
		setfllclk->gpc2clkmhz * 1000;
	vfchange->clk_list.clk_domains[0].clk_flags = 0;
	vfchange->clk_list.clk_domains[0].current_regime_id =
		setfllclk->current_regime_id_gpc;
	vfchange->clk_list.clk_domains[0].target_regime_id =
		setfllclk->target_regime_id_gpc;
	vfchange->clk_list.clk_domains[1].clk_domain = CTRL_CLK_DOMAIN_XBAR2CLK;
	vfchange->clk_list.clk_domains[1].clk_freq_khz =
		setfllclk->xbar2clkmhz * 1000;
	vfchange->clk_list.clk_domains[1].clk_flags = 0;
	vfchange->clk_list.clk_domains[1].current_regime_id =
		setfllclk->current_regime_id_xbar;
	vfchange->clk_list.clk_domains[1].target_regime_id =
		setfllclk->target_regime_id_xbar;
	vfchange->clk_list.clk_domains[2].clk_domain = CTRL_CLK_DOMAIN_SYS2CLK;
	vfchange->clk_list.clk_domains[2].clk_freq_khz =
		setfllclk->sys2clkmhz * 1000;
	vfchange->clk_list.clk_domains[2].clk_flags = 0;
	vfchange->clk_list.clk_domains[2].current_regime_id =
		setfllclk->current_regime_id_sys;
	vfchange->clk_list.clk_domains[2].target_regime_id =
		setfllclk->target_regime_id_sys;
	vfchange->volt_list.num_rails = 1;
	vfchange->volt_list.rails[0].volt_domain = CTRL_VOLT_DOMAIN_LOGIC;
	vfchange->volt_list.rails[0].voltage_uv = setfllclk->voltuv;
	vfchange->volt_list.rails[0].voltage_min_noise_unaware_uv =
		setfllclk->voltuv;

	cmd.hdr.unit_id = PMU_UNIT_CLK;
	cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
			(u32)sizeof(struct pmu_hdr);

	cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;
	msg.hdr.size = sizeof(struct pmu_msg);

	payload.in.buf = (u8 *)&rpccall;
	payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;

	payload.out.buf = (u8 *)&rpccall;
	payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
	payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
	payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;

	handler.prpccall = &rpccall;
	handler.success = 0;

	status = nvgpu_pmu_cmd_post(g, &cmd, NULL, &payload,
			PMU_COMMAND_QUEUE_LPQ,
			clkrpc_pmucmdhandler, (void *)&handler,
			&seqdesc, ~0);

	if (status) {
		nvgpu_err(g, "unable to post clk RPC cmd %x",
			cmd.cmd.clk.cmd_type);
		goto done;
	}

	pmu_wait_message_cond(&g->pmu,
			gk20a_get_gr_idle_timeout(g),
			&handler.success, 1);

	if (handler.success == 0) {
		nvgpu_err(g, "rpc call to inject clock failed");
		status = -EINVAL;
	}
done:
	return status;
}

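/*
 * Choose the target FLL regime for a domain: fixed-frequency regime (FFR)
 * while the requested clock stays at or below the FLL's fixed-frequency
 * regime limit, frequency regime (FR) above it.
 */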
static u32 find_regime_id(struct gk20a *g, u32 domain, u16 clkmhz)
{
	struct fll_device *pflldev;
	u8 j;
	struct clk_pmupstate *pclk = &g->clk_pmu;

	BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super),
			struct fll_device *, pflldev, j) {
		if (pflldev->clk_domain == domain) {
			if (pflldev->regime_desc.fixed_freq_regime_limit_mhz >=
					clkmhz)
				return CTRL_CLK_FLL_REGIME_ID_FFR;
			else
				return CTRL_CLK_FLL_REGIME_ID_FR;
		}
	}
	return CTRL_CLK_FLL_REGIME_ID_INVALID;
}

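/* Cache a regime id in the FLL device that drives the given clock domain. */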
static int set_regime_id(struct gk20a *g, u32 domain, u32 regimeid)
{
	struct fll_device *pflldev;
	u8 j;
	struct clk_pmupstate *pclk = &g->clk_pmu;

	BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super),
			struct fll_device *, pflldev, j) {
		if (pflldev->clk_domain == domain) {
			pflldev->regime_desc.regime_id = regimeid;
			return 0;
		}
	}
	return -EINVAL;
}

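/* Read back the cached regime id of the FLL device on the given domain. */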
static int get_regime_id(struct gk20a *g, u32 domain, u32 *regimeid)
{
	struct fll_device *pflldev;
	u8 j;
	struct clk_pmupstate *pclk = &g->clk_pmu;

	BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super),
			struct fll_device *, pflldev, j) {
		if (pflldev->clk_domain == domain) {
			*regimeid = pflldev->regime_desc.regime_id;
			return 0;
		}
	}
	return -EINVAL;
}

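/*
 * Program the FLL clocks: read the current regime of each domain, derive
 * the target regime from the requested frequency, inject the VF change
 * through the PMU, then cache the target regime ids.
 */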
int clk_set_fll_clks(struct gk20a *g, struct set_fll_clk *setfllclk)
{
	int status = -EINVAL;

	/* get current and target regime ids */
	status = get_regime_id(g, CTRL_CLK_DOMAIN_GPC2CLK,
			&setfllclk->current_regime_id_gpc);
	if (status)
		goto done;

	setfllclk->target_regime_id_gpc = find_regime_id(g,
			CTRL_CLK_DOMAIN_GPC2CLK, setfllclk->gpc2clkmhz);

	status = get_regime_id(g, CTRL_CLK_DOMAIN_SYS2CLK,
			&setfllclk->current_regime_id_sys);
	if (status)
		goto done;

	setfllclk->target_regime_id_sys = find_regime_id(g,
			CTRL_CLK_DOMAIN_SYS2CLK, setfllclk->sys2clkmhz);

	status = get_regime_id(g, CTRL_CLK_DOMAIN_XBAR2CLK,
			&setfllclk->current_regime_id_xbar);
	if (status)
		goto done;

	setfllclk->target_regime_id_xbar = find_regime_id(g,
			CTRL_CLK_DOMAIN_XBAR2CLK, setfllclk->xbar2clkmhz);

	status = clk_pmu_vf_inject(g, setfllclk);

	if (status)
		nvgpu_err(g, "vf inject to change clk failed");

	/* save regime ids */
	status = set_regime_id(g, CTRL_CLK_DOMAIN_XBAR2CLK,
			setfllclk->target_regime_id_xbar);
	if (status)
		goto done;

	status = set_regime_id(g, CTRL_CLK_DOMAIN_GPC2CLK,
			setfllclk->target_regime_id_gpc);
	if (status)
		goto done;

	status = set_regime_id(g, CTRL_CLK_DOMAIN_SYS2CLK,
			setfllclk->target_regime_id_sys);
	if (status)
		goto done;
done:
	return status;
}

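/*
 * For a requested GPC2CLK frequency, walk the slave mask of the 3x-master
 * GPC2CLK domain and fill in the XBAR2CLK and SYS2CLK frequencies computed
 * by each slave's clkdomainclkgetslaveclk handler.
 */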
int clk_get_fll_clks(struct gk20a *g, struct set_fll_clk *setfllclk)
{
	int status = -EINVAL;
	struct clk_domain *pdomain;
	u8 i;
	struct clk_pmupstate *pclk = &g->clk_pmu;
	u16 clkmhz = 0;
	struct clk_domain_3x_master *p3xmaster;
	struct clk_domain_3x_slave *p3xslave;
	unsigned long slaveidxmask;

	if (setfllclk->gpc2clkmhz == 0)
		return -EINVAL;

	BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
			struct clk_domain *, pdomain, i) {

		if (pdomain->api_domain == CTRL_CLK_DOMAIN_GPC2CLK) {

			if (!pdomain->super.implements(g, &pdomain->super,
					CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER)) {
				status = -EINVAL;
				goto done;
			}
			p3xmaster = (struct clk_domain_3x_master *)pdomain;
			slaveidxmask = p3xmaster->slave_idxs_mask;
			for_each_set_bit(i, &slaveidxmask, 32) {
				p3xslave = (struct clk_domain_3x_slave *)
						CLK_CLK_DOMAIN_GET(pclk, i);
				if ((p3xslave->super.super.super.api_domain !=
						CTRL_CLK_DOMAIN_XBAR2CLK) &&
				    (p3xslave->super.super.super.api_domain !=
						CTRL_CLK_DOMAIN_SYS2CLK))
					continue;
				clkmhz = 0;
				status = p3xslave->clkdomainclkgetslaveclk(g,
						pclk,
						(struct clk_domain *)p3xslave,
						&clkmhz,
						setfllclk->gpc2clkmhz);
				if (status) {
					status = -EINVAL;
					goto done;
				}
				if (p3xslave->super.super.super.api_domain ==
						CTRL_CLK_DOMAIN_XBAR2CLK)
					setfllclk->xbar2clkmhz = clkmhz;
				if (p3xslave->super.super.super.api_domain ==
						CTRL_CLK_DOMAIN_SYS2CLK)
					setfllclk->sys2clkmhz = clkmhz;
			}
		}
	}
done:
	return status;
}

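/*
 * Run the VF search for a clock domain on both the LOGIC and SRAM rails;
 * despite the name, any printing of the VF table presumably happens inside
 * the clkdomainclkvfsearch implementation.
 */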
u32 clk_domain_print_vf_table(struct gk20a *g, u32 clkapidomain)
{
	u32 status = -EINVAL;
	struct clk_domain *pdomain;
	u8 i;
	struct clk_pmupstate *pclk = &g->clk_pmu;
	u16 clkmhz = 0;
	u32 volt = 0;

	BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
			struct clk_domain *, pdomain, i) {
		if (pdomain->api_domain == clkapidomain) {
			status = pdomain->clkdomainclkvfsearch(g, pclk,
					pdomain, &clkmhz, &volt,
					CLK_PROG_VFE_ENTRY_LOGIC);
			status = pdomain->clkdomainclkvfsearch(g, pclk,
					pdomain, &clkmhz, &volt,
					CLK_PROG_VFE_ENTRY_SRAM);
		}
	}
	return status;
}

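/*
 * Single-point VF lookup for a domain: maps railidx (a CTRL_VOLT_DOMAIN_*
 * value) to the corresponding VFE entry and runs clkdomainclkvfsearch,
 * which presumably completes whichever of *pclkmhz / *pvoltuv is unset
 * from the domain's VF curve.
 */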
u32 clk_domain_get_f_or_v(
	struct gk20a *g,
	u32 clkapidomain,
	u16 *pclkmhz,
	u32 *pvoltuv,
	u8 railidx
)
{
	u32 status = -EINVAL;
	struct clk_domain *pdomain;
	u8 i;
	struct clk_pmupstate *pclk = &g->clk_pmu;
	u8 rail;

	if ((pclkmhz == NULL) || (pvoltuv == NULL))
		return -EINVAL;

	if (railidx == CTRL_VOLT_DOMAIN_LOGIC)
		rail = CLK_PROG_VFE_ENTRY_LOGIC;
	else if (railidx == CTRL_VOLT_DOMAIN_SRAM)
		rail = CLK_PROG_VFE_ENTRY_SRAM;
	else
		return -EINVAL;

	BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
			struct clk_domain *, pdomain, i) {
		if (pdomain->api_domain == clkapidomain) {
			status = pdomain->clkdomainclkvfsearch(g, pclk,
					pdomain, pclkmhz, pvoltuv, rail);
			return status;
		}
	}
	return status;
}

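/*
 * Enumerate the frequency points of a domain's VF curve on the LOGIC rail.
 * Passing *pfpointscount == 0 with a NULL array is accepted, which allows
 * a two-pass caller: query the count first, then fetch the points. A
 * minimal sketch, assuming clkdomainclkgetfpoints fills *pfpointscount in
 * the query case (error handling omitted):
 *
 *	u32 count = 0;
 *	u16 *freqs;
 *
 *	clk_domain_get_f_points(g, CTRL_CLK_DOMAIN_GPC2CLK, &count, NULL);
 *	freqs = nvgpu_kzalloc(g, count * sizeof(*freqs));
 *	clk_domain_get_f_points(g, CTRL_CLK_DOMAIN_GPC2CLK, &count, freqs);
 */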
u32 clk_domain_get_f_points(
	struct gk20a *g,
	u32 clkapidomain,
	u32 *pfpointscount,
	u16 *pfreqpointsinmhz
)
{
	u32 status = -EINVAL;
	struct clk_domain *pdomain;
	u8 i;
	struct clk_pmupstate *pclk = &g->clk_pmu;

	if (pfpointscount == NULL)
		return -EINVAL;

	if ((pfreqpointsinmhz == NULL) && (*pfpointscount != 0))
		return -EINVAL;

	BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
			struct clk_domain *, pdomain, i) {
		if (pdomain->api_domain == clkapidomain) {
			status = pdomain->clkdomainclkgetfpoints(g, pclk,
					pdomain, pfpointscount,
					pfreqpointsinmhz,
					CLK_PROG_VFE_ENTRY_LOGIC);
			return status;
		}
	}
	return status;
}