summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/clk/clk.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/clk/clk.c')
-rw-r--r--drivers/gpu/nvgpu/clk/clk.c529
1 files changed, 529 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/clk/clk.c b/drivers/gpu/nvgpu/clk/clk.c
new file mode 100644
index 00000000..ecd53c02
--- /dev/null
+++ b/drivers/gpu/nvgpu/clk/clk.c
@@ -0,0 +1,529 @@
1/*
2 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#include "gk20a/gk20a.h"
15#include "clk.h"
16#include "pmuif/gpmuifclk.h"
17#include "pmuif/gpmuifvolt.h"
18#include "ctrl/ctrlclk.h"
19#include "ctrl/ctrlvolt.h"
20#include "volt/volt.h"
21#include "gk20a/pmu_gk20a.h"
22
23#define BOOT_GPC2CLK_MHZ 2581
24#define BOOT_MCLK_MHZ 3003
25
/*
 * Completion context handed to clkrpc_pmucmdhandler() through the PMU
 * command callback; lets the poster observe the RPC outcome.
 */
struct clkrpc_pmucmdhandler_params {
	/* In/out RPC buffer shared with the PMU command payload. */
	struct nv_pmu_clk_rpc *prpccall;
	/* Set to 1 by the handler when the PMU reports b_supported. */
	u32 success;
};
30
31static void clkrpc_pmucmdhandler(struct gk20a *g, struct pmu_msg *msg,
32 void *param, u32 handle, u32 status)
33{
34 struct clkrpc_pmucmdhandler_params *phandlerparams =
35 (struct clkrpc_pmucmdhandler_params *)param;
36
37 gk20a_dbg_info("");
38
39 if (msg->msg.clk.msg_type != NV_PMU_CLK_MSG_ID_RPC) {
40 gk20a_err(dev_from_gk20a(g),
41 "unsupported msg for VFE LOAD RPC %x",
42 msg->msg.clk.msg_type);
43 return;
44 }
45
46 if (phandlerparams->prpccall->b_supported)
47 phandlerparams->success = 1;
48}
49
50int clk_pmu_freq_controller_load(struct gk20a *g, bool bload)
51{
52 struct pmu_cmd cmd;
53 struct pmu_msg msg;
54 struct pmu_payload payload = { {0} };
55 u32 status;
56 u32 seqdesc;
57 struct nv_pmu_clk_rpc rpccall = {0};
58 struct clkrpc_pmucmdhandler_params handler = {0};
59 struct nv_pmu_clk_load *clkload;
60 struct clk_freq_controllers *pclk_freq_controllers;
61 struct ctrl_boardobjgrp_mask_e32 *load_mask;
62
63 pclk_freq_controllers = &g->clk_pmu.clk_freq_controllers;
64 rpccall.function = NV_PMU_CLK_RPC_ID_LOAD;
65 clkload = &rpccall.params.clk_load;
66 clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_FREQ_CONTROLLER;
67 clkload->action_mask = bload ?
68 NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_CONTROLLER_CALLBACK_YES :
69 NV_NV_PMU_CLK_LOAD_ACTION_MASK_FREQ_CONTROLLER_CALLBACK_NO;
70
71 load_mask = &rpccall.params.clk_load.payload.freq_controllers.load_mask;
72
73 status = boardobjgrpmask_export(
74 &pclk_freq_controllers->freq_ctrl_load_mask.super,
75 pclk_freq_controllers->freq_ctrl_load_mask.super.bitcount,
76 &load_mask->super);
77
78 cmd.hdr.unit_id = PMU_UNIT_CLK;
79 cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
80 (u32)sizeof(struct pmu_hdr);
81
82 cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;
83 msg.hdr.size = sizeof(struct pmu_msg);
84
85 payload.in.buf = (u8 *)&rpccall;
86 payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
87 payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
88 payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;
89
90 payload.out.buf = (u8 *)&rpccall;
91 payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
92 payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
93 payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;
94
95 handler.prpccall = &rpccall;
96 handler.success = 0;
97 status = gk20a_pmu_cmd_post(g, &cmd, NULL, &payload,
98 PMU_COMMAND_QUEUE_LPQ,
99 clkrpc_pmucmdhandler, (void *)&handler,
100 &seqdesc, ~0);
101
102 if (status) {
103 gk20a_err(dev_from_gk20a(g),
104 "unable to post clk RPC cmd %x",
105 cmd.cmd.clk.cmd_type);
106 goto done;
107 }
108
109 pmu_wait_message_cond(&g->pmu,
110 gk20a_get_gr_idle_timeout(g),
111 &handler.success, 1);
112
113 if (handler.success == 0) {
114 gk20a_err(dev_from_gk20a(g), "rpc call to load freq cntlr cal failed");
115 status = -EINVAL;
116 }
117
118done:
119 return status;
120}
121
122u32 clk_pmu_vin_load(struct gk20a *g)
123{
124 struct pmu_cmd cmd;
125 struct pmu_msg msg;
126 struct pmu_payload payload = { {0} };
127 u32 status;
128 u32 seqdesc;
129 struct nv_pmu_clk_rpc rpccall = {0};
130 struct clkrpc_pmucmdhandler_params handler = {0};
131 struct nv_pmu_clk_load *clkload;
132
133 rpccall.function = NV_PMU_CLK_RPC_ID_LOAD;
134 clkload = &rpccall.params.clk_load;
135 clkload->feature = NV_NV_PMU_CLK_LOAD_FEATURE_VIN;
136 clkload->action_mask = NV_NV_PMU_CLK_LOAD_ACTION_MASK_VIN_HW_CAL_PROGRAM_YES << 4;
137
138 cmd.hdr.unit_id = PMU_UNIT_CLK;
139 cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
140 (u32)sizeof(struct pmu_hdr);
141
142 cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;
143 msg.hdr.size = sizeof(struct pmu_msg);
144
145 payload.in.buf = (u8 *)&rpccall;
146 payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
147 payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
148 payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;
149
150 payload.out.buf = (u8 *)&rpccall;
151 payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
152 payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
153 payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;
154
155 handler.prpccall = &rpccall;
156 handler.success = 0;
157 status = gk20a_pmu_cmd_post(g, &cmd, NULL, &payload,
158 PMU_COMMAND_QUEUE_LPQ,
159 clkrpc_pmucmdhandler, (void *)&handler,
160 &seqdesc, ~0);
161
162 if (status) {
163 gk20a_err(dev_from_gk20a(g),
164 "unable to post clk RPC cmd %x",
165 cmd.cmd.clk.cmd_type);
166 goto done;
167 }
168
169 pmu_wait_message_cond(&g->pmu,
170 gk20a_get_gr_idle_timeout(g),
171 &handler.success, 1);
172
173 if (handler.success == 0) {
174 gk20a_err(dev_from_gk20a(g), "rpc call to load vin cal failed");
175 status = -EINVAL;
176 }
177
178done:
179 return status;
180}
181
182static u32 clk_pmu_vf_inject(struct gk20a *g, struct set_fll_clk *setfllclk)
183{
184 struct pmu_cmd cmd;
185 struct pmu_msg msg;
186 struct pmu_payload payload = { {0} };
187 u32 status;
188 u32 seqdesc;
189 struct nv_pmu_clk_rpc rpccall = {0};
190 struct clkrpc_pmucmdhandler_params handler = {0};
191 struct nv_pmu_clk_vf_change_inject *vfchange;
192
193 if ((setfllclk->gpc2clkmhz == 0) || (setfllclk->xbar2clkmhz == 0) ||
194 (setfllclk->sys2clkmhz == 0) || (setfllclk->voltuv == 0))
195 return -EINVAL;
196
197 if ((setfllclk->target_regime_id_gpc > CTRL_CLK_FLL_REGIME_ID_FR) ||
198 (setfllclk->target_regime_id_sys > CTRL_CLK_FLL_REGIME_ID_FR) ||
199 (setfllclk->target_regime_id_xbar > CTRL_CLK_FLL_REGIME_ID_FR))
200 return -EINVAL;
201
202 rpccall.function = NV_PMU_CLK_RPC_ID_CLK_VF_CHANGE_INJECT;
203 vfchange = &rpccall.params.clk_vf_change_inject;
204 vfchange->flags = 0;
205 vfchange->clk_list.num_domains = 3;
206 vfchange->clk_list.clk_domains[0].clk_domain = CTRL_CLK_DOMAIN_GPC2CLK;
207 vfchange->clk_list.clk_domains[0].clk_freq_khz =
208 setfllclk->gpc2clkmhz * 1000;
209 vfchange->clk_list.clk_domains[0].clk_flags = 0;
210 vfchange->clk_list.clk_domains[0].current_regime_id =
211 setfllclk->current_regime_id_gpc;
212 vfchange->clk_list.clk_domains[0].target_regime_id =
213 setfllclk->target_regime_id_gpc;
214 vfchange->clk_list.clk_domains[1].clk_domain = CTRL_CLK_DOMAIN_XBAR2CLK;
215 vfchange->clk_list.clk_domains[1].clk_freq_khz =
216 setfllclk->xbar2clkmhz * 1000;
217 vfchange->clk_list.clk_domains[1].clk_flags = 0;
218 vfchange->clk_list.clk_domains[1].current_regime_id =
219 setfllclk->current_regime_id_xbar;
220 vfchange->clk_list.clk_domains[1].target_regime_id =
221 setfllclk->target_regime_id_xbar;
222 vfchange->clk_list.clk_domains[2].clk_domain = CTRL_CLK_DOMAIN_SYS2CLK;
223 vfchange->clk_list.clk_domains[2].clk_freq_khz =
224 setfllclk->sys2clkmhz * 1000;
225 vfchange->clk_list.clk_domains[2].clk_flags = 0;
226 vfchange->clk_list.clk_domains[2].current_regime_id =
227 setfllclk->current_regime_id_sys;
228 vfchange->clk_list.clk_domains[2].target_regime_id =
229 setfllclk->target_regime_id_sys;
230 vfchange->volt_list.num_rails = 1;
231 vfchange->volt_list.rails[0].volt_domain = CTRL_VOLT_DOMAIN_LOGIC;
232 vfchange->volt_list.rails[0].voltage_uv = setfllclk->voltuv;
233 vfchange->volt_list.rails[0].voltage_min_noise_unaware_uv =
234 setfllclk->voltuv;
235
236 cmd.hdr.unit_id = PMU_UNIT_CLK;
237 cmd.hdr.size = (u32)sizeof(struct nv_pmu_clk_cmd) +
238 (u32)sizeof(struct pmu_hdr);
239
240 cmd.cmd.clk.cmd_type = NV_PMU_CLK_CMD_ID_RPC;
241 msg.hdr.size = sizeof(struct pmu_msg);
242
243 payload.in.buf = (u8 *)&rpccall;
244 payload.in.size = (u32)sizeof(struct nv_pmu_clk_rpc);
245 payload.in.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
246 payload.in.offset = NV_PMU_CLK_CMD_RPC_ALLOC_OFFSET;
247
248 payload.out.buf = (u8 *)&rpccall;
249 payload.out.size = (u32)sizeof(struct nv_pmu_clk_rpc);
250 payload.out.fb_size = PMU_CMD_SUBMIT_PAYLOAD_PARAMS_FB_SIZE_UNUSED;
251 payload.out.offset = NV_PMU_CLK_MSG_RPC_ALLOC_OFFSET;
252
253 handler.prpccall = &rpccall;
254 handler.success = 0;
255
256 status = gk20a_pmu_cmd_post(g, &cmd, NULL, &payload,
257 PMU_COMMAND_QUEUE_LPQ,
258 clkrpc_pmucmdhandler, (void *)&handler,
259 &seqdesc, ~0);
260
261 if (status) {
262 gk20a_err(dev_from_gk20a(g),
263 "unable to post clk RPC cmd %x",
264 cmd.cmd.clk.cmd_type);
265 goto done;
266 }
267
268 pmu_wait_message_cond(&g->pmu,
269 gk20a_get_gr_idle_timeout(g),
270 &handler.success, 1);
271
272 if (handler.success == 0) {
273 gk20a_err(dev_from_gk20a(g), "rpc call to inject clock failed");
274 status = -EINVAL;
275 }
276done:
277 return status;
278}
279
280static u32 find_regime_id(struct gk20a *g, u32 domain, u16 clkmhz)
281{
282 struct fll_device *pflldev;
283 u8 j;
284 struct clk_pmupstate *pclk = &g->clk_pmu;
285
286 BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super),
287 struct fll_device *, pflldev, j) {
288 if (pflldev->clk_domain == domain) {
289 if (pflldev->regime_desc.fixed_freq_regime_limit_mhz >=
290 clkmhz)
291 return CTRL_CLK_FLL_REGIME_ID_FFR;
292 else
293 return CTRL_CLK_FLL_REGIME_ID_FR;
294 }
295 }
296 return CTRL_CLK_FLL_REGIME_ID_INVALID;
297}
298
299static int set_regime_id(struct gk20a *g, u32 domain, u32 regimeid)
300{
301 struct fll_device *pflldev;
302 u8 j;
303 struct clk_pmupstate *pclk = &g->clk_pmu;
304
305 BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super),
306 struct fll_device *, pflldev, j) {
307 if (pflldev->clk_domain == domain) {
308 pflldev->regime_desc.regime_id = regimeid;
309 return 0;
310 }
311 }
312 return -EINVAL;
313}
314
315static int get_regime_id(struct gk20a *g, u32 domain, u32 *regimeid)
316{
317 struct fll_device *pflldev;
318 u8 j;
319 struct clk_pmupstate *pclk = &g->clk_pmu;
320
321 BOARDOBJGRP_FOR_EACH(&(pclk->avfs_fllobjs.super.super),
322 struct fll_device *, pflldev, j) {
323 if (pflldev->clk_domain == domain) {
324 *regimeid = pflldev->regime_desc.regime_id;
325 return 0;
326 }
327 }
328 return -EINVAL;
329}
330
331int clk_set_fll_clks(struct gk20a *g, struct set_fll_clk *setfllclk)
332{
333 int status = -EINVAL;
334
335 /*set regime ids */
336 status = get_regime_id(g, CTRL_CLK_DOMAIN_GPC2CLK,
337 &setfllclk->current_regime_id_gpc);
338 if (status)
339 goto done;
340
341 setfllclk->target_regime_id_gpc = find_regime_id(g,
342 CTRL_CLK_DOMAIN_GPC2CLK, setfllclk->gpc2clkmhz);
343
344 status = get_regime_id(g, CTRL_CLK_DOMAIN_SYS2CLK,
345 &setfllclk->current_regime_id_sys);
346 if (status)
347 goto done;
348
349 setfllclk->target_regime_id_sys = find_regime_id(g,
350 CTRL_CLK_DOMAIN_SYS2CLK, setfllclk->sys2clkmhz);
351
352 status = get_regime_id(g, CTRL_CLK_DOMAIN_XBAR2CLK,
353 &setfllclk->current_regime_id_xbar);
354 if (status)
355 goto done;
356
357 setfllclk->target_regime_id_xbar = find_regime_id(g,
358 CTRL_CLK_DOMAIN_XBAR2CLK, setfllclk->xbar2clkmhz);
359
360 status = clk_pmu_vf_inject(g, setfllclk);
361
362 if (status)
363 gk20a_err(dev_from_gk20a(g),
364 "vf inject to change clk failed");
365
366 /* save regime ids */
367 status = set_regime_id(g, CTRL_CLK_DOMAIN_XBAR2CLK,
368 setfllclk->target_regime_id_xbar);
369 if (status)
370 goto done;
371
372 status = set_regime_id(g, CTRL_CLK_DOMAIN_GPC2CLK,
373 setfllclk->target_regime_id_gpc);
374 if (status)
375 goto done;
376
377 status = set_regime_id(g, CTRL_CLK_DOMAIN_SYS2CLK,
378 setfllclk->target_regime_id_sys);
379 if (status)
380 goto done;
381done:
382 return status;
383}
384
385int clk_get_fll_clks(struct gk20a *g, struct set_fll_clk *setfllclk)
386{
387 int status = -EINVAL;
388 struct clk_domain *pdomain;
389 u8 i;
390 struct clk_pmupstate *pclk = &g->clk_pmu;
391 u16 clkmhz = 0;
392 struct clk_domain_3x_master *p3xmaster;
393 struct clk_domain_3x_slave *p3xslave;
394 unsigned long slaveidxmask;
395
396 if (setfllclk->gpc2clkmhz == 0)
397 return -EINVAL;
398
399 BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
400 struct clk_domain *, pdomain, i) {
401
402 if (pdomain->api_domain == CTRL_CLK_DOMAIN_GPC2CLK) {
403
404 if (!pdomain->super.implements(g, &pdomain->super,
405 CTRL_CLK_CLK_DOMAIN_TYPE_3X_MASTER)) {
406 status = -EINVAL;
407 goto done;
408 }
409 p3xmaster = (struct clk_domain_3x_master *)pdomain;
410 slaveidxmask = p3xmaster->slave_idxs_mask;
411 for_each_set_bit(i, &slaveidxmask, 32) {
412 p3xslave = (struct clk_domain_3x_slave *)
413 CLK_CLK_DOMAIN_GET(pclk, i);
414 if ((p3xslave->super.super.super.api_domain !=
415 CTRL_CLK_DOMAIN_XBAR2CLK) &&
416 (p3xslave->super.super.super.api_domain !=
417 CTRL_CLK_DOMAIN_SYS2CLK))
418 continue;
419 clkmhz = 0;
420 status = p3xslave->clkdomainclkgetslaveclk(g,
421 pclk,
422 (struct clk_domain *)p3xslave,
423 &clkmhz,
424 setfllclk->gpc2clkmhz);
425 if (status) {
426 status = -EINVAL;
427 goto done;
428 }
429 if (p3xslave->super.super.super.api_domain ==
430 CTRL_CLK_DOMAIN_XBAR2CLK)
431 setfllclk->xbar2clkmhz = clkmhz;
432 if (p3xslave->super.super.super.api_domain ==
433 CTRL_CLK_DOMAIN_SYS2CLK)
434 setfllclk->sys2clkmhz = clkmhz;
435 }
436 }
437 }
438done:
439 return status;
440}
441
442u32 clk_domain_print_vf_table(struct gk20a *g, u32 clkapidomain)
443{
444 u32 status = -EINVAL;
445 struct clk_domain *pdomain;
446 u8 i;
447 struct clk_pmupstate *pclk = &g->clk_pmu;
448 u16 clkmhz = 0;
449 u32 volt = 0;
450
451 BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
452 struct clk_domain *, pdomain, i) {
453 if (pdomain->api_domain == clkapidomain) {
454 status = pdomain->clkdomainclkvfsearch(g, pclk,
455 pdomain, &clkmhz, &volt,
456 CLK_PROG_VFE_ENTRY_LOGIC);
457 status = pdomain->clkdomainclkvfsearch(g, pclk,
458 pdomain, &clkmhz, &volt,
459 CLK_PROG_VFE_ENTRY_SRAM);
460 }
461 }
462 return status;
463}
464
465u32 clk_domain_get_f_or_v(
466 struct gk20a *g,
467 u32 clkapidomain,
468 u16 *pclkmhz,
469 u32 *pvoltuv,
470 u8 railidx
471)
472{
473 u32 status = -EINVAL;
474 struct clk_domain *pdomain;
475 u8 i;
476 struct clk_pmupstate *pclk = &g->clk_pmu;
477 u8 rail;
478
479 if ((pclkmhz == NULL) || (pvoltuv == NULL))
480 return -EINVAL;
481
482 if (railidx == CTRL_VOLT_DOMAIN_LOGIC)
483 rail = CLK_PROG_VFE_ENTRY_LOGIC;
484 else if (railidx == CTRL_VOLT_DOMAIN_SRAM)
485 rail = CLK_PROG_VFE_ENTRY_SRAM;
486 else
487 return -EINVAL;
488
489 BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
490 struct clk_domain *, pdomain, i) {
491 if (pdomain->api_domain == clkapidomain) {
492 status = pdomain->clkdomainclkvfsearch(g, pclk,
493 pdomain, pclkmhz, pvoltuv, rail);
494 return status;
495 }
496 }
497 return status;
498}
499
500u32 clk_domain_get_f_points(
501 struct gk20a *g,
502 u32 clkapidomain,
503 u32 *pfpointscount,
504 u16 *pfreqpointsinmhz
505)
506{
507 u32 status = -EINVAL;
508 struct clk_domain *pdomain;
509 u8 i;
510 struct clk_pmupstate *pclk = &g->clk_pmu;
511
512 if (pfpointscount == NULL)
513 return -EINVAL;
514
515 if ((pfreqpointsinmhz == NULL) && (*pfpointscount != 0))
516 return -EINVAL;
517
518 BOARDOBJGRP_FOR_EACH(&(pclk->clk_domainobjs.super.super),
519 struct clk_domain *, pdomain, i) {
520 if (pdomain->api_domain == clkapidomain) {
521 status = pdomain->clkdomainclkgetfpoints(g, pclk,
522 pdomain, pfpointscount,
523 pfreqpointsinmhz,
524 CLK_PROG_VFE_ENTRY_LOGIC);
525 return status;
526 }
527 }
528 return status;
529}