summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c')
-rw-r--r--drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c975
1 files changed, 975 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
new file mode 100644
index 00000000..c40eafe4
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/platform_gk20a_tegra.c
@@ -0,0 +1,975 @@
1/*
2 * GK20A Tegra Platform Interface
3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#include <linux/clkdev.h>
17#include <linux/of_platform.h>
18#include <linux/debugfs.h>
19#include <linux/platform_data/tegra_edp.h>
20#include <linux/delay.h>
21#include <uapi/linux/nvgpu.h>
22#include <linux/dma-buf.h>
23#include <linux/dma-attrs.h>
24#include <linux/nvmap.h>
25#include <linux/reset.h>
26#if defined(CONFIG_TEGRA_DVFS)
27#include <linux/tegra_soctherm.h>
28#endif
29#include <linux/platform/tegra/common.h>
30#include <linux/platform/tegra/mc.h>
31#include <linux/clk/tegra.h>
32#if defined(CONFIG_COMMON_CLK)
33#include <soc/tegra/tegra-dvfs.h>
34#endif
35#ifdef CONFIG_TEGRA_BWMGR
36#include <linux/platform/tegra/emc_bwmgr.h>
37#endif
38
39#include <linux/platform/tegra/tegra_emc.h>
40#include <soc/tegra/chip-id.h>
41
42#include <nvgpu/kmem.h>
43#include <nvgpu/bug.h>
44#include <nvgpu/enabled.h>
45#include <nvgpu/nvhost.h>
46
47#include <nvgpu/linux/dma.h>
48
49#include "gk20a/gk20a.h"
50#include "gm20b/clk_gm20b.h"
51
52#include "scale.h"
53#include "platform_gk20a.h"
54#include "clk.h"
55#include "os_linux.h"
56
57#include "../../../arch/arm/mach-tegra/iomap.h"
58
59#define TEGRA_GK20A_BW_PER_FREQ 32
60#define TEGRA_GM20B_BW_PER_FREQ 64
61#define TEGRA_DDR3_BW_PER_FREQ 16
62#define TEGRA_DDR4_BW_PER_FREQ 16
63#define MC_CLIENT_GPU 34
64#define PMC_GPU_RG_CNTRL_0 0x2d4
65
66#ifdef CONFIG_COMMON_CLK
67#define GPU_RAIL_NAME "vdd-gpu"
68#else
69#define GPU_RAIL_NAME "vdd_gpu"
70#endif
71
72extern struct device tegra_vpr_dev;
73
#ifdef CONFIG_TEGRA_BWMGR
/* EMC scaling state stored in gk20a_scale_profile->private_data.
 * bw_ratio: integer GPU<->EMC bandwidth ratio (gk20a_tegra_calibrate_emc);
 * freq_last_set: last EMC floor requested, restored on unrailgate;
 * bwmgr_cl: bandwidth-manager client handle registered for the GPU. */
struct gk20a_emc_params {
	unsigned long bw_ratio;
	unsigned long freq_last_set;
	struct tegra_bwmgr_client *bwmgr_cl;
};
#else
/* Same state minus the bwmgr client when CONFIG_TEGRA_BWMGR is off. */
struct gk20a_emc_params {
	unsigned long bw_ratio;
	unsigned long freq_last_set;
};
#endif
86
/* PMC MMIO base: statically mapped via IO_ADDRESS() at load time and
 * replaced with an explicit ioremap() in gk20a_tegra_probe(). Used for
 * the GPU rail-gate clamp register. */
static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);

/* Read a PMC register; also used to post (flush) a preceding write. */
static inline u32 __maybe_unused pmc_read(unsigned long reg)
{
	return readl(pmc + reg);
}

/* Write a PMC register. Relaxed is sufficient because callers follow
 * each write with a pmc_read() of the same register to order it. */
static inline void __maybe_unused pmc_write(u32 val, unsigned long reg)
{
	writel_relaxed(val, pmc + reg);
}
97#define MHZ_TO_HZ(x) ((x) * 1000000)
98#define HZ_TO_MHZ(x) ((x) / 1000000)
99
/* Free the one-page VPR probe buffer created by
 * gk20a_tegra_secure_page_alloc() and clear its destroy hook. */
static void gk20a_tegra_secure_page_destroy(struct gk20a *g,
				       struct secure_page_buffer *secure_buffer)
{
	DEFINE_DMA_ATTRS(attrs);
	/* Must free with the same NO_KERNEL_MAPPING attr used at alloc
	 * time; the "CPU pointer" argument is synthesized from the iova
	 * since VPR memory never had a kernel mapping. */
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
	dma_free_attrs(&tegra_vpr_dev, secure_buffer->size,
			(void *)(uintptr_t)secure_buffer->iova,
			secure_buffer->iova, __DMA_ATTR(attrs));

	secure_buffer->destroy = NULL;
}
111
112int gk20a_tegra_secure_page_alloc(struct device *dev)
113{
114 struct gk20a_platform *platform = dev_get_drvdata(dev);
115 struct gk20a *g = get_gk20a(dev);
116 struct secure_page_buffer *secure_buffer = &platform->secure_buffer;
117 DEFINE_DMA_ATTRS(attrs);
118 dma_addr_t iova;
119 size_t size = PAGE_SIZE;
120
121 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
122 return -EINVAL;
123
124 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
125 (void)dma_alloc_attrs(&tegra_vpr_dev, size, &iova,
126 GFP_KERNEL, __DMA_ATTR(attrs));
127 if (dma_mapping_error(&tegra_vpr_dev, iova))
128 return -ENOMEM;
129
130 secure_buffer->size = size;
131 secure_buffer->iova = iova;
132 secure_buffer->destroy = gk20a_tegra_secure_page_destroy;
133
134 return 0;
135}
136
/* Destructor installed by gk20a_tegra_secure_alloc(): release a GR
 * context buffer's VPR backing memory and its scatter-gather table. */
static void gk20a_tegra_secure_destroy(struct gk20a *g,
				  struct gr_ctx_buffer_desc *desc)
{
	DEFINE_DMA_ATTRS(attrs);

	if (desc->mem.priv.sgt) {
		u64 pa = nvgpu_mem_get_phys_addr(g, &desc->mem);

		/* Free with the same NO_KERNEL_MAPPING attr used at
		 * allocation; CPU pointer reconstructed from the PA. */
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
		dma_free_attrs(&tegra_vpr_dev, desc->mem.size,
			(void *)(uintptr_t)pa,
			pa, __DMA_ATTR(attrs));
		nvgpu_free_sgtable(g, &desc->mem.priv.sgt);
		desc->mem.priv.sgt = NULL;
	}
}
153
154static int gk20a_tegra_secure_alloc(struct gk20a *g,
155 struct gr_ctx_buffer_desc *desc,
156 size_t size)
157{
158 struct device *dev = dev_from_gk20a(g);
159 struct gk20a_platform *platform = dev_get_drvdata(dev);
160 DEFINE_DMA_ATTRS(attrs);
161 dma_addr_t iova;
162 struct sg_table *sgt;
163 struct page *page;
164 int err = 0;
165
166 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
167 (void)dma_alloc_attrs(&tegra_vpr_dev, size, &iova,
168 GFP_KERNEL, __DMA_ATTR(attrs));
169 if (dma_mapping_error(&tegra_vpr_dev, iova))
170 return -ENOMEM;
171
172 sgt = nvgpu_kzalloc(platform->g, sizeof(*sgt));
173 if (!sgt) {
174 nvgpu_err(platform->g, "failed to allocate memory");
175 goto fail;
176 }
177 err = sg_alloc_table(sgt, 1, GFP_KERNEL);
178 if (err) {
179 nvgpu_err(platform->g, "failed to allocate sg_table");
180 goto fail_sgt;
181 }
182 page = phys_to_page(iova);
183 sg_set_page(sgt->sgl, page, size, 0);
184 /* This bypasses SMMU for VPR during gmmu_map. */
185 sg_dma_address(sgt->sgl) = 0;
186
187 desc->destroy = gk20a_tegra_secure_destroy;
188
189 desc->mem.priv.sgt = sgt;
190 desc->mem.size = size;
191 desc->mem.aperture = APERTURE_SYSMEM;
192
193 if (platform->secure_buffer.destroy)
194 platform->secure_buffer.destroy(g, &platform->secure_buffer);
195
196 return err;
197
198fail_sgt:
199 nvgpu_kfree(platform->g, sgt);
200fail:
201 dma_free_attrs(&tegra_vpr_dev, desc->mem.size,
202 (void *)(uintptr_t)iova, iova, __DMA_ATTR(attrs));
203 return err;
204}
205
206/*
207 * gk20a_tegra_get_emc_rate()
208 *
209 * This function returns the minimum emc clock based on gpu frequency
210 */
211
212static unsigned long gk20a_tegra_get_emc_rate(struct gk20a *g,
213 struct gk20a_emc_params *emc_params)
214{
215 unsigned long gpu_freq, gpu_fmax_at_vmin;
216 unsigned long emc_rate, emc_scale;
217
218 gpu_freq = clk_get_rate(g->clk.tegra_clk);
219 gpu_fmax_at_vmin = tegra_dvfs_get_fmax_at_vmin_safe_t(
220 clk_get_parent(g->clk.tegra_clk));
221
222 /* When scaling emc, account for the gpu load when the
223 * gpu frequency is less than or equal to fmax@vmin. */
224 if (gpu_freq <= gpu_fmax_at_vmin)
225 emc_scale = min(g->pmu.load_avg, g->emc3d_ratio);
226 else
227 emc_scale = g->emc3d_ratio;
228
229 emc_rate =
230 (HZ_TO_MHZ(gpu_freq) * emc_params->bw_ratio * emc_scale) / 1000;
231
232 return MHZ_TO_HZ(emc_rate);
233}
234
235/*
236 * gk20a_tegra_prescale(profile, freq)
237 *
238 * This function informs EDP about changed constraints.
239 */
240
241static void gk20a_tegra_prescale(struct device *dev)
242{
243 struct gk20a *g = get_gk20a(dev);
244 u32 avg = 0;
245
246 nvgpu_pmu_load_norm(g, &avg);
247 tegra_edp_notify_gpu_load(avg, clk_get_rate(g->clk.tegra_clk));
248}
249
250/*
251 * gk20a_tegra_calibrate_emc()
252 *
253 */
254
255static void gk20a_tegra_calibrate_emc(struct device *dev,
256 struct gk20a_emc_params *emc_params)
257{
258 enum tegra_chipid cid = tegra_get_chip_id();
259 long gpu_bw, emc_bw;
260
261 /* store gpu bw based on soc */
262 switch (cid) {
263 case TEGRA210:
264 gpu_bw = TEGRA_GM20B_BW_PER_FREQ;
265 break;
266 case TEGRA124:
267 case TEGRA132:
268 gpu_bw = TEGRA_GK20A_BW_PER_FREQ;
269 break;
270 default:
271 gpu_bw = 0;
272 break;
273 }
274
275 /* TODO detect DDR type.
276 * Okay for now since DDR3 and DDR4 have the same BW ratio */
277 emc_bw = TEGRA_DDR3_BW_PER_FREQ;
278
279 /* Calculate the bandwidth ratio of gpu_freq <-> emc_freq
280 * NOTE the ratio must come out as an integer */
281 emc_params->bw_ratio = (gpu_bw / emc_bw);
282}
283
284#ifdef CONFIG_TEGRA_BWMGR
285#ifdef CONFIG_TEGRA_DVFS
286static void gm20b_bwmgr_set_rate(struct gk20a_platform *platform, bool enb)
287{
288 struct gk20a_scale_profile *profile = platform->g->scale_profile;
289 struct gk20a_emc_params *params;
290 unsigned long rate;
291
292 if (!profile || !profile->private_data)
293 return;
294
295 params = (struct gk20a_emc_params *)profile->private_data;
296 rate = (enb) ? params->freq_last_set : 0;
297 tegra_bwmgr_set_emc(params->bwmgr_cl, rate, TEGRA_BWMGR_SET_EMC_FLOOR);
298}
299#endif
300
301static void gm20b_tegra_postscale(struct device *dev, unsigned long freq)
302{
303 struct gk20a_platform *platform = dev_get_drvdata(dev);
304 struct gk20a_scale_profile *profile = platform->g->scale_profile;
305 struct gk20a_emc_params *emc_params;
306 unsigned long emc_rate;
307
308 if (!profile || !profile->private_data)
309 return;
310
311 emc_params = profile->private_data;
312 emc_rate = gk20a_tegra_get_emc_rate(get_gk20a(dev), emc_params);
313
314 if (emc_rate > tegra_bwmgr_get_max_emc_rate())
315 emc_rate = tegra_bwmgr_get_max_emc_rate();
316
317 emc_params->freq_last_set = emc_rate;
318 if (platform->is_railgated && platform->is_railgated(dev))
319 return;
320
321 tegra_bwmgr_set_emc(emc_params->bwmgr_cl, emc_rate,
322 TEGRA_BWMGR_SET_EMC_FLOOR);
323
324}
325
326#endif
327
328#if defined(CONFIG_TEGRA_DVFS)
329/*
330 * gk20a_tegra_is_railgated()
331 *
332 * Check status of gk20a power rail
333 */
334
335static bool gk20a_tegra_is_railgated(struct device *dev)
336{
337 struct gk20a *g = get_gk20a(dev);
338 struct gk20a_platform *platform = dev_get_drvdata(dev);
339 bool ret = false;
340
341 if (!nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
342 ret = !tegra_dvfs_is_rail_up(platform->gpu_rail);
343
344 return ret;
345}
346
347/*
348 * gm20b_tegra_railgate()
349 *
350 * Gate (disable) gm20b power rail
351 */
352
/*
 * gm20b_tegra_railgate()
 *
 * Gate (disable) the gm20b power rail. The sequence is order-sensitive:
 * MC flush -> clamp -> reset assert -> clock disable -> thermal sensor
 * invalidate -> rail power down, with settle delays between steps.
 */
static int gm20b_tegra_railgate(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	int ret = 0;

	/* Nothing to do on simulation or if the rail is already down. */
	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL) ||
	    !tegra_dvfs_is_rail_up(platform->gpu_rail))
		return 0;

	/* Stop new MC traffic from the GPU before powering it off. */
	tegra_mc_flush(MC_CLIENT_GPU);

	udelay(10);

	/* enable clamp */
	pmc_write(0x1, PMC_GPU_RG_CNTRL_0);
	pmc_read(PMC_GPU_RG_CNTRL_0);

	udelay(10);

	platform->reset_assert(dev);

	udelay(10);

	/*
	 * GPCPLL is already disabled before entering this function; reference
	 * clocks are enabled until now - disable them just before rail gating
	 */
	clk_disable_unprepare(platform->clk_reset);
	clk_disable_unprepare(platform->clk[0]);
	clk_disable_unprepare(platform->clk[1]);
	if (platform->clk[3])
		clk_disable_unprepare(platform->clk[3]);

	udelay(10);

	/* GPU temperature readings are meaningless while gated. */
	tegra_soctherm_gpu_tsens_invalidate(1);

	if (tegra_dvfs_is_rail_up(platform->gpu_rail)) {
		ret = tegra_dvfs_rail_power_down(platform->gpu_rail);
		if (ret)
			goto err_power_off;
	} else
		pr_info("No GPU regulator?\n");

#ifdef CONFIG_TEGRA_BWMGR
	/* Drop the EMC floor while the GPU is off. */
	gm20b_bwmgr_set_rate(platform, false);
#endif

	return 0;

err_power_off:
	nvgpu_err(platform->g, "Could not railgate GPU");
	return ret;
}
408
409
410/*
411 * gm20b_tegra_unrailgate()
412 *
413 * Ungate (enable) gm20b power rail
414 */
415
416static int gm20b_tegra_unrailgate(struct device *dev)
417{
418 struct gk20a_platform *platform = dev_get_drvdata(dev);
419 struct gk20a *g = platform->g;
420 int ret = 0;
421 bool first = false;
422
423 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
424 return 0;
425
426 ret = tegra_dvfs_rail_power_up(platform->gpu_rail);
427 if (ret)
428 return ret;
429
430#ifdef CONFIG_TEGRA_BWMGR
431 gm20b_bwmgr_set_rate(platform, true);
432#endif
433
434 tegra_soctherm_gpu_tsens_invalidate(0);
435
436 if (!platform->clk_reset) {
437 platform->clk_reset = clk_get(dev, "gpu_gate");
438 if (IS_ERR(platform->clk_reset)) {
439 nvgpu_err(g, "fail to get gpu reset clk");
440 goto err_clk_on;
441 }
442 }
443
444 if (!first) {
445 ret = clk_prepare_enable(platform->clk_reset);
446 if (ret) {
447 nvgpu_err(g, "could not turn on gpu_gate");
448 goto err_clk_on;
449 }
450
451 ret = clk_prepare_enable(platform->clk[0]);
452 if (ret) {
453 nvgpu_err(g, "could not turn on gpu pll");
454 goto err_clk_on;
455 }
456 ret = clk_prepare_enable(platform->clk[1]);
457 if (ret) {
458 nvgpu_err(g, "could not turn on pwr clock");
459 goto err_clk_on;
460 }
461
462 if (platform->clk[3]) {
463 ret = clk_prepare_enable(platform->clk[3]);
464 if (ret) {
465 nvgpu_err(g, "could not turn on fuse clock");
466 goto err_clk_on;
467 }
468 }
469 }
470
471 udelay(10);
472
473 platform->reset_assert(dev);
474
475 udelay(10);
476
477 pmc_write(0, PMC_GPU_RG_CNTRL_0);
478 pmc_read(PMC_GPU_RG_CNTRL_0);
479
480 udelay(10);
481
482 clk_disable(platform->clk_reset);
483 platform->reset_deassert(dev);
484 clk_enable(platform->clk_reset);
485
486 /* Flush MC after boot/railgate/SC7 */
487 tegra_mc_flush(MC_CLIENT_GPU);
488
489 udelay(10);
490
491 tegra_mc_flush_done(MC_CLIENT_GPU);
492
493 udelay(10);
494
495 return 0;
496
497err_clk_on:
498 tegra_dvfs_rail_power_down(platform->gpu_rail);
499
500 return ret;
501}
502#endif
503
504
/* System clocks acquired at probe via clk_get_sys("tegra_<dev>", name).
 * Index order is load-bearing: clk[0]=gpu_ref, clk[1]=pll_p_out5,
 * clk[2]=emc, clk[3]=fuse — railgate/unrailgate reference clk[0],
 * clk[1] and clk[3] by index. UINT_MAX defaults are rounded down to the
 * nearest supported rate by clk_round_rate(). */
static struct {
	char *name;
	unsigned long default_rate;
} tegra_gk20a_clocks[] = {
	{"gpu_ref", UINT_MAX},
	{"pll_p_out5", 204000000},
	{"emc", UINT_MAX},
	{"fuse", UINT_MAX},
};
514
515
516
517/*
518 * gk20a_tegra_get_clocks()
519 *
520 * This function finds clocks in tegra platform and populates
521 * the clock information to gk20a platform data.
522 */
523
524static int gk20a_tegra_get_clocks(struct device *dev)
525{
526 struct gk20a_platform *platform = dev_get_drvdata(dev);
527 char devname[16];
528 unsigned int i;
529 int ret = 0;
530
531 BUG_ON(GK20A_CLKS_MAX < ARRAY_SIZE(tegra_gk20a_clocks));
532
533 snprintf(devname, sizeof(devname), "tegra_%s", dev_name(dev));
534
535 platform->num_clks = 0;
536 for (i = 0; i < ARRAY_SIZE(tegra_gk20a_clocks); i++) {
537 long rate = tegra_gk20a_clocks[i].default_rate;
538 struct clk *c;
539
540 c = clk_get_sys(devname, tegra_gk20a_clocks[i].name);
541 if (IS_ERR(c)) {
542 ret = PTR_ERR(c);
543 goto err_get_clock;
544 }
545 rate = clk_round_rate(c, rate);
546 clk_set_rate(c, rate);
547 platform->clk[i] = c;
548 }
549 platform->num_clks = i;
550
551 return 0;
552
553err_get_clock:
554
555 while (i--)
556 clk_put(platform->clk[i]);
557 return ret;
558}
559
560#if defined(CONFIG_RESET_CONTROLLER) && defined(CONFIG_COMMON_CLK)
561static int gm20b_tegra_reset_assert(struct device *dev)
562{
563 struct gk20a_platform *platform = gk20a_get_platform(dev);
564
565 if (!platform->reset_control) {
566 WARN(1, "Reset control not initialized\n");
567 return -ENOSYS;
568 }
569
570 return reset_control_assert(platform->reset_control);
571}
572
573static int gm20b_tegra_reset_deassert(struct device *dev)
574{
575 struct gk20a_platform *platform = gk20a_get_platform(dev);
576
577 if (!platform->reset_control) {
578 WARN(1, "Reset control not initialized\n");
579 return -ENOSYS;
580 }
581
582 return reset_control_deassert(platform->reset_control);
583}
584#endif
585
586static void gk20a_tegra_scale_init(struct device *dev)
587{
588 struct gk20a_platform *platform = gk20a_get_platform(dev);
589 struct gk20a_scale_profile *profile = platform->g->scale_profile;
590 struct gk20a_emc_params *emc_params;
591
592 if (!profile)
593 return;
594
595 emc_params = nvgpu_kzalloc(platform->g, sizeof(*emc_params));
596 if (!emc_params)
597 return;
598
599 emc_params->freq_last_set = -1;
600 gk20a_tegra_calibrate_emc(dev, emc_params);
601
602#ifdef CONFIG_TEGRA_BWMGR
603 emc_params->bwmgr_cl = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_GPU);
604 if (!emc_params->bwmgr_cl) {
605 gk20a_dbg_info("%s Missing GPU BWMGR client\n", __func__);
606 return;
607 }
608#endif
609
610 profile->private_data = emc_params;
611}
612
613static void gk20a_tegra_scale_exit(struct device *dev)
614{
615 struct gk20a_platform *platform = dev_get_drvdata(dev);
616 struct gk20a_scale_profile *profile = platform->g->scale_profile;
617 struct gk20a_emc_params *emc_params;
618
619 if (!profile)
620 return;
621
622 emc_params = profile->private_data;
623#ifdef CONFIG_TEGRA_BWMGR
624 tegra_bwmgr_unregister(emc_params->bwmgr_cl);
625#endif
626
627 nvgpu_kfree(platform->g, profile->private_data);
628}
629
/* Dump nvhost state for debugging; no-op without NVHOST support. */
void gk20a_tegra_debug_dump(struct device *dev)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	struct gk20a *g = gk20a_get_platform(dev)->g;

	if (g->nvhost_dev)
		nvgpu_nvhost_debug_dump_device(g->nvhost_dev);
#endif
}

/* Take a runtime-PM style reference on the nvhost module.
 * Returns 0 when there is nothing to reference. */
int gk20a_tegra_busy(struct device *dev)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	struct gk20a *g = gk20a_get_platform(dev)->g;

	if (g->nvhost_dev)
		return nvgpu_nvhost_module_busy_ext(g->nvhost_dev);
#endif
	return 0;
}

/* Drop the nvhost module reference taken by gk20a_tegra_busy(). */
void gk20a_tegra_idle(struct device *dev)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	struct gk20a *g = gk20a_get_platform(dev)->g;

	if (g->nvhost_dev)
		nvgpu_nvhost_module_idle_ext(g->nvhost_dev);
#endif
}
663
/* Install the Tegra VPR-backed secure allocator as the gk20a
 * secure_alloc HAL op. */
void gk20a_tegra_init_secure_alloc(struct gk20a *g)
{
	g->ops.secure_alloc = gk20a_tegra_secure_alloc;
}
668
669#ifdef CONFIG_COMMON_CLK
670static struct clk *gk20a_clk_get(struct gk20a *g)
671{
672 if (!g->clk.tegra_clk) {
673 struct clk *clk;
674 char clk_dev_id[32];
675 struct device *dev = dev_from_gk20a(g);
676
677 snprintf(clk_dev_id, 32, "tegra_%s", dev_name(dev));
678
679 clk = clk_get_sys(clk_dev_id, "gpu");
680 if (IS_ERR(clk)) {
681 nvgpu_err(g, "fail to get tegra gpu clk %s/gpu\n",
682 clk_dev_id);
683 return NULL;
684 }
685 g->clk.tegra_clk = clk;
686 }
687
688 return g->clk.tegra_clk;
689}
690
691static int gm20b_clk_prepare_ops(struct clk_hw *hw)
692{
693 struct clk_gk20a *clk = to_clk_gk20a(hw);
694 return gm20b_clk_prepare(clk);
695}
696
697static void gm20b_clk_unprepare_ops(struct clk_hw *hw)
698{
699 struct clk_gk20a *clk = to_clk_gk20a(hw);
700 gm20b_clk_unprepare(clk);
701}
702
703static int gm20b_clk_is_prepared_ops(struct clk_hw *hw)
704{
705 struct clk_gk20a *clk = to_clk_gk20a(hw);
706 return gm20b_clk_is_prepared(clk);
707}
708
709static unsigned long gm20b_recalc_rate_ops(struct clk_hw *hw, unsigned long parent_rate)
710{
711 struct clk_gk20a *clk = to_clk_gk20a(hw);
712 return gm20b_recalc_rate(clk, parent_rate);
713}
714
715static int gm20b_gpcclk_set_rate_ops(struct clk_hw *hw, unsigned long rate,
716 unsigned long parent_rate)
717{
718 struct clk_gk20a *clk = to_clk_gk20a(hw);
719 return gm20b_gpcclk_set_rate(clk, rate, parent_rate);
720}
721
722static long gm20b_round_rate_ops(struct clk_hw *hw, unsigned long rate,
723 unsigned long *parent_rate)
724{
725 struct clk_gk20a *clk = to_clk_gk20a(hw);
726 return gm20b_round_rate(clk, rate, parent_rate);
727}
728
729static const struct clk_ops gm20b_clk_ops = {
730 .prepare = gm20b_clk_prepare_ops,
731 .unprepare = gm20b_clk_unprepare_ops,
732 .is_prepared = gm20b_clk_is_prepared_ops,
733 .recalc_rate = gm20b_recalc_rate_ops,
734 .set_rate = gm20b_gpcclk_set_rate_ops,
735 .round_rate = gm20b_round_rate_ops,
736};
737
738static int gm20b_register_gpcclk(struct gk20a *g)
739{
740 const char *parent_name = "pllg_ref";
741 struct clk_gk20a *clk = &g->clk;
742 struct clk_init_data init;
743 struct clk *c;
744 int err = 0;
745
746 /* make sure the clock is available */
747 if (!gk20a_clk_get(g))
748 return -ENOSYS;
749
750 err = gm20b_init_clk_setup_sw(g);
751 if (err)
752 return err;
753
754 init.name = "gpcclk";
755 init.ops = &gm20b_clk_ops;
756 init.parent_names = &parent_name;
757 init.num_parents = 1;
758 init.flags = 0;
759
760 /* Data in .init is copied by clk_register(), so stack variable OK */
761 clk->hw.init = &init;
762 c = clk_register(dev_from_gk20a(g), &clk->hw);
763 if (IS_ERR(c)) {
764 nvgpu_err(g, "Failed to register GPCPLL clock");
765 return -EINVAL;
766 }
767
768 clk->g = g;
769 clk_register_clkdev(c, "gpcclk", "gpcclk");
770
771 return err;
772}
773#endif /* CONFIG_COMMON_CLK */
774
/*
 * gk20a_tegra_probe()
 *
 * Platform probe: resolve the GPU power rail (deferring until DVFS is
 * ready on CCF kernels), attach nvhost, apply per-board scaling quirks,
 * acquire system clocks, and map the PMC for rail-clamp control.
 */
static int gk20a_tegra_probe(struct device *dev)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct device_node *np = dev->of_node;
	bool joint_xpu_rail = false;
	int ret;

#ifdef CONFIG_COMMON_CLK
	/* DVFS is not guaranteed to be initialized at the time of probe on
	 * kernels with Common Clock Framework enabled.
	 */
	if (!platform->gpu_rail) {
		platform->gpu_rail = tegra_dvfs_get_rail_by_name(GPU_RAIL_NAME);
		if (!platform->gpu_rail) {
			gk20a_dbg_info("deferring probe no gpu_rail\n");
			return -EPROBE_DEFER;
		}
	}

	if (!tegra_dvfs_is_rail_ready(platform->gpu_rail)) {
		gk20a_dbg_info("deferring probe gpu_rail not ready\n");
		return -EPROBE_DEFER;
	}
#endif

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	ret = nvgpu_get_nvhost_dev(platform->g);
	if (ret)
		return ret;
#endif

#ifdef CONFIG_OF
	joint_xpu_rail = of_property_read_bool(of_chosen,
			"nvidia,tegra-joint_xpu_rail");
#endif

	if (joint_xpu_rail) {
		gk20a_dbg_info("XPU rails are joint\n");
		/* A rail shared with the CPU cannot be gated independently. */
		platform->g->can_railgate = false;
	}

	platform->g->clk.gpc_pll.id = GK20A_GPC_PLL;
	if (tegra_get_chip_id() == TEGRA210) {
		/* WAR for bug 1547668: Disable railgating and scaling
		   irrespective of platform data if the rework was not made. */
		np = of_find_node_by_path("/gpu-dvfs-rework");
		if (!(np && of_device_is_available(np))) {
			platform->devfreq_governor = "";
			dev_warn(dev, "board does not support scaling");
		}
		/* PLL variant depends on the TEGRA210 silicon revision. */
		platform->g->clk.gpc_pll.id = GM20B_GPC_PLL_B1;
		if (tegra_chip_get_revision() > TEGRA210_REVISION_A04p)
			platform->g->clk.gpc_pll.id = GM20B_GPC_PLL_C1;
	}

	if (tegra_get_chip_id() == TEGRA132)
		platform->soc_name = "tegra13x";

	/* NOTE(review): return value ignored — probe continues even if
	 * some system clocks are missing; confirm this is intended. */
	gk20a_tegra_get_clocks(dev);
	nvgpu_linux_init_clk_support(platform->g);
	gk20a_tegra_init_secure_alloc(platform->g);

	if (platform->clk_register) {
		ret = platform->clk_register(platform->g);
		if (ret)
			return ret;
	}

	/* Replace the static IO_ADDRESS() PMC mapping with an ioremap. */
	pmc = ioremap(TEGRA_PMC_BASE, 4096);

	return 0;
}
847
/* Second-stage probe after core gk20a init. Both calls are best-effort:
 * their return values are deliberately ignored and probe succeeds. */
static int gk20a_tegra_late_probe(struct device *dev)
{
	/* Cause early VPR resize */
	gk20a_tegra_secure_page_alloc(dev);

	/* Initialise tegra specific scaling quirks */
	gk20a_tegra_scale_init(dev);

	return 0;
}
858
/* Driver removal: tear down scaling state and release the nvhost
 * device reference. Always succeeds. */
static int gk20a_tegra_remove(struct device *dev)
{
	/* deinitialise tegra specific scaling quirks */
	gk20a_tegra_scale_exit(dev);

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	nvgpu_free_nvhost_dev(get_gk20a(dev));
#endif

	return 0;
}
870
/* System suspend: report zero GPU load/frequency to EDP so thermal
 * budgeting does not account for a sleeping GPU. */
static int gk20a_tegra_suspend(struct device *dev)
{
	tegra_edp_notify_gpu_load(0, 0);
	return 0;
}
876
877#if defined(CONFIG_COMMON_CLK)
878static long gk20a_round_clk_rate(struct device *dev, unsigned long rate)
879{
880 struct gk20a_platform *platform = gk20a_get_platform(dev);
881 struct gk20a *g = platform->g;
882
883 /* make sure the clock is available */
884 if (!gk20a_clk_get(g))
885 return rate;
886
887 return clk_round_rate(clk_get_parent(g->clk.tegra_clk), rate);
888}
889
890static int gk20a_clk_get_freqs(struct device *dev,
891 unsigned long **freqs, int *num_freqs)
892{
893 struct gk20a_platform *platform = gk20a_get_platform(dev);
894 struct gk20a *g = platform->g;
895
896 /* make sure the clock is available */
897 if (!gk20a_clk_get(g))
898 return -ENOSYS;
899
900 return tegra_dvfs_get_freqs(clk_get_parent(g->clk.tegra_clk),
901 freqs, num_freqs);
902}
903#endif
904
/* Platform descriptor for gm20b on Tegra210-class SoCs: static
 * capabilities plus the callback table wired to the functions above. */
struct gk20a_platform gm20b_tegra_platform = {
	.has_syncpoints = true,
	.aggressive_sync_destroy_thresh = 64,

	/* power management configuration */
	.railgate_delay_init = 500,
	.can_railgate_init = true,
	.can_elpg_init = true,
	.enable_slcg = true,
	.enable_blcg = true,
	.enable_elcg = true,
	.can_slcg = true,
	.can_blcg = true,
	.can_elcg = true,
	.enable_elpg = true,
	.enable_aelpg = true,
	.enable_perfmon = true,
	.ptimer_src_freq = 19200000,

	.force_reset_in_do_idle = false,

	/* channel watchdog timeout in milliseconds */
	.ch_wdt_timeout_ms = 5000,

	.probe = gk20a_tegra_probe,
	.late_probe = gk20a_tegra_late_probe,
	.remove = gk20a_tegra_remove,
	/* power management callbacks */
	.suspend = gk20a_tegra_suspend,

#if defined(CONFIG_TEGRA_DVFS)
	.railgate = gm20b_tegra_railgate,
	.unrailgate = gm20b_tegra_unrailgate,
	.is_railgated = gk20a_tegra_is_railgated,
#endif

	.busy = gk20a_tegra_busy,
	.idle = gk20a_tegra_idle,

	/* NOTE(review): the gk20a_* fallbacks below are defined outside
	 * this file — confirm they are linked when neither
	 * CONFIG_RESET_CONTROLLER nor CONFIG_COMMON_CLK is set. */
#if defined(CONFIG_RESET_CONTROLLER) && defined(CONFIG_COMMON_CLK)
	.reset_assert = gm20b_tegra_reset_assert,
	.reset_deassert = gm20b_tegra_reset_deassert,
#else
	.reset_assert = gk20a_tegra_reset_assert,
	.reset_deassert = gk20a_tegra_reset_deassert,
#endif

#if defined(CONFIG_COMMON_CLK)
	.clk_round_rate = gk20a_round_clk_rate,
	.get_clk_freqs = gk20a_clk_get_freqs,
#endif

#ifdef CONFIG_COMMON_CLK
	.clk_register = gm20b_register_gpcclk,
#endif

	/* frequency scaling configuration */
	.prescale = gk20a_tegra_prescale,
#ifdef CONFIG_TEGRA_BWMGR
	.postscale = gm20b_tegra_postscale,
#endif
	.devfreq_governor = "nvhost_podgov",
	.qos_notify = gk20a_scale_qos_notify,

	.dump_platform_dependencies = gk20a_tegra_debug_dump,

	.has_cde = true,

	.soc_name = "tegra21x",

	.unified_memory = true,
};