author	Terje Bergstrom <tbergstrom@nvidia.com>	2018-04-18 15:59:00 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-06-15 20:47:31 -0400
commit	2a2c16af5f9f1ccfc93a13e820d5381e5c881e92 (patch)
tree	2e5d7b042270a649978e5bb540857012c85fb5b5	/drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c
parent	98d996f4ffb0137d119b5849cae46d7b7e5693e1 (diff)
gpu: nvgpu: Move Linux files away from common
Move all Linux source code files to drivers/gpu/nvgpu/os/linux from
drivers/gpu/nvgpu/common/linux. This changes the meaning of common to
be OS independent.

JIRA NVGPU-598
JIRA NVGPU-601

Change-Id: Ib7f2a43d3688bb0d0b7dcc48469a6783fd988ce9
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1747714
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c')
-rw-r--r--	drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c	957
1 file changed, 957 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c b/drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c
new file mode 100644
index 00000000..af55e5b6
--- /dev/null
+++ b/drivers/gpu/nvgpu/os/linux/platform_gk20a_tegra.c
@@ -0,0 +1,957 @@
/*
 * GK20A Tegra Platform Interface
 *
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/clkdev.h>
#include <linux/of_platform.h>
#include <linux/debugfs.h>
#include <linux/platform_data/tegra_edp.h>
#include <linux/delay.h>
#include <uapi/linux/nvgpu.h>
#include <linux/dma-buf.h>
#include <linux/dma-attrs.h>
#include <linux/nvmap.h>
#include <linux/reset.h>
#if defined(CONFIG_TEGRA_DVFS)
#include <linux/tegra_soctherm.h>
#endif
#include <linux/platform/tegra/common.h>
#include <linux/platform/tegra/mc.h>
#include <linux/clk/tegra.h>
#if defined(CONFIG_COMMON_CLK)
#include <soc/tegra/tegra-dvfs.h>
#endif
#ifdef CONFIG_TEGRA_BWMGR
#include <linux/platform/tegra/emc_bwmgr.h>
#endif

#include <linux/platform/tegra/tegra_emc.h>
#include <soc/tegra/chip-id.h>

#include <nvgpu/kmem.h>
#include <nvgpu/bug.h>
#include <nvgpu/enabled.h>
#include <nvgpu/nvhost.h>

#include <nvgpu/linux/dma.h>

#include "gk20a/gk20a.h"
#include "gm20b/clk_gm20b.h"

#include "scale.h"
#include "platform_gk20a.h"
#include "clk.h"
#include "os_linux.h"

#include "../../../arch/arm/mach-tegra/iomap.h"
#include <soc/tegra/pmc.h>

#define TEGRA_GK20A_BW_PER_FREQ 32
#define TEGRA_GM20B_BW_PER_FREQ 64
#define TEGRA_DDR3_BW_PER_FREQ 16
#define TEGRA_DDR4_BW_PER_FREQ 16
#define MC_CLIENT_GPU 34
#define PMC_GPU_RG_CNTRL_0 0x2d4

#ifdef CONFIG_COMMON_CLK
#define GPU_RAIL_NAME "vdd-gpu"
#else
#define GPU_RAIL_NAME "vdd_gpu"
#endif

extern struct device tegra_vpr_dev;

#ifdef CONFIG_TEGRA_BWMGR
struct gk20a_emc_params {
	unsigned long bw_ratio;
	unsigned long freq_last_set;
	struct tegra_bwmgr_client *bwmgr_cl;
};
#else
struct gk20a_emc_params {
	unsigned long bw_ratio;
	unsigned long freq_last_set;
};
#endif

#define MHZ_TO_HZ(x) ((x) * 1000000)
#define HZ_TO_MHZ(x) ((x) / 1000000)

static void gk20a_tegra_secure_page_destroy(struct gk20a *g,
		struct secure_page_buffer *secure_buffer)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
	dma_free_attrs(&tegra_vpr_dev, secure_buffer->size,
			(void *)(uintptr_t)secure_buffer->phys,
			secure_buffer->phys, __DMA_ATTR(attrs));

	secure_buffer->destroy = NULL;
}

static int gk20a_tegra_secure_alloc(struct gk20a *g,
		struct gr_ctx_buffer_desc *desc,
		size_t size)
{
	struct device *dev = dev_from_gk20a(g);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct secure_page_buffer *secure_buffer = &platform->secure_buffer;
	dma_addr_t phys;
	struct sg_table *sgt;
	struct page *page;
	int err = 0;
	size_t aligned_size = PAGE_ALIGN(size);

	if (nvgpu_mem_is_valid(&desc->mem))
		return 0;

	/* We ran out of preallocated memory */
	if (secure_buffer->used + aligned_size > secure_buffer->size) {
		nvgpu_err(platform->g, "failed to alloc %zu bytes of VPR, %zu/%zu used",
				size, secure_buffer->used, secure_buffer->size);
		return -ENOMEM;
	}

	phys = secure_buffer->phys + secure_buffer->used;

	sgt = nvgpu_kzalloc(platform->g, sizeof(*sgt));
	if (!sgt) {
		nvgpu_err(platform->g, "failed to allocate memory");
		return -ENOMEM;
	}
	err = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (err) {
		nvgpu_err(platform->g, "failed to allocate sg_table");
		goto fail_sgt;
	}
	page = phys_to_page(phys);
	sg_set_page(sgt->sgl, page, size, 0);
	/* This bypasses SMMU for VPR during gmmu_map. */
	sg_dma_address(sgt->sgl) = 0;

	desc->destroy = NULL;

	desc->mem.priv.sgt = sgt;
	desc->mem.size = size;
	desc->mem.aperture = APERTURE_SYSMEM;

	secure_buffer->used += aligned_size;

	return err;

fail_sgt:
	nvgpu_kfree(platform->g, sgt);
	return err;
}

/*
 * gk20a_tegra_get_emc_rate()
 *
 * This function returns the minimum emc clock based on gpu frequency
 */

static unsigned long gk20a_tegra_get_emc_rate(struct gk20a *g,
		struct gk20a_emc_params *emc_params)
{
	unsigned long gpu_freq, gpu_fmax_at_vmin;
	unsigned long emc_rate, emc_scale;

	gpu_freq = clk_get_rate(g->clk.tegra_clk);
	gpu_fmax_at_vmin = tegra_dvfs_get_fmax_at_vmin_safe_t(
		clk_get_parent(g->clk.tegra_clk));

	/* When scaling emc, account for the gpu load when the
	 * gpu frequency is less than or equal to fmax@vmin. */
	if (gpu_freq <= gpu_fmax_at_vmin)
		emc_scale = min(g->pmu.load_avg, g->emc3d_ratio);
	else
		emc_scale = g->emc3d_ratio;

	emc_rate =
		(HZ_TO_MHZ(gpu_freq) * emc_params->bw_ratio * emc_scale) / 1000;

	return MHZ_TO_HZ(emc_rate);
}

/*
 * gk20a_tegra_prescale(profile, freq)
 *
 * This function informs EDP about changed constraints.
 */

static void gk20a_tegra_prescale(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	u32 avg = 0;

	nvgpu_pmu_load_norm(g, &avg);
	tegra_edp_notify_gpu_load(avg, clk_get_rate(g->clk.tegra_clk));
}

/*
 * gk20a_tegra_calibrate_emc()
 *
 */

static void gk20a_tegra_calibrate_emc(struct device *dev,
		struct gk20a_emc_params *emc_params)
{
	enum tegra_chipid cid = tegra_get_chip_id();
	long gpu_bw, emc_bw;

	/* store gpu bw based on soc */
	switch (cid) {
	case TEGRA210:
		gpu_bw = TEGRA_GM20B_BW_PER_FREQ;
		break;
	case TEGRA124:
	case TEGRA132:
		gpu_bw = TEGRA_GK20A_BW_PER_FREQ;
		break;
	default:
		gpu_bw = 0;
		break;
	}

	/* TODO detect DDR type.
	 * Okay for now since DDR3 and DDR4 have the same BW ratio */
	emc_bw = TEGRA_DDR3_BW_PER_FREQ;

	/* Calculate the bandwidth ratio of gpu_freq <-> emc_freq
	 * NOTE the ratio must come out as an integer */
	emc_params->bw_ratio = (gpu_bw / emc_bw);
}

#ifdef CONFIG_TEGRA_BWMGR
#ifdef CONFIG_TEGRA_DVFS
static void gm20b_bwmgr_set_rate(struct gk20a_platform *platform, bool enb)
{
	struct gk20a_scale_profile *profile = platform->g->scale_profile;
	struct gk20a_emc_params *params;
	unsigned long rate;

	if (!profile || !profile->private_data)
		return;

	params = (struct gk20a_emc_params *)profile->private_data;
	rate = (enb) ? params->freq_last_set : 0;
	tegra_bwmgr_set_emc(params->bwmgr_cl, rate, TEGRA_BWMGR_SET_EMC_FLOOR);
}
#endif

static void gm20b_tegra_postscale(struct device *dev, unsigned long freq)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct gk20a_scale_profile *profile = platform->g->scale_profile;
	struct gk20a_emc_params *emc_params;
	unsigned long emc_rate;

	if (!profile || !profile->private_data)
		return;

	emc_params = profile->private_data;
	emc_rate = gk20a_tegra_get_emc_rate(get_gk20a(dev), emc_params);

	if (emc_rate > tegra_bwmgr_get_max_emc_rate())
		emc_rate = tegra_bwmgr_get_max_emc_rate();

	emc_params->freq_last_set = emc_rate;
	if (platform->is_railgated && platform->is_railgated(dev))
		return;

	tegra_bwmgr_set_emc(emc_params->bwmgr_cl, emc_rate,
			TEGRA_BWMGR_SET_EMC_FLOOR);

}

#endif

#if defined(CONFIG_TEGRA_DVFS)
/*
 * gk20a_tegra_is_railgated()
 *
 * Check status of gk20a power rail
 */

static bool gk20a_tegra_is_railgated(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	bool ret = false;

	if (!nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
		ret = !tegra_dvfs_is_rail_up(platform->gpu_rail);

	return ret;
}

/*
 * gm20b_tegra_railgate()
 *
 * Gate (disable) gm20b power rail
 */

static int gm20b_tegra_railgate(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	int ret = 0;

	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL) ||
	    !tegra_dvfs_is_rail_up(platform->gpu_rail))
		return 0;

	tegra_mc_flush(MC_CLIENT_GPU);

	udelay(10);

	/* enable clamp */
	tegra_pmc_writel_relaxed(0x1, PMC_GPU_RG_CNTRL_0);
	tegra_pmc_readl(PMC_GPU_RG_CNTRL_0);

	udelay(10);

	platform->reset_assert(dev);

	udelay(10);

	/*
	 * GPCPLL is already disabled before entering this function; reference
	 * clocks are enabled until now - disable them just before rail gating
	 */
	clk_disable_unprepare(platform->clk_reset);
	clk_disable_unprepare(platform->clk[0]);
	clk_disable_unprepare(platform->clk[1]);
	if (platform->clk[3])
		clk_disable_unprepare(platform->clk[3]);

	udelay(10);

	tegra_soctherm_gpu_tsens_invalidate(1);

	if (tegra_dvfs_is_rail_up(platform->gpu_rail)) {
		ret = tegra_dvfs_rail_power_down(platform->gpu_rail);
		if (ret)
			goto err_power_off;
	} else
		pr_info("No GPU regulator?\n");

#ifdef CONFIG_TEGRA_BWMGR
	gm20b_bwmgr_set_rate(platform, false);
#endif

	return 0;

err_power_off:
	nvgpu_err(platform->g, "Could not railgate GPU");
	return ret;
}


/*
 * gm20b_tegra_unrailgate()
 *
 * Ungate (enable) gm20b power rail
 */

static int gm20b_tegra_unrailgate(struct device *dev)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct gk20a *g = platform->g;
	int ret = 0;
	bool first = false;

	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
		return 0;

	ret = tegra_dvfs_rail_power_up(platform->gpu_rail);
	if (ret)
		return ret;

#ifdef CONFIG_TEGRA_BWMGR
	gm20b_bwmgr_set_rate(platform, true);
#endif

	tegra_soctherm_gpu_tsens_invalidate(0);

	if (!platform->clk_reset) {
		platform->clk_reset = clk_get(dev, "gpu_gate");
		if (IS_ERR(platform->clk_reset)) {
			nvgpu_err(g, "fail to get gpu reset clk");
			goto err_clk_on;
		}
	}

	if (!first) {
		ret = clk_prepare_enable(platform->clk_reset);
		if (ret) {
			nvgpu_err(g, "could not turn on gpu_gate");
			goto err_clk_on;
		}

		ret = clk_prepare_enable(platform->clk[0]);
		if (ret) {
			nvgpu_err(g, "could not turn on gpu pll");
			goto err_clk_on;
		}
		ret = clk_prepare_enable(platform->clk[1]);
		if (ret) {
			nvgpu_err(g, "could not turn on pwr clock");
			goto err_clk_on;
		}

		if (platform->clk[3]) {
			ret = clk_prepare_enable(platform->clk[3]);
			if (ret) {
				nvgpu_err(g, "could not turn on fuse clock");
				goto err_clk_on;
			}
		}
	}

	udelay(10);

	platform->reset_assert(dev);

	udelay(10);

	tegra_pmc_writel_relaxed(0, PMC_GPU_RG_CNTRL_0);
	tegra_pmc_readl(PMC_GPU_RG_CNTRL_0);

	udelay(10);

	clk_disable(platform->clk_reset);
	platform->reset_deassert(dev);
	clk_enable(platform->clk_reset);

	/* Flush MC after boot/railgate/SC7 */
	tegra_mc_flush(MC_CLIENT_GPU);

	udelay(10);

	tegra_mc_flush_done(MC_CLIENT_GPU);

	udelay(10);

	return 0;

err_clk_on:
	tegra_dvfs_rail_power_down(platform->gpu_rail);

	return ret;
}
#endif


static struct {
	char *name;
	unsigned long default_rate;
} tegra_gk20a_clocks[] = {
	{"gpu_ref", UINT_MAX},
	{"pll_p_out5", 204000000},
	{"emc", UINT_MAX},
	{"fuse", UINT_MAX},
};



/*
 * gk20a_tegra_get_clocks()
 *
 * This function finds clocks in tegra platform and populates
 * the clock information to gk20a platform data.
 */

static int gk20a_tegra_get_clocks(struct device *dev)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	char devname[16];
	unsigned int i;
	int ret = 0;

	BUG_ON(GK20A_CLKS_MAX < ARRAY_SIZE(tegra_gk20a_clocks));

	snprintf(devname, sizeof(devname), "tegra_%s", dev_name(dev));

	platform->num_clks = 0;
	for (i = 0; i < ARRAY_SIZE(tegra_gk20a_clocks); i++) {
		long rate = tegra_gk20a_clocks[i].default_rate;
		struct clk *c;

		c = clk_get_sys(devname, tegra_gk20a_clocks[i].name);
		if (IS_ERR(c)) {
			ret = PTR_ERR(c);
			goto err_get_clock;
		}
		rate = clk_round_rate(c, rate);
		clk_set_rate(c, rate);
		platform->clk[i] = c;
		if (i == 0)
			platform->cached_rate = rate;
	}
	platform->num_clks = i;

	return 0;

err_get_clock:

	while (i--)
		clk_put(platform->clk[i]);
	return ret;
}

#if defined(CONFIG_RESET_CONTROLLER) && defined(CONFIG_COMMON_CLK)
static int gm20b_tegra_reset_assert(struct device *dev)
{
	struct gk20a_platform *platform = gk20a_get_platform(dev);

	if (!platform->reset_control) {
		WARN(1, "Reset control not initialized\n");
		return -ENOSYS;
	}

	return reset_control_assert(platform->reset_control);
}

static int gm20b_tegra_reset_deassert(struct device *dev)
{
	struct gk20a_platform *platform = gk20a_get_platform(dev);

	if (!platform->reset_control) {
		WARN(1, "Reset control not initialized\n");
		return -ENOSYS;
	}

	return reset_control_deassert(platform->reset_control);
}
#endif

static void gk20a_tegra_scale_init(struct device *dev)
{
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a_scale_profile *profile = platform->g->scale_profile;
	struct gk20a_emc_params *emc_params;
	struct gk20a *g = platform->g;

	if (!profile)
		return;

	if (profile->private_data)
		return;

	emc_params = nvgpu_kzalloc(platform->g, sizeof(*emc_params));
	if (!emc_params)
		return;

	emc_params->freq_last_set = -1;
	gk20a_tegra_calibrate_emc(dev, emc_params);

#ifdef CONFIG_TEGRA_BWMGR
	emc_params->bwmgr_cl = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_GPU);
	if (!emc_params->bwmgr_cl) {
		nvgpu_log_info(g, "%s Missing GPU BWMGR client\n", __func__);
		return;
	}
#endif

	profile->private_data = emc_params;
}

static void gk20a_tegra_scale_exit(struct device *dev)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct gk20a_scale_profile *profile = platform->g->scale_profile;
	struct gk20a_emc_params *emc_params;

	if (!profile)
		return;

	emc_params = profile->private_data;
#ifdef CONFIG_TEGRA_BWMGR
	tegra_bwmgr_unregister(emc_params->bwmgr_cl);
#endif

	nvgpu_kfree(platform->g, profile->private_data);
}

void gk20a_tegra_debug_dump(struct device *dev)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = platform->g;

	if (g->nvhost_dev)
		nvgpu_nvhost_debug_dump_device(g->nvhost_dev);
#endif
}

int gk20a_tegra_busy(struct device *dev)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = platform->g;

	if (g->nvhost_dev)
		return nvgpu_nvhost_module_busy_ext(g->nvhost_dev);
#endif
	return 0;
}

void gk20a_tegra_idle(struct device *dev)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = platform->g;

	if (g->nvhost_dev)
		nvgpu_nvhost_module_idle_ext(g->nvhost_dev);
#endif
}

int gk20a_tegra_init_secure_alloc(struct gk20a_platform *platform)
{
	struct gk20a *g = platform->g;
	struct secure_page_buffer *secure_buffer = &platform->secure_buffer;
	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t iova;

	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
		return 0;

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
	(void)dma_alloc_attrs(&tegra_vpr_dev, platform->secure_buffer_size, &iova,
			GFP_KERNEL, __DMA_ATTR(attrs));
	/* Some platforms disable VPR. In that case VPR allocations always
	 * fail. Just disable VPR usage in nvgpu in that case. */
	if (dma_mapping_error(&tegra_vpr_dev, iova))
		return 0;

	secure_buffer->size = platform->secure_buffer_size;
	secure_buffer->phys = iova;
	secure_buffer->destroy = gk20a_tegra_secure_page_destroy;

	g->ops.secure_alloc = gk20a_tegra_secure_alloc;
	__nvgpu_set_enabled(g, NVGPU_SUPPORT_VPR, true);

	return 0;
}

#ifdef CONFIG_COMMON_CLK
static struct clk *gk20a_clk_get(struct gk20a *g)
{
	if (!g->clk.tegra_clk) {
		struct clk *clk;
		char clk_dev_id[32];
		struct device *dev = dev_from_gk20a(g);

		snprintf(clk_dev_id, 32, "tegra_%s", dev_name(dev));

		clk = clk_get_sys(clk_dev_id, "gpu");
		if (IS_ERR(clk)) {
			nvgpu_err(g, "fail to get tegra gpu clk %s/gpu\n",
				clk_dev_id);
			return NULL;
		}
		g->clk.tegra_clk = clk;
	}

	return g->clk.tegra_clk;
}

static int gm20b_clk_prepare_ops(struct clk_hw *hw)
{
	struct clk_gk20a *clk = to_clk_gk20a(hw);
	return gm20b_clk_prepare(clk);
}

static void gm20b_clk_unprepare_ops(struct clk_hw *hw)
{
	struct clk_gk20a *clk = to_clk_gk20a(hw);
	gm20b_clk_unprepare(clk);
}

static int gm20b_clk_is_prepared_ops(struct clk_hw *hw)
{
	struct clk_gk20a *clk = to_clk_gk20a(hw);
	return gm20b_clk_is_prepared(clk);
}

static unsigned long gm20b_recalc_rate_ops(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_gk20a *clk = to_clk_gk20a(hw);
	return gm20b_recalc_rate(clk, parent_rate);
}

static int gm20b_gpcclk_set_rate_ops(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_gk20a *clk = to_clk_gk20a(hw);
	return gm20b_gpcclk_set_rate(clk, rate, parent_rate);
}

static long gm20b_round_rate_ops(struct clk_hw *hw, unsigned long rate,
		unsigned long *parent_rate)
{
	struct clk_gk20a *clk = to_clk_gk20a(hw);
	return gm20b_round_rate(clk, rate, parent_rate);
}

static const struct clk_ops gm20b_clk_ops = {
	.prepare = gm20b_clk_prepare_ops,
	.unprepare = gm20b_clk_unprepare_ops,
	.is_prepared = gm20b_clk_is_prepared_ops,
	.recalc_rate = gm20b_recalc_rate_ops,
	.set_rate = gm20b_gpcclk_set_rate_ops,
	.round_rate = gm20b_round_rate_ops,
};

static int gm20b_register_gpcclk(struct gk20a *g)
{
	const char *parent_name = "pllg_ref";
	struct clk_gk20a *clk = &g->clk;
	struct clk_init_data init;
	struct clk *c;
	int err = 0;

	/* make sure the clock is available */
	if (!gk20a_clk_get(g))
		return -ENOSYS;

	err = gm20b_init_clk_setup_sw(g);
	if (err)
		return err;

	init.name = "gpcclk";
	init.ops = &gm20b_clk_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.flags = 0;

	/* Data in .init is copied by clk_register(), so stack variable OK */
	clk->hw.init = &init;
	c = clk_register(dev_from_gk20a(g), &clk->hw);
	if (IS_ERR(c)) {
		nvgpu_err(g, "Failed to register GPCPLL clock");
		return -EINVAL;
	}

	clk->g = g;
	clk_register_clkdev(c, "gpcclk", "gpcclk");

	return err;
}
#endif /* CONFIG_COMMON_CLK */

static int gk20a_tegra_probe(struct device *dev)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct device_node *np = dev->of_node;
	bool joint_xpu_rail = false;
	int ret;
	struct gk20a *g = platform->g;

#ifdef CONFIG_COMMON_CLK
	/* DVFS is not guaranteed to be initialized at the time of probe on
	 * kernels with Common Clock Framework enabled.
	 */
	if (!platform->gpu_rail) {
		platform->gpu_rail = tegra_dvfs_get_rail_by_name(GPU_RAIL_NAME);
		if (!platform->gpu_rail) {
			nvgpu_log_info(g, "deferring probe no gpu_rail");
			return -EPROBE_DEFER;
		}
	}

	if (!tegra_dvfs_is_rail_ready(platform->gpu_rail)) {
		nvgpu_log_info(g, "deferring probe gpu_rail not ready");
		return -EPROBE_DEFER;
	}
#endif

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	ret = nvgpu_get_nvhost_dev(platform->g);
	if (ret)
		return ret;
#endif

#ifdef CONFIG_OF
	joint_xpu_rail = of_property_read_bool(of_chosen,
				"nvidia,tegra-joint_xpu_rail");
#endif

	if (joint_xpu_rail) {
		nvgpu_log_info(g, "XPU rails are joint\n");
		platform->g->can_railgate = false;
	}

	platform->g->clk.gpc_pll.id = GK20A_GPC_PLL;
	if (tegra_get_chip_id() == TEGRA210) {
		/* WAR for bug 1547668: Disable railgating and scaling
		   irrespective of platform data if the rework was not made. */
		np = of_find_node_by_path("/gpu-dvfs-rework");
		if (!(np && of_device_is_available(np))) {
			platform->devfreq_governor = "";
			dev_warn(dev, "board does not support scaling");
		}
		platform->g->clk.gpc_pll.id = GM20B_GPC_PLL_B1;
		if (tegra_chip_get_revision() > TEGRA210_REVISION_A04p)
			platform->g->clk.gpc_pll.id = GM20B_GPC_PLL_C1;
	}

	if (tegra_get_chip_id() == TEGRA132)
		platform->soc_name = "tegra13x";

	gk20a_tegra_get_clocks(dev);
	nvgpu_linux_init_clk_support(platform->g);
	ret = gk20a_tegra_init_secure_alloc(platform);
	if (ret)
		return ret;

	if (platform->clk_register) {
		ret = platform->clk_register(platform->g);
		if (ret)
			return ret;
	}

	return 0;
}

static int gk20a_tegra_late_probe(struct device *dev)
{
	return 0;
}

static int gk20a_tegra_remove(struct device *dev)
{
	/* deinitialise tegra specific scaling quirks */
	gk20a_tegra_scale_exit(dev);

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	nvgpu_free_nvhost_dev(get_gk20a(dev));
#endif

	return 0;
}

static int gk20a_tegra_suspend(struct device *dev)
{
	tegra_edp_notify_gpu_load(0, 0);
	return 0;
}

#if defined(CONFIG_COMMON_CLK)
static long gk20a_round_clk_rate(struct device *dev, unsigned long rate)
{
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = platform->g;

	/* make sure the clock is available */
	if (!gk20a_clk_get(g))
		return rate;

	return clk_round_rate(clk_get_parent(g->clk.tegra_clk), rate);
}

static int gk20a_clk_get_freqs(struct device *dev,
		unsigned long **freqs, int *num_freqs)
{
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = platform->g;

	/* make sure the clock is available */
	if (!gk20a_clk_get(g))
		return -ENOSYS;

	return tegra_dvfs_get_freqs(clk_get_parent(g->clk.tegra_clk),
				freqs, num_freqs);
}
#endif

struct gk20a_platform gm20b_tegra_platform = {
	.has_syncpoints = true,
	.aggressive_sync_destroy_thresh = 64,

	/* power management configuration */
	.railgate_delay_init = 500,
	.can_railgate_init = true,
	.can_elpg_init = true,
	.enable_slcg = true,
	.enable_blcg = true,
	.enable_elcg = true,
	.can_slcg = true,
	.can_blcg = true,
	.can_elcg = true,
	.enable_elpg = true,
	.enable_aelpg = true,
	.enable_perfmon = true,
	.ptimer_src_freq = 19200000,

	.force_reset_in_do_idle = false,

	.ch_wdt_timeout_ms = 5000,

	.probe = gk20a_tegra_probe,
	.late_probe = gk20a_tegra_late_probe,
	.remove = gk20a_tegra_remove,
	/* power management callbacks */
	.suspend = gk20a_tegra_suspend,

#if defined(CONFIG_TEGRA_DVFS)
	.railgate = gm20b_tegra_railgate,
	.unrailgate = gm20b_tegra_unrailgate,
	.is_railgated = gk20a_tegra_is_railgated,
#endif

	.busy = gk20a_tegra_busy,
	.idle = gk20a_tegra_idle,

#if defined(CONFIG_RESET_CONTROLLER) && defined(CONFIG_COMMON_CLK)
	.reset_assert = gm20b_tegra_reset_assert,
	.reset_deassert = gm20b_tegra_reset_deassert,
#else
	.reset_assert = gk20a_tegra_reset_assert,
	.reset_deassert = gk20a_tegra_reset_deassert,
#endif

#if defined(CONFIG_COMMON_CLK)
	.clk_round_rate = gk20a_round_clk_rate,
	.get_clk_freqs = gk20a_clk_get_freqs,
#endif

#ifdef CONFIG_COMMON_CLK
	.clk_register = gm20b_register_gpcclk,
#endif

	/* frequency scaling configuration */
	.initscale = gk20a_tegra_scale_init,
	.prescale = gk20a_tegra_prescale,
#ifdef CONFIG_TEGRA_BWMGR
	.postscale = gm20b_tegra_postscale,
#endif
	.devfreq_governor = "nvhost_podgov",
	.qos_notify = gk20a_scale_qos_notify,

	.dump_platform_dependencies = gk20a_tegra_debug_dump,

#ifdef CONFIG_NVGPU_SUPPORT_CDE
	.has_cde = true,
#endif

	.soc_name = "tegra21x",

	.unified_memory = true,
	.dma_mask = DMA_BIT_MASK(34),

	.secure_buffer_size = 335872,
};