diff options
Diffstat (limited to 'include/os/linux/platform_gk20a_tegra.c')
-rw-r--r-- | include/os/linux/platform_gk20a_tegra.c | 966 |
1 files changed, 0 insertions, 966 deletions
diff --git a/include/os/linux/platform_gk20a_tegra.c b/include/os/linux/platform_gk20a_tegra.c deleted file mode 100644 index c39e4f0..0000000 --- a/include/os/linux/platform_gk20a_tegra.c +++ /dev/null | |||
@@ -1,966 +0,0 @@ | |||
1 | /* | ||
2 | * GK20A Tegra Platform Interface | ||
3 | * | ||
4 | * Copyright (c) 2014-2019, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/clkdev.h> | ||
17 | #include <linux/of_platform.h> | ||
18 | #include <linux/debugfs.h> | ||
19 | #include <linux/platform_data/tegra_edp.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <uapi/linux/nvgpu.h> | ||
22 | #include <linux/dma-buf.h> | ||
23 | #include <linux/dma-attrs.h> | ||
24 | #include <linux/nvmap.h> | ||
25 | #include <linux/reset.h> | ||
26 | #if defined(CONFIG_TEGRA_DVFS) | ||
27 | #include <linux/tegra_soctherm.h> | ||
28 | #endif | ||
29 | #include <linux/platform/tegra/common.h> | ||
30 | #include <linux/platform/tegra/mc.h> | ||
31 | #include <linux/clk/tegra.h> | ||
32 | #if defined(CONFIG_COMMON_CLK) | ||
33 | #include <soc/tegra/tegra-dvfs.h> | ||
34 | #endif | ||
35 | #ifdef CONFIG_TEGRA_BWMGR | ||
36 | #include <linux/platform/tegra/emc_bwmgr.h> | ||
37 | #endif | ||
38 | |||
39 | #include <linux/platform/tegra/tegra_emc.h> | ||
40 | #include <soc/tegra/chip-id.h> | ||
41 | |||
42 | #include <nvgpu/kmem.h> | ||
43 | #include <nvgpu/bug.h> | ||
44 | #include <nvgpu/enabled.h> | ||
45 | #include <nvgpu/gk20a.h> | ||
46 | #include <nvgpu/nvhost.h> | ||
47 | |||
48 | #include <nvgpu/linux/dma.h> | ||
49 | |||
50 | #include "gm20b/clk_gm20b.h" | ||
51 | |||
52 | #include "scale.h" | ||
53 | #include "platform_gk20a.h" | ||
54 | #include "clk.h" | ||
55 | #include "os_linux.h" | ||
56 | |||
57 | #include "../../../arch/arm/mach-tegra/iomap.h" | ||
58 | #include <soc/tegra/pmc.h> | ||
59 | |||
/* Memory-bandwidth demand per unit of GPU frequency, per SoC/DDR type;
 * combined into an integer ratio by gk20a_tegra_calibrate_emc(). */
#define TEGRA_GK20A_BW_PER_FREQ 32
#define TEGRA_GM20B_BW_PER_FREQ 64
#define TEGRA_DDR3_BW_PER_FREQ 16
#define TEGRA_DDR4_BW_PER_FREQ 16
/* Memory-controller client id of the GPU, used for tegra_mc_flush(). */
#define MC_CLIENT_GPU 34
/* PMC register offset controlling the GPU rail-gate clamp. */
#define PMC_GPU_RG_CNTRL_0 0x2d4

/* The DVFS rail name differs between CCF and legacy clock frameworks. */
#ifdef CONFIG_COMMON_CLK
#define GPU_RAIL_NAME "vdd-gpu"
#else
#define GPU_RAIL_NAME "vdd_gpu"
#endif

/* VPR carveout device, defined by Tegra platform code elsewhere. */
extern struct device tegra_vpr_dev;
74 | |||
/*
 * Per-scale-profile EMC scaling state.
 *
 * Previously this struct was defined twice under #ifdef/#else with the
 * first two members duplicated; consolidated into a single definition
 * with only the bwmgr client conditional. Member layout is unchanged.
 */
struct gk20a_emc_params {
	unsigned long bw_ratio;		/* GPU:EMC bandwidth-per-frequency ratio */
	unsigned long freq_last_set;	/* last EMC floor requested, in Hz */
#ifdef CONFIG_TEGRA_BWMGR
	struct tegra_bwmgr_client *bwmgr_cl;	/* EMC bandwidth-manager handle */
#endif
};
87 | |||
/* MHz <-> Hz conversions; HZ_TO_MHZ truncates via integer division. */
#define MHZ_TO_HZ(x) ((x) * 1000000)
#define HZ_TO_MHZ(x) ((x) / 1000000)
90 | |||
/*
 * Free the preallocated VPR (secure) buffer back to the VPR device.
 * Counterpart of the dma_alloc_attrs() in gk20a_tegra_init_secure_alloc();
 * clears the destroy hook so it cannot run twice.
 */
static void gk20a_tegra_secure_page_destroy(struct gk20a *g,
				struct secure_page_buffer *secure_buffer)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
	/* Allocated with NO_KERNEL_MAPPING, so the "cpu address" passed to
	 * dma_free_attrs() is the physical address itself. */
	dma_free_attrs(&tegra_vpr_dev, secure_buffer->size,
			(void *)(uintptr_t)secure_buffer->phys,
			secure_buffer->phys, __DMA_ATTR(attrs));

	secure_buffer->destroy = NULL;
}
102 | |||
/*
 * Carve a GR context buffer allocation out of the preallocated VPR
 * secure buffer.
 *
 * Returns 0 on success (or if desc->mem is already valid), -ENOMEM when
 * the carveout is exhausted or the sg_table cannot be allocated.
 *
 * NOTE(review): this is a bump allocator — secure_buffer->used only ever
 * grows and individual allocations are never returned (desc->destroy is
 * deliberately left NULL); presumably the whole buffer is released at
 * once via gk20a_tegra_secure_page_destroy().
 */
static int gk20a_tegra_secure_alloc(struct gk20a *g,
				    struct gr_ctx_buffer_desc *desc,
				    size_t size)
{
	struct device *dev = dev_from_gk20a(g);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct secure_page_buffer *secure_buffer = &platform->secure_buffer;
	dma_addr_t phys;
	struct sg_table *sgt;
	struct page *page;
	int err = 0;
	size_t aligned_size = PAGE_ALIGN(size);

	/* Already allocated on a previous call; nothing to do. */
	if (nvgpu_mem_is_valid(&desc->mem))
		return 0;

	/* We ran out of preallocated memory */
	if (secure_buffer->used + aligned_size > secure_buffer->size) {
		nvgpu_err(platform->g, "failed to alloc %zu bytes of VPR, %zu/%zu used",
			  size, secure_buffer->used, secure_buffer->size);
		return -ENOMEM;
	}

	/* Next free offset inside the carveout. */
	phys = secure_buffer->phys + secure_buffer->used;

	sgt = nvgpu_kzalloc(platform->g, sizeof(*sgt));
	if (!sgt) {
		nvgpu_err(platform->g, "failed to allocate memory");
		return -ENOMEM;
	}
	err = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (err) {
		nvgpu_err(platform->g, "failed to allocate sg_table");
		goto fail_sgt;
	}
	page = phys_to_page(phys);
	sg_set_page(sgt->sgl, page, size, 0);
	/* This bypasses SMMU for VPR during gmmu_map. */
	sg_dma_address(sgt->sgl) = 0;

	desc->destroy = NULL;

	desc->mem.priv.sgt = sgt;
	desc->mem.size = size;
	desc->mem.aperture = APERTURE_SYSMEM;

	/* Account in page-aligned units even though sg entry uses 'size'. */
	secure_buffer->used += aligned_size;

	return err;

fail_sgt:
	nvgpu_kfree(platform->g, sgt);
	return err;
}
157 | |||
/*
 * gk20a_tegra_get_emc_rate()
 *
 * This function returns the minimum emc clock based on gpu frequency.
 * The result is gpu_freq(MHz) * bw_ratio * emc_scale / 1000, converted
 * back to Hz, where emc_scale is a permille-style factor derived from
 * the 3D EMC ratio and (below fmax@vmin) the measured PMU load.
 */

static unsigned long gk20a_tegra_get_emc_rate(struct gk20a *g,
					struct gk20a_emc_params *emc_params)
{
	unsigned long gpu_freq, gpu_fmax_at_vmin;
	unsigned long emc_rate, emc_scale;

	gpu_freq = clk_get_rate(g->clk.tegra_clk);
	/* Highest GPU frequency reachable at minimum voltage. */
	gpu_fmax_at_vmin = tegra_dvfs_get_fmax_at_vmin_safe_t(
		clk_get_parent(g->clk.tegra_clk));

	/* When scaling emc, account for the gpu load when the
	 * gpu frequency is less than or equal to fmax@vmin. */
	if (gpu_freq <= gpu_fmax_at_vmin)
		emc_scale = min(g->pmu.load_avg, g->emc3d_ratio);
	else
		emc_scale = g->emc3d_ratio;

	emc_rate =
		(HZ_TO_MHZ(gpu_freq) * emc_params->bw_ratio * emc_scale) / 1000;

	return MHZ_TO_HZ(emc_rate);
}
186 | |||
/*
 * gk20a_tegra_prescale()
 *
 * Called before a frequency change: reports the current normalized PMU
 * load and GPU rate to the EDP subsystem so it can adjust constraints.
 */

static void gk20a_tegra_prescale(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	u32 avg = 0;

	nvgpu_pmu_load_norm(g, &avg);
	tegra_edp_notify_gpu_load(avg, clk_get_rate(g->clk.tegra_clk));
}
201 | |||
202 | /* | ||
203 | * gk20a_tegra_calibrate_emc() | ||
204 | * | ||
205 | */ | ||
206 | |||
207 | static void gk20a_tegra_calibrate_emc(struct device *dev, | ||
208 | struct gk20a_emc_params *emc_params) | ||
209 | { | ||
210 | enum tegra_chipid cid = tegra_get_chip_id(); | ||
211 | long gpu_bw, emc_bw; | ||
212 | |||
213 | /* store gpu bw based on soc */ | ||
214 | switch (cid) { | ||
215 | case TEGRA210: | ||
216 | gpu_bw = TEGRA_GM20B_BW_PER_FREQ; | ||
217 | break; | ||
218 | case TEGRA124: | ||
219 | case TEGRA132: | ||
220 | gpu_bw = TEGRA_GK20A_BW_PER_FREQ; | ||
221 | break; | ||
222 | default: | ||
223 | gpu_bw = 0; | ||
224 | break; | ||
225 | } | ||
226 | |||
227 | /* TODO detect DDR type. | ||
228 | * Okay for now since DDR3 and DDR4 have the same BW ratio */ | ||
229 | emc_bw = TEGRA_DDR3_BW_PER_FREQ; | ||
230 | |||
231 | /* Calculate the bandwidth ratio of gpu_freq <-> emc_freq | ||
232 | * NOTE the ratio must come out as an integer */ | ||
233 | emc_params->bw_ratio = (gpu_bw / emc_bw); | ||
234 | } | ||
235 | |||
236 | #ifdef CONFIG_TEGRA_BWMGR | ||
237 | #ifdef CONFIG_TEGRA_DVFS | ||
238 | static void gm20b_bwmgr_set_rate(struct gk20a_platform *platform, bool enb) | ||
239 | { | ||
240 | struct gk20a_scale_profile *profile = platform->g->scale_profile; | ||
241 | struct gk20a_emc_params *params; | ||
242 | unsigned long rate; | ||
243 | |||
244 | if (!profile || !profile->private_data) | ||
245 | return; | ||
246 | |||
247 | params = (struct gk20a_emc_params *)profile->private_data; | ||
248 | rate = (enb) ? params->freq_last_set : 0; | ||
249 | tegra_bwmgr_set_emc(params->bwmgr_cl, rate, TEGRA_BWMGR_SET_EMC_FLOOR); | ||
250 | } | ||
251 | #endif | ||
252 | |||
/*
 * Called after a GPU frequency change: recompute the EMC floor for the
 * new rate, clamp it to the maximum EMC rate, and apply it via bwmgr.
 * The computed rate is remembered in freq_last_set even while railgated
 * so gm20b_bwmgr_set_rate() can restore it on unrailgate.
 */
static void gm20b_tegra_postscale(struct device *dev, unsigned long freq)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct gk20a_scale_profile *profile = platform->g->scale_profile;
	struct gk20a_emc_params *emc_params;
	unsigned long emc_rate;

	if (!profile || !profile->private_data)
		return;

	emc_params = profile->private_data;
	emc_rate = gk20a_tegra_get_emc_rate(get_gk20a(dev), emc_params);

	if (emc_rate > tegra_bwmgr_get_max_emc_rate())
		emc_rate = tegra_bwmgr_get_max_emc_rate();

	emc_params->freq_last_set = emc_rate;
	/* Don't touch the EMC floor while the rail is gated; the saved
	 * rate is re-applied when the rail comes back up. */
	if (platform->is_railgated && platform->is_railgated(dev))
		return;

	tegra_bwmgr_set_emc(emc_params->bwmgr_cl, emc_rate,
			TEGRA_BWMGR_SET_EMC_FLOOR);

}
277 | |||
278 | #endif | ||
279 | |||
280 | #if defined(CONFIG_TEGRA_DVFS) | ||
281 | /* | ||
282 | * gk20a_tegra_is_railgated() | ||
283 | * | ||
284 | * Check status of gk20a power rail | ||
285 | */ | ||
286 | |||
287 | static bool gk20a_tegra_is_railgated(struct device *dev) | ||
288 | { | ||
289 | struct gk20a *g = get_gk20a(dev); | ||
290 | struct gk20a_platform *platform = dev_get_drvdata(dev); | ||
291 | bool ret = false; | ||
292 | |||
293 | if (!nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) | ||
294 | ret = !tegra_dvfs_is_rail_up(platform->gpu_rail); | ||
295 | |||
296 | return ret; | ||
297 | } | ||
298 | |||
/*
 * gm20b_tegra_railgate()
 *
 * Gate (disable) gm20b power rail.
 *
 * Sequence: flush MC traffic, engage the PMC clamp, assert reset,
 * disable reference clocks, invalidate GPU temperature sensors, then
 * power the rail down and drop the EMC floor. The udelay(10)s space the
 * hardware steps apart; do not reorder this sequence.
 */

static int gm20b_tegra_railgate(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	int ret = 0;

	/* Nothing to do on FMODEL or if the rail is already down. */
	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL) ||
	    !tegra_dvfs_is_rail_up(platform->gpu_rail))
		return 0;

	tegra_mc_flush(MC_CLIENT_GPU);

	udelay(10);

	/* enable clamp */
	tegra_pmc_writel_relaxed(0x1, PMC_GPU_RG_CNTRL_0);
	/* Read back to ensure the write has posted before proceeding. */
	tegra_pmc_readl(PMC_GPU_RG_CNTRL_0);

	udelay(10);

	platform->reset_assert(dev);

	udelay(10);

	/*
	 * GPCPLL is already disabled before entering this function; reference
	 * clocks are enabled until now - disable them just before rail gating
	 */
	clk_disable_unprepare(platform->clk_reset);
	clk_disable_unprepare(platform->clk[0]);
	clk_disable_unprepare(platform->clk[1]);
	if (platform->clk[3])
		clk_disable_unprepare(platform->clk[3]);

	udelay(10);

	tegra_soctherm_gpu_tsens_invalidate(1);

	if (tegra_dvfs_is_rail_up(platform->gpu_rail)) {
		ret = tegra_dvfs_rail_power_down(platform->gpu_rail);
		if (ret)
			goto err_power_off;
	} else
		pr_info("No GPU regulator?\n");

#ifdef CONFIG_TEGRA_BWMGR
	/* Release the EMC floor while the GPU is off. */
	gm20b_bwmgr_set_rate(platform, false);
#endif

	return 0;

err_power_off:
	nvgpu_err(platform->g, "Could not railgate GPU");
	return ret;
}
360 | |||
361 | |||
362 | /* | ||
363 | * gm20b_tegra_unrailgate() | ||
364 | * | ||
365 | * Ungate (enable) gm20b power rail | ||
366 | */ | ||
367 | |||
368 | static int gm20b_tegra_unrailgate(struct device *dev) | ||
369 | { | ||
370 | struct gk20a_platform *platform = dev_get_drvdata(dev); | ||
371 | struct gk20a *g = platform->g; | ||
372 | int ret = 0; | ||
373 | bool first = false; | ||
374 | |||
375 | if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) | ||
376 | return 0; | ||
377 | |||
378 | ret = tegra_dvfs_rail_power_up(platform->gpu_rail); | ||
379 | if (ret) | ||
380 | return ret; | ||
381 | |||
382 | #ifdef CONFIG_TEGRA_BWMGR | ||
383 | gm20b_bwmgr_set_rate(platform, true); | ||
384 | #endif | ||
385 | |||
386 | tegra_soctherm_gpu_tsens_invalidate(0); | ||
387 | |||
388 | if (!platform->clk_reset) { | ||
389 | platform->clk_reset = clk_get(dev, "gpu_gate"); | ||
390 | if (IS_ERR(platform->clk_reset)) { | ||
391 | nvgpu_err(g, "fail to get gpu reset clk"); | ||
392 | goto err_clk_on; | ||
393 | } | ||
394 | } | ||
395 | |||
396 | if (!first) { | ||
397 | ret = clk_prepare_enable(platform->clk_reset); | ||
398 | if (ret) { | ||
399 | nvgpu_err(g, "could not turn on gpu_gate"); | ||
400 | goto err_clk_on; | ||
401 | } | ||
402 | |||
403 | ret = clk_prepare_enable(platform->clk[0]); | ||
404 | if (ret) { | ||
405 | nvgpu_err(g, "could not turn on gpu pll"); | ||
406 | goto err_clk_on; | ||
407 | } | ||
408 | ret = clk_prepare_enable(platform->clk[1]); | ||
409 | if (ret) { | ||
410 | nvgpu_err(g, "could not turn on pwr clock"); | ||
411 | goto err_clk_on; | ||
412 | } | ||
413 | |||
414 | if (platform->clk[3]) { | ||
415 | ret = clk_prepare_enable(platform->clk[3]); | ||
416 | if (ret) { | ||
417 | nvgpu_err(g, "could not turn on fuse clock"); | ||
418 | goto err_clk_on; | ||
419 | } | ||
420 | } | ||
421 | } | ||
422 | |||
423 | udelay(10); | ||
424 | |||
425 | platform->reset_assert(dev); | ||
426 | |||
427 | udelay(10); | ||
428 | |||
429 | tegra_pmc_writel_relaxed(0, PMC_GPU_RG_CNTRL_0); | ||
430 | tegra_pmc_readl(PMC_GPU_RG_CNTRL_0); | ||
431 | |||
432 | udelay(10); | ||
433 | |||
434 | clk_disable(platform->clk_reset); | ||
435 | platform->reset_deassert(dev); | ||
436 | clk_enable(platform->clk_reset); | ||
437 | |||
438 | /* Flush MC after boot/railgate/SC7 */ | ||
439 | tegra_mc_flush(MC_CLIENT_GPU); | ||
440 | |||
441 | udelay(10); | ||
442 | |||
443 | tegra_mc_flush_done(MC_CLIENT_GPU); | ||
444 | |||
445 | udelay(10); | ||
446 | |||
447 | return 0; | ||
448 | |||
449 | err_clk_on: | ||
450 | tegra_dvfs_rail_power_down(platform->gpu_rail); | ||
451 | |||
452 | return ret; | ||
453 | } | ||
454 | #endif | ||
455 | |||
456 | |||
/*
 * Clocks acquired by gk20a_tegra_get_clocks(); index i lands in
 * platform->clk[i]. UINT_MAX requests the highest rate that
 * clk_round_rate() will grant.
 */
static struct {
	char *name;
	unsigned long default_rate;
} tegra_gk20a_clocks[] = {
	{"gpu_ref", UINT_MAX},
	{"pll_p_out5", 204000000},
	{"emc", UINT_MAX},
	{"fuse", UINT_MAX},
};
466 | |||
467 | |||
468 | |||
/*
 * gk20a_tegra_get_clocks()
 *
 * This function finds clocks in tegra platform and populates
 * the clock information to gk20a platform data. Each clock in
 * tegra_gk20a_clocks[] is looked up under the "tegra_<dev>" system
 * device name, set to (the rounded form of) its default rate, and
 * stored into platform->clk[]. On any lookup failure all clocks
 * obtained so far are released and the clk_get_sys() error is returned.
 */

static int gk20a_tegra_get_clocks(struct device *dev)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	char devname[16];
	unsigned int i;
	int ret = 0;

	/* platform->clk[] must be able to hold the whole table. */
	BUG_ON(GK20A_CLKS_MAX < ARRAY_SIZE(tegra_gk20a_clocks));

	snprintf(devname, sizeof(devname), "tegra_%s", dev_name(dev));

	platform->num_clks = 0;
	for (i = 0; i < ARRAY_SIZE(tegra_gk20a_clocks); i++) {
		long rate = tegra_gk20a_clocks[i].default_rate;
		struct clk *c;

		c = clk_get_sys(devname, tegra_gk20a_clocks[i].name);
		if (IS_ERR(c)) {
			ret = PTR_ERR(c);
			goto err_get_clock;
		}
		rate = clk_round_rate(c, rate);
		clk_set_rate(c, rate);
		platform->clk[i] = c;
	}
	platform->num_clks = i;

	return 0;

err_get_clock:

	/* Release clocks acquired before the failure. */
	while (i--)
		clk_put(platform->clk[i]);
	return ret;
}
511 | |||
512 | #if defined(CONFIG_RESET_CONTROLLER) && defined(CONFIG_COMMON_CLK) | ||
513 | static int gm20b_tegra_reset_assert(struct device *dev) | ||
514 | { | ||
515 | struct gk20a_platform *platform = gk20a_get_platform(dev); | ||
516 | |||
517 | if (!platform->reset_control) { | ||
518 | WARN(1, "Reset control not initialized\n"); | ||
519 | return -ENOSYS; | ||
520 | } | ||
521 | |||
522 | return reset_control_assert(platform->reset_control); | ||
523 | } | ||
524 | |||
525 | static int gm20b_tegra_reset_deassert(struct device *dev) | ||
526 | { | ||
527 | struct gk20a_platform *platform = gk20a_get_platform(dev); | ||
528 | |||
529 | if (!platform->reset_control) { | ||
530 | WARN(1, "Reset control not initialized\n"); | ||
531 | return -ENOSYS; | ||
532 | } | ||
533 | |||
534 | return reset_control_deassert(platform->reset_control); | ||
535 | } | ||
536 | #endif | ||
537 | |||
538 | static void gk20a_tegra_scale_init(struct device *dev) | ||
539 | { | ||
540 | struct gk20a_platform *platform = gk20a_get_platform(dev); | ||
541 | struct gk20a_scale_profile *profile = platform->g->scale_profile; | ||
542 | struct gk20a_emc_params *emc_params; | ||
543 | struct gk20a *g = platform->g; | ||
544 | |||
545 | if (!profile) | ||
546 | return; | ||
547 | |||
548 | if (profile->private_data) | ||
549 | return; | ||
550 | |||
551 | emc_params = nvgpu_kzalloc(platform->g, sizeof(*emc_params)); | ||
552 | if (!emc_params) | ||
553 | return; | ||
554 | |||
555 | emc_params->freq_last_set = -1; | ||
556 | gk20a_tegra_calibrate_emc(dev, emc_params); | ||
557 | |||
558 | #ifdef CONFIG_TEGRA_BWMGR | ||
559 | emc_params->bwmgr_cl = tegra_bwmgr_register(TEGRA_BWMGR_CLIENT_GPU); | ||
560 | if (!emc_params->bwmgr_cl) { | ||
561 | nvgpu_log_info(g, "%s Missing GPU BWMGR client\n", __func__); | ||
562 | return; | ||
563 | } | ||
564 | #endif | ||
565 | |||
566 | profile->private_data = emc_params; | ||
567 | } | ||
568 | |||
569 | static void gk20a_tegra_scale_exit(struct device *dev) | ||
570 | { | ||
571 | struct gk20a_platform *platform = dev_get_drvdata(dev); | ||
572 | struct gk20a_scale_profile *profile = platform->g->scale_profile; | ||
573 | struct gk20a_emc_params *emc_params; | ||
574 | |||
575 | if (!profile) | ||
576 | return; | ||
577 | |||
578 | emc_params = profile->private_data; | ||
579 | #ifdef CONFIG_TEGRA_BWMGR | ||
580 | tegra_bwmgr_unregister(emc_params->bwmgr_cl); | ||
581 | #endif | ||
582 | |||
583 | nvgpu_kfree(platform->g, profile->private_data); | ||
584 | } | ||
585 | |||
/*
 * Dump nvhost state for this GPU's host1x device, if nvhost support is
 * compiled in and the device is present. Used as the platform
 * dump_platform_dependencies hook.
 */
void gk20a_tegra_debug_dump(struct device *dev)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = platform->g;

	if (g->nvhost_dev)
		nvgpu_nvhost_debug_dump_device(g->nvhost_dev);
#endif
}
596 | |||
/*
 * Take a runtime-PM busy reference on the nvhost host1x device, keeping
 * it powered while the GPU is in use. Returns 0 when nvhost support is
 * absent or the device is missing.
 */
int gk20a_tegra_busy(struct device *dev)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = platform->g;

	if (g->nvhost_dev)
		return nvgpu_nvhost_module_busy_ext(g->nvhost_dev);
#endif
	return 0;
}
608 | |||
/* Drop the busy reference taken by gk20a_tegra_busy(). */
void gk20a_tegra_idle(struct device *dev)
{
#ifdef CONFIG_TEGRA_GK20A_NVHOST
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = platform->g;

	if (g->nvhost_dev)
		nvgpu_nvhost_module_idle_ext(g->nvhost_dev);
#endif
}
619 | |||
/*
 * Preallocate the VPR secure buffer and enable VPR support.
 *
 * Allocates secure_buffer_size bytes from the VPR device (without a
 * kernel mapping) and installs gk20a_tegra_secure_alloc() as the
 * secure-allocation hook. Returns 0 even when VPR is disabled on the
 * platform — allocation failure simply leaves VPR support off.
 */
int gk20a_tegra_init_secure_alloc(struct gk20a_platform *platform)
{
	struct gk20a *g = platform->g;
	struct secure_page_buffer *secure_buffer = &platform->secure_buffer;
	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t iova;

	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
		return 0;

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, __DMA_ATTR(attrs));
	(void)dma_alloc_attrs(&tegra_vpr_dev, platform->secure_buffer_size, &iova,
			      GFP_KERNEL, __DMA_ATTR(attrs));
	/* Some platforms disable VPR. In that case VPR allocations always
	 * fail. Just disable VPR usage in nvgpu in that case. */
	if (dma_mapping_error(&tegra_vpr_dev, iova))
		return 0;

	secure_buffer->size = platform->secure_buffer_size;
	secure_buffer->phys = iova;
	secure_buffer->destroy = gk20a_tegra_secure_page_destroy;

	g->ops.secure_alloc = gk20a_tegra_secure_alloc;
	__nvgpu_set_enabled(g, NVGPU_SUPPORT_VPR, true);

	return 0;
}
647 | |||
648 | #ifdef CONFIG_COMMON_CLK | ||
649 | static struct clk *gk20a_clk_get(struct gk20a *g) | ||
650 | { | ||
651 | if (!g->clk.tegra_clk) { | ||
652 | struct clk *clk, *clk_parent; | ||
653 | char clk_dev_id[32]; | ||
654 | struct device *dev = dev_from_gk20a(g); | ||
655 | |||
656 | snprintf(clk_dev_id, 32, "tegra_%s", dev_name(dev)); | ||
657 | |||
658 | clk = clk_get_sys(clk_dev_id, "gpu"); | ||
659 | if (IS_ERR(clk)) { | ||
660 | nvgpu_err(g, "fail to get tegra gpu clk %s/gpu\n", | ||
661 | clk_dev_id); | ||
662 | return NULL; | ||
663 | } | ||
664 | |||
665 | clk_parent = clk_get_parent(clk); | ||
666 | if (IS_ERR_OR_NULL(clk_parent)) { | ||
667 | nvgpu_err(g, "fail to get tegra gpu clk parent%s/gpu\n", | ||
668 | clk_dev_id); | ||
669 | return NULL; | ||
670 | } | ||
671 | |||
672 | g->clk.tegra_clk = clk; | ||
673 | g->clk.tegra_clk_parent = clk_parent; | ||
674 | } | ||
675 | |||
676 | return g->clk.tegra_clk; | ||
677 | } | ||
678 | |||
/* clk_ops .prepare hook: forward to the gm20b clock implementation. */
static int gm20b_clk_prepare_ops(struct clk_hw *hw)
{
	return gm20b_clk_prepare(to_clk_gk20a(hw));
}
684 | |||
/* clk_ops .unprepare hook: forward to the gm20b clock implementation. */
static void gm20b_clk_unprepare_ops(struct clk_hw *hw)
{
	gm20b_clk_unprepare(to_clk_gk20a(hw));
}
690 | |||
/* clk_ops .is_prepared hook: forward to the gm20b clock implementation. */
static int gm20b_clk_is_prepared_ops(struct clk_hw *hw)
{
	return gm20b_clk_is_prepared(to_clk_gk20a(hw));
}
696 | |||
/* clk_ops .recalc_rate hook: forward to the gm20b clock implementation. */
static unsigned long gm20b_recalc_rate_ops(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	return gm20b_recalc_rate(to_clk_gk20a(hw), parent_rate);
}
702 | |||
/* clk_ops .set_rate hook: forward to the gm20b clock implementation. */
static int gm20b_gpcclk_set_rate_ops(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	return gm20b_gpcclk_set_rate(to_clk_gk20a(hw), rate, parent_rate);
}
709 | |||
/* clk_ops .round_rate hook: forward to the gm20b clock implementation. */
static long gm20b_round_rate_ops(struct clk_hw *hw, unsigned long rate,
				 unsigned long *parent_rate)
{
	return gm20b_round_rate(to_clk_gk20a(hw), rate, parent_rate);
}
716 | |||
/* Common-clock-framework operations for the gm20b GPC clock. */
static const struct clk_ops gm20b_clk_ops = {
	.prepare = gm20b_clk_prepare_ops,
	.unprepare = gm20b_clk_unprepare_ops,
	.is_prepared = gm20b_clk_is_prepared_ops,
	.recalc_rate = gm20b_recalc_rate_ops,
	.set_rate = gm20b_gpcclk_set_rate_ops,
	.round_rate = gm20b_round_rate_ops,
};
725 | |||
/*
 * Register the gm20b GPC clock ("gpcclk") with the common clock
 * framework, parented on "pllg_ref". Initializes the gm20b clock
 * software state first. Returns 0 on success or a negative error.
 */
static int gm20b_register_gpcclk(struct gk20a *g)
{
	const char *parent_name = "pllg_ref";
	struct clk_gk20a *clk = &g->clk;
	struct clk_init_data init;
	struct clk *c;
	int err = 0;

	/* make sure the clock is available */
	if (!gk20a_clk_get(g))
		return -ENOSYS;

	err = gm20b_init_clk_setup_sw(g);
	if (err)
		return err;

	init.name = "gpcclk";
	init.ops = &gm20b_clk_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;
	init.flags = 0;

	/* Data in .init is copied by clk_register(), so stack variable OK */
	clk->hw.init = &init;
	c = clk_register(dev_from_gk20a(g), &clk->hw);
	if (IS_ERR(c)) {
		nvgpu_err(g, "Failed to register GPCPLL clock");
		return -EINVAL;
	}

	clk->g = g;
	/* Make the clock findable via clkdev as "gpcclk". */
	clk_register_clkdev(c, "gpcclk", "gpcclk");

	return err;
}
761 | #endif /* CONFIG_COMMON_CLK */ | ||
762 | |||
/*
 * Platform probe hook for Tegra gk20a/gm20b.
 *
 * Defers probing (with CCF) until the GPU DVFS rail exists and is
 * ready, acquires the nvhost device, applies joint-XPU-rail and
 * chip-specific GPC PLL / scaling quirks, obtains platform clocks,
 * sets up secure (VPR) allocation and registers the GPC clock.
 * Returns 0, -EPROBE_DEFER, or a negative error.
 */
static int gk20a_tegra_probe(struct device *dev)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct device_node *np = dev->of_node;
	bool joint_xpu_rail = false;
	int ret;
	struct gk20a *g = platform->g;

#ifdef CONFIG_COMMON_CLK
	/* DVFS is not guaranteed to be initialized at the time of probe on
	 * kernels with Common Clock Framework enabled.
	 */
	if (!platform->gpu_rail) {
		platform->gpu_rail = tegra_dvfs_get_rail_by_name(GPU_RAIL_NAME);
		if (!platform->gpu_rail) {
			nvgpu_log_info(g, "deferring probe no gpu_rail");
			return -EPROBE_DEFER;
		}
	}

	if (!tegra_dvfs_is_rail_ready(platform->gpu_rail)) {
		nvgpu_log_info(g, "deferring probe gpu_rail not ready");
		return -EPROBE_DEFER;
	}
#endif

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	ret = nvgpu_get_nvhost_dev(platform->g);
	if (ret)
		return ret;
#endif

#ifdef CONFIG_OF
	joint_xpu_rail = of_property_read_bool(of_chosen,
				"nvidia,tegra-joint_xpu_rail");
#endif

	if (joint_xpu_rail) {
		/* CPU and GPU share a rail: railgating must stay off. */
		nvgpu_log_info(g, "XPU rails are joint\n");
		platform->can_railgate_init = false;
		__nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, false);
	}

	platform->g->clk.gpc_pll.id = GK20A_GPC_PLL;
	if (tegra_get_chip_id() == TEGRA210) {
		/* WAR for bug 1547668: Disable railgating and scaling
		   irrespective of platform data if the rework was not made. */
		np = of_find_node_by_path("/gpu-dvfs-rework");
		if (!(np && of_device_is_available(np))) {
			platform->devfreq_governor = "";
			dev_warn(dev, "board does not support scaling");
		}
		platform->g->clk.gpc_pll.id = GM20B_GPC_PLL_B1;
		if (tegra_chip_get_revision() > TEGRA210_REVISION_A04p)
			platform->g->clk.gpc_pll.id = GM20B_GPC_PLL_C1;
	}

	if (tegra_get_chip_id() == TEGRA132)
		platform->soc_name = "tegra13x";

	/* NOTE(review): return value of gk20a_tegra_get_clocks() is
	 * deliberately ignored here; probe continues without clocks. */
	gk20a_tegra_get_clocks(dev);
	nvgpu_linux_init_clk_support(platform->g);
	ret = gk20a_tegra_init_secure_alloc(platform);
	if (ret)
		return ret;

	if (platform->clk_register) {
		ret = platform->clk_register(platform->g);
		if (ret)
			return ret;
	}

	return 0;
}
837 | |||
/* Late-probe hook: nothing to do on this platform. */
static int gk20a_tegra_late_probe(struct device *dev)
{
	return 0;
}
842 | |||
/* Platform remove hook: undo scaling setup and release the nvhost dev. */
static int gk20a_tegra_remove(struct device *dev)
{
	/* deinitialise tegra specific scaling quirks */
	gk20a_tegra_scale_exit(dev);

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	nvgpu_free_nvhost_dev(get_gk20a(dev));
#endif

	return 0;
}
854 | |||
/* System-suspend hook: report zero GPU load to EDP before sleeping. */
static int gk20a_tegra_suspend(struct device *dev)
{
	tegra_edp_notify_gpu_load(0, 0);
	return 0;
}
860 | |||
861 | #if defined(CONFIG_COMMON_CLK) | ||
862 | static long gk20a_round_clk_rate(struct device *dev, unsigned long rate) | ||
863 | { | ||
864 | struct gk20a_platform *platform = gk20a_get_platform(dev); | ||
865 | struct gk20a *g = platform->g; | ||
866 | |||
867 | /* make sure the clock is available */ | ||
868 | if (!gk20a_clk_get(g)) | ||
869 | return rate; | ||
870 | |||
871 | return clk_round_rate(clk_get_parent(g->clk.tegra_clk), rate); | ||
872 | } | ||
873 | |||
/*
 * Fetch the DVFS frequency table of the GPU parent clock. *freqs and
 * *num_freqs are filled by tegra_dvfs_get_freqs(); returns -ENOSYS when
 * the tegra clock is unavailable.
 */
static int gk20a_clk_get_freqs(struct device *dev,
				unsigned long **freqs, int *num_freqs)
{
	struct gk20a_platform *platform = gk20a_get_platform(dev);
	struct gk20a *g = platform->g;

	/* make sure the clock is available */
	if (!gk20a_clk_get(g))
		return -ENOSYS;

	return tegra_dvfs_get_freqs(clk_get_parent(g->clk.tegra_clk),
				freqs, num_freqs);
}
887 | #endif | ||
888 | |||
/*
 * Platform descriptor for gm20b on Tegra (tegra21x): power-management
 * policy, scaling hooks, clock/reset callbacks and memory configuration
 * consumed by the nvgpu core.
 */
struct gk20a_platform gm20b_tegra_platform = {
	.has_syncpoints = true,
	.aggressive_sync_destroy_thresh = 64,

	/* power management configuration */
	.railgate_delay_init = 500,
	.can_railgate_init = true,
	.can_elpg_init = true,
	.enable_slcg = true,
	.enable_blcg = true,
	.enable_elcg = true,
	.can_slcg = true,
	.can_blcg = true,
	.can_elcg = true,
	.enable_elpg = true,
	.enable_aelpg = true,
	.enable_perfmon = true,
	.ptimer_src_freq = 19200000,	/* Hz */

	.force_reset_in_do_idle = false,

	/* channel watchdog timeout, ms */
	.ch_wdt_timeout_ms = 5000,

	.probe = gk20a_tegra_probe,
	.late_probe = gk20a_tegra_late_probe,
	.remove = gk20a_tegra_remove,
	/* power management callbacks */
	.suspend = gk20a_tegra_suspend,

#if defined(CONFIG_TEGRA_DVFS)
	.railgate = gm20b_tegra_railgate,
	.unrailgate = gm20b_tegra_unrailgate,
	.is_railgated = gk20a_tegra_is_railgated,
#endif

	.busy = gk20a_tegra_busy,
	.idle = gk20a_tegra_idle,

	/* reset via the reset-controller framework when available,
	 * otherwise via the legacy gk20a helpers */
#if defined(CONFIG_RESET_CONTROLLER) && defined(CONFIG_COMMON_CLK)
	.reset_assert = gm20b_tegra_reset_assert,
	.reset_deassert = gm20b_tegra_reset_deassert,
#else
	.reset_assert = gk20a_tegra_reset_assert,
	.reset_deassert = gk20a_tegra_reset_deassert,
#endif

#if defined(CONFIG_COMMON_CLK)
	.clk_round_rate = gk20a_round_clk_rate,
	.get_clk_freqs = gk20a_clk_get_freqs,
#endif

#ifdef CONFIG_COMMON_CLK
	.clk_register = gm20b_register_gpcclk,
#endif

	/* frequency scaling configuration */
	.initscale = gk20a_tegra_scale_init,
	.prescale = gk20a_tegra_prescale,
#ifdef CONFIG_TEGRA_BWMGR
	.postscale = gm20b_tegra_postscale,
#endif
	.devfreq_governor = "nvhost_podgov",
	.qos_notify = gk20a_scale_qos_notify,

	.dump_platform_dependencies = gk20a_tegra_debug_dump,

#ifdef CONFIG_NVGPU_SUPPORT_CDE
	.has_cde = true,
#endif

	.soc_name = "tegra21x",

	.unified_memory = true,
	.dma_mask = DMA_BIT_MASK(34),
	.force_128K_pmu_vm = true,

	/* bytes of VPR preallocated by gk20a_tegra_init_secure_alloc() */
	.secure_buffer_size = 335872,
};