summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a
diff options
context:
space:
mode:
authorTerje Bergstrom <tbergstrom@nvidia.com>2017-04-19 17:09:05 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-06-27 06:57:13 -0400
commit8b3d94ffd3e5b6d7a622c6ad54692d79bf39d1ce (patch)
treefb8648cd40e99892e18e28de8e8851915a0ac0b9 /drivers/gpu/nvgpu/gk20a
parent52445fba1feac3ee20bf1c3db149adc42715af9e (diff)
gpu: nvgpu: Move sysfs dependencies from HAL to Linux
Move sysfs dependencies from gk20a/ and gp10b/ to common/linux. At the same time the gk20a and gp10b variants are merged into one. JIRA NVGPU-48 Change-Id: I212be8f1beb8d20a57de04a57513e8fa0e2e83b4 Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-on: https://git-master/r/1466055 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a.h3
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c975
2 files changed, 0 insertions, 978 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index 7dc72f7b..acfb6144 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -1419,9 +1419,6 @@ enum {
1419 KEPLER_CHANNEL_GPFIFO_C = 0xA26F, 1419 KEPLER_CHANNEL_GPFIFO_C = 0xA26F,
1420}; 1420};
1421 1421
1422void gk20a_create_sysfs(struct device *dev);
1423void gk20a_remove_sysfs(struct device *dev);
1424
1425#define GK20A_BAR0_IORESOURCE_MEM 0 1422#define GK20A_BAR0_IORESOURCE_MEM 0
1426#define GK20A_BAR1_IORESOURCE_MEM 1 1423#define GK20A_BAR1_IORESOURCE_MEM 1
1427#define GK20A_SIM_IORESOURCE_MEM 2 1424#define GK20A_SIM_IORESOURCE_MEM 2
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c b/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
deleted file mode 100644
index 1933eed5..00000000
--- a/drivers/gpu/nvgpu/gk20a/gk20a_sysfs.c
+++ /dev/null
@@ -1,975 +0,0 @@
1/*
2 * drivers/video/tegra/host/gk20a/gk20a_sysfs.c
3 *
4 * GK20A Graphics
5 *
6 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <linux/version.h>
22#include <linux/device.h>
23#include <linux/pm_runtime.h>
24#include <linux/fb.h>
25#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
26#include <soc/tegra/tegra-dvfs.h>
27#endif
28
29#include <nvgpu/kmem.h>
30#include <nvgpu/nvhost.h>
31
32#include "gk20a.h"
33#include "gk20a/platform_gk20a.h"
34#include "gr_gk20a.h"
35#include "fifo_gk20a.h"
36#include "pmu_gk20a.h"
37
38#define PTIMER_FP_FACTOR 1000000
39
40#define ROOTRW (S_IRWXU|S_IRGRP|S_IROTH)
41
42static ssize_t elcg_enable_store(struct device *dev,
43 struct device_attribute *attr, const char *buf, size_t count)
44{
45 struct gk20a *g = get_gk20a(dev);
46 unsigned long val = 0;
47 int err;
48
49 if (kstrtoul(buf, 10, &val) < 0)
50 return -EINVAL;
51
52 err = gk20a_busy(g);
53 if (err)
54 return err;
55
56 if (val) {
57 g->elcg_enabled = true;
58 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_AUTO);
59 } else {
60 g->elcg_enabled = false;
61 gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
62 }
63
64 gk20a_idle(g);
65
66 dev_info(dev, "ELCG is %s.\n", g->elcg_enabled ? "enabled" :
67 "disabled");
68
69 return count;
70}
71
72static ssize_t elcg_enable_read(struct device *dev,
73 struct device_attribute *attr, char *buf)
74{
75 struct gk20a *g = get_gk20a(dev);
76
77 return snprintf(buf, PAGE_SIZE, "%d\n", g->elcg_enabled ? 1 : 0);
78}
79
80static DEVICE_ATTR(elcg_enable, ROOTRW, elcg_enable_read, elcg_enable_store);
81
/*
 * blcg_enable (root read/write): "1"/"0" enables/disables block-level
 * clock gating. The new state is cached first, then pushed to every
 * per-unit load_gating_prod hook the chip's HAL provides (each hook is
 * optional, hence the NULL checks). Requires a busy ref while touching HW.
 */
static ssize_t blcg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val)
		g->blcg_enabled = true;
	else
		g->blcg_enabled = false;

	err = gk20a_busy(g);
	if (err)
		return err;

	/* Propagate the cached state to each HW unit that supports BLCG. */
	if (g->ops.clock_gating.blcg_bus_load_gating_prod)
		g->ops.clock_gating.blcg_bus_load_gating_prod(g, g->blcg_enabled);
	if (g->ops.clock_gating.blcg_ce_load_gating_prod)
		g->ops.clock_gating.blcg_ce_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod)
		g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod(g, g->blcg_enabled);
	if (g->ops.clock_gating.blcg_fb_load_gating_prod)
		g->ops.clock_gating.blcg_fb_load_gating_prod(g, g->blcg_enabled);
	if (g->ops.clock_gating.blcg_fifo_load_gating_prod)
		g->ops.clock_gating.blcg_fifo_load_gating_prod(g, g->blcg_enabled);
	if (g->ops.clock_gating.blcg_gr_load_gating_prod)
		g->ops.clock_gating.blcg_gr_load_gating_prod(g, g->blcg_enabled);
	if (g->ops.clock_gating.blcg_ltc_load_gating_prod)
		g->ops.clock_gating.blcg_ltc_load_gating_prod(g, g->blcg_enabled);
	if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
		g->ops.clock_gating.blcg_pmu_load_gating_prod(g, g->blcg_enabled);
	if (g->ops.clock_gating.blcg_xbar_load_gating_prod)
		g->ops.clock_gating.blcg_xbar_load_gating_prod(g,
				g->blcg_enabled);
	gk20a_idle(g);

	dev_info(dev, "BLCG is %s.\n", g->blcg_enabled ? "enabled" :
			"disabled");

	return count;
}

/* Report the cached BLCG state as "1" or "0". */
static ssize_t blcg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->blcg_enabled ? 1 : 0);
}


static DEVICE_ATTR(blcg_enable, ROOTRW, blcg_enable_read, blcg_enable_store);
139
/*
 * slcg_enable (root read/write): "1"/"0" enables/disables second-level
 * clock gating. Mirrors blcg_enable_store: cache the flag, then call
 * every optional per-unit SLCG load_gating_prod hook under a busy ref.
 */
static ssize_t slcg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val)
		g->slcg_enabled = true;
	else
		g->slcg_enabled = false;

	/*
	 * TODO: slcg_therm_load_gating is not enabled anywhere during
	 * init. Therefore, it would be incongruous to add it here. Once
	 * it is added to init, we should add it here too.
	 */
	err = gk20a_busy(g);
	if (err)
		return err;

	/* Propagate the cached state to each HW unit that supports SLCG. */
	if (g->ops.clock_gating.slcg_bus_load_gating_prod)
		g->ops.clock_gating.slcg_bus_load_gating_prod(g, g->slcg_enabled);
	if (g->ops.clock_gating.slcg_ce2_load_gating_prod)
		g->ops.clock_gating.slcg_ce2_load_gating_prod(g, g->slcg_enabled);
	if (g->ops.clock_gating.slcg_chiplet_load_gating_prod)
		g->ops.clock_gating.slcg_chiplet_load_gating_prod(g, g->slcg_enabled);
	if (g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod)
		g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod(g, g->slcg_enabled);
	if (g->ops.clock_gating.slcg_fb_load_gating_prod)
		g->ops.clock_gating.slcg_fb_load_gating_prod(g, g->slcg_enabled);
	if (g->ops.clock_gating.slcg_fifo_load_gating_prod)
		g->ops.clock_gating.slcg_fifo_load_gating_prod(g, g->slcg_enabled);
	if (g->ops.clock_gating.slcg_gr_load_gating_prod)
		g->ops.clock_gating.slcg_gr_load_gating_prod(g, g->slcg_enabled);
	if (g->ops.clock_gating.slcg_ltc_load_gating_prod)
		g->ops.clock_gating.slcg_ltc_load_gating_prod(g, g->slcg_enabled);
	if (g->ops.clock_gating.slcg_perf_load_gating_prod)
		g->ops.clock_gating.slcg_perf_load_gating_prod(g, g->slcg_enabled);
	if (g->ops.clock_gating.slcg_priring_load_gating_prod)
		g->ops.clock_gating.slcg_priring_load_gating_prod(g, g->slcg_enabled);
	if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
		g->ops.clock_gating.slcg_pmu_load_gating_prod(g, g->slcg_enabled);
	if (g->ops.clock_gating.slcg_xbar_load_gating_prod)
		g->ops.clock_gating.slcg_xbar_load_gating_prod(g, g->slcg_enabled);
	gk20a_idle(g);

	dev_info(dev, "SLCG is %s.\n", g->slcg_enabled ? "enabled" :
			"disabled");

	return count;
}

/* Report the cached SLCG state as "1" or "0". */
static ssize_t slcg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->slcg_enabled ? 1 : 0);
}

static DEVICE_ATTR(slcg_enable, ROOTRW, slcg_enable_read, slcg_enable_store);
205
206static ssize_t ptimer_scale_factor_show(struct device *dev,
207 struct device_attribute *attr,
208 char *buf)
209{
210 struct gk20a_platform *platform = dev_get_drvdata(dev);
211 u32 src_freq_hz = platform->ptimer_src_freq;
212 u32 scaling_factor_fp;
213 ssize_t res;
214
215 if (!src_freq_hz) {
216 dev_err(dev, "reference clk_m rate is not set correctly\n");
217 return -EINVAL;
218 }
219
220 scaling_factor_fp = (u32)(PTIMER_REF_FREQ_HZ) /
221 ((u32)(src_freq_hz) /
222 (u32)(PTIMER_FP_FACTOR));
223 res = snprintf(buf,
224 PAGE_SIZE,
225 "%u.%u\n",
226 scaling_factor_fp / PTIMER_FP_FACTOR,
227 scaling_factor_fp % PTIMER_FP_FACTOR);
228
229 return res;
230
231}
232
233static DEVICE_ATTR(ptimer_scale_factor,
234 S_IRUGO,
235 ptimer_scale_factor_show,
236 NULL);
237
238static ssize_t ptimer_ref_freq_show(struct device *dev,
239 struct device_attribute *attr,
240 char *buf)
241{
242 struct gk20a_platform *platform = dev_get_drvdata(dev);
243 u32 src_freq_hz = platform->ptimer_src_freq;
244 ssize_t res;
245
246 if (!src_freq_hz) {
247 dev_err(dev, "reference clk_m rate is not set correctly\n");
248 return -EINVAL;
249 }
250
251 res = snprintf(buf, PAGE_SIZE, "%u\n", PTIMER_REF_FREQ_HZ);
252
253 return res;
254
255}
256
257static DEVICE_ATTR(ptimer_ref_freq,
258 S_IRUGO,
259 ptimer_ref_freq_show,
260 NULL);
261
262static ssize_t ptimer_src_freq_show(struct device *dev,
263 struct device_attribute *attr,
264 char *buf)
265{
266 struct gk20a_platform *platform = dev_get_drvdata(dev);
267 u32 src_freq_hz = platform->ptimer_src_freq;
268 ssize_t res;
269
270 if (!src_freq_hz) {
271 dev_err(dev, "reference clk_m rate is not set correctly\n");
272 return -EINVAL;
273 }
274
275 res = snprintf(buf, PAGE_SIZE, "%u\n", src_freq_hz);
276
277 return res;
278
279}
280
281static DEVICE_ATTR(ptimer_src_freq,
282 S_IRUGO,
283 ptimer_src_freq_show,
284 NULL);
285
286
287#if defined(CONFIG_PM)
/*
 * railgate_enable (root read/write, CONFIG_PM only): "1" allows the GPU
 * rail to be power-gated, "0" forbids it. The policy is implemented by
 * holding an extra runtime-PM busy reference while railgating is
 * disabled, so the take/release calls below must stay paired with the
 * can_railgate transitions.
 */
static ssize_t railgate_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long railgate_enable = 0;
	/* dev is guaranteed to be valid here. Ok to de-reference */
	struct gk20a *g = get_gk20a(dev);
	int err = 0;

	if (kstrtoul(buf, 10, &railgate_enable) < 0)
		return -EINVAL;

	if (railgate_enable && !g->can_railgate) {
		/* release extra ref count */
		gk20a_idle(g);
		g->can_railgate = true;
		g->user_railgate_disabled = false;
	} else if (railgate_enable == 0 && g->can_railgate) {
		/* take extra ref count */
		err = gk20a_busy(g);
		if (err)
			return err;
		g->can_railgate = false;
		g->user_railgate_disabled = true;
	}
	/* No-op when the requested state already matches. */

	dev_info(dev, "railgate is %s.\n", g->can_railgate ?
		"enabled" : "disabled");

	return count;
}

/* Report whether railgating is currently permitted ("1") or not ("0"). */
static ssize_t railgate_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->can_railgate ? 1 : 0);
}

static DEVICE_ATTR(railgate_enable, ROOTRW, railgate_enable_read,
		railgate_enable_store);
329#endif
330
/*
 * railgate_delay (root read/write): autosuspend delay in ms before the
 * GPU rail is gated after it goes idle. Rejected (with only a log
 * message, still returning count) when the value does not parse or is
 * negative, or ignored entirely when the platform cannot railgate.
 */
static ssize_t railgate_delay_store(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int railgate_delay = 0, ret = 0;
	struct gk20a *g = get_gk20a(dev);
	int err;

	if (!g->can_railgate) {
		dev_info(dev, "does not support power-gating\n");
		return count;
	}

	ret = sscanf(buf, "%d", &railgate_delay);
	if (ret == 1 && railgate_delay >= 0) {
		g->railgate_delay = railgate_delay;
		pm_runtime_set_autosuspend_delay(dev, g->railgate_delay);
	} else
		dev_err(dev, "Invalid powergate delay\n");

	/* wake-up system to make rail-gating delay effective immediately */
	err = gk20a_busy(g);
	if (err)
		return err;
	gk20a_idle(g);

	return count;
}
/* Report the current railgate delay in ms. */
static ssize_t railgate_delay_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->railgate_delay);
}
static DEVICE_ATTR(railgate_delay, ROOTRW, railgate_delay_show,
		railgate_delay_store);
368
369static ssize_t is_railgated_show(struct device *dev,
370 struct device_attribute *attr, char *buf)
371{
372 struct gk20a_platform *platform = dev_get_drvdata(dev);
373 bool is_railgated = 0;
374
375 if (platform->is_railgated)
376 is_railgated = platform->is_railgated(platform->g->dev);
377
378 return snprintf(buf, PAGE_SIZE, "%s\n", is_railgated ? "yes" : "no");
379}
380static DEVICE_ATTR(is_railgated, S_IRUGO, is_railgated_show, NULL);
381
382static ssize_t counters_show(struct device *dev,
383 struct device_attribute *attr, char *buf)
384{
385 struct gk20a *g = get_gk20a(dev);
386 u32 busy_cycles, total_cycles;
387 ssize_t res;
388
389 nvgpu_pmu_get_load_counters(g, &busy_cycles, &total_cycles);
390
391 res = snprintf(buf, PAGE_SIZE, "%u %u\n", busy_cycles, total_cycles);
392
393 return res;
394}
395static DEVICE_ATTR(counters, S_IRUGO, counters_show, NULL);
396
397static ssize_t counters_show_reset(struct device *dev,
398 struct device_attribute *attr, char *buf)
399{
400 ssize_t res = counters_show(dev, attr, buf);
401 struct gk20a *g = get_gk20a(dev);
402
403 nvgpu_pmu_reset_load_counters(g);
404
405 return res;
406}
407static DEVICE_ATTR(counters_reset, S_IRUGO, counters_show_reset, NULL);
408
409static ssize_t gk20a_load_show(struct device *dev,
410 struct device_attribute *attr,
411 char *buf)
412{
413 struct gk20a *g = get_gk20a(dev);
414 u32 busy_time;
415 ssize_t res;
416 int err;
417
418 if (!g->power_on) {
419 busy_time = 0;
420 } else {
421 err = gk20a_busy(g);
422 if (err)
423 return err;
424
425 nvgpu_pmu_load_update(g);
426 nvgpu_pmu_load_norm(g, &busy_time);
427 gk20a_idle(g);
428 }
429
430 res = snprintf(buf, PAGE_SIZE, "%u\n", busy_time);
431
432 return res;
433}
434static DEVICE_ATTR(load, S_IRUGO, gk20a_load_show, NULL);
435
/*
 * elpg_enable (root read/write): "1"/"0" enables/disables engine-level
 * power gating. When the GPU is off only the cached flag is updated;
 * the real PMU programming happens at next power-up. When powered, the
 * flag and PMU state are toggled only on an actual state change since
 * ELPG is refcounted in the PMU.
 * NOTE(review): a gk20a_busy() failure is reported as -EAGAIN here,
 * masking the real error code — confirm whether callers rely on that.
 */
static ssize_t elpg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (!g->power_on) {
		g->elpg_enabled = val ? true : false;
	} else {
		err = gk20a_busy(g);
		if (err)
			return -EAGAIN;
		/*
		 * Since elpg is refcounted, we should not unnecessarily call
		 * enable/disable if it is already so.
		 */
		if (val && !g->elpg_enabled) {
			g->elpg_enabled = true;
			nvgpu_pmu_pg_global_enable(g, true);

		} else if (!val && g->elpg_enabled) {
			/*
			 * Disable path: when GR power gating is not the
			 * (only) enabled PG feature, disable the PMU side
			 * before clearing the flag; otherwise clear the
			 * flag first.
			 */
			if (g->ops.pmu.pmu_pg_engines_feature_list &&
				g->ops.pmu.pmu_pg_engines_feature_list(g,
				PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
				PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
				nvgpu_pmu_pg_global_enable(g, false);
				g->elpg_enabled = false;
			} else {
				g->elpg_enabled = false;
				nvgpu_pmu_pg_global_enable(g, false);
			}
		}
		gk20a_idle(g);
	}
	dev_info(dev, "ELPG is %s.\n", g->elpg_enabled ? "enabled" :
			"disabled");

	return count;
}

/* Report the cached ELPG state as "1" or "0". */
static ssize_t elpg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->elpg_enabled ? 1 : 0);
}

static DEVICE_ATTR(elpg_enable, ROOTRW, elpg_enable_read, elpg_enable_store);
489
/*
 * mscg_enable (root read/write): "1"/"0" enables/disables memory system
 * clock gating. Like elpg_enable, only the cached flag is updated while
 * the GPU is off. When powered, pmu->mscg_stat is updated with explicit
 * ACCESS_ONCE/WRITE_ONCE plus smp_mb() so the PMU-side reader observes
 * a consistent status — the ordering of those stores is deliberate.
 */
static ssize_t mscg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	struct nvgpu_pmu *pmu = &g->pmu;
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (!g->power_on) {
		g->mscg_enabled = val ? true : false;
	} else {
		err = gk20a_busy(g);
		if (err)
			return -EAGAIN;
		/*
		 * Since elpg is refcounted, we should not unnecessarily call
		 * enable/disable if it is already so.
		 */
		if (val && !g->mscg_enabled) {
			g->mscg_enabled = true;
			if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
					PMU_PG_LPWR_FEATURE_MSCG)) {
				if (!ACCESS_ONCE(pmu->mscg_stat)) {
					WRITE_ONCE(pmu->mscg_stat,
						PMU_MSCG_ENABLED);
					/* make status visible */
					smp_mb();
				}
			}

		} else if (!val && g->mscg_enabled) {
			if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
					PMU_PG_LPWR_FEATURE_MSCG)) {
				/* Tear down PG, publish the disabled
				 * status, then re-enable PG if ELPG alone
				 * should stay active. */
				nvgpu_pmu_pg_global_enable(g, false);
				WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
				/* make status visible */
				smp_mb();
				g->mscg_enabled = false;
				if (g->elpg_enabled)
					nvgpu_pmu_pg_global_enable(g, true);
			}
			g->mscg_enabled = false;
		}
		gk20a_idle(g);
	}
	dev_info(dev, "MSCG is %s.\n", g->mscg_enabled ? "enabled" :
			"disabled");

	return count;
}

/* Report the cached MSCG state as "1" or "0". */
static ssize_t mscg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->mscg_enabled ? 1 : 0);
}

static DEVICE_ATTR(mscg_enable, ROOTRW, mscg_enable_read, mscg_enable_store);
553
554static ssize_t aelpg_param_store(struct device *dev,
555 struct device_attribute *attr, const char *buf, size_t count)
556{
557 struct gk20a *g = get_gk20a(dev);
558 int status = 0;
559 union pmu_ap_cmd ap_cmd;
560 int *paramlist = (int *)g->pmu.aelpg_param;
561 u32 defaultparam[5] = {
562 APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US,
563 APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US,
564 APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US,
565 APCTRL_POWER_BREAKEVEN_DEFAULT_US,
566 APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT
567 };
568
569 /* Get each parameter value from input string*/
570 sscanf(buf, "%d %d %d %d %d", &paramlist[0], &paramlist[1],
571 &paramlist[2], &paramlist[3], &paramlist[4]);
572
573 /* If parameter value is 0 then reset to SW default values*/
574 if ((paramlist[0] | paramlist[1] | paramlist[2]
575 | paramlist[3] | paramlist[4]) == 0x00) {
576 memcpy(paramlist, defaultparam, sizeof(defaultparam));
577 }
578
579 /* If aelpg is enabled & pmu is ready then post values to
580 * PMU else store then post later
581 */
582 if (g->aelpg_enabled && g->pmu.pmu_ready) {
583 /* Disable AELPG */
584 ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
585 ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
586 status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
587
588 /* Enable AELPG */
589 nvgpu_aelpg_init(g);
590 nvgpu_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
591 }
592
593 return count;
594}
595
596static ssize_t aelpg_param_read(struct device *dev,
597 struct device_attribute *attr, char *buf)
598{
599 struct gk20a *g = get_gk20a(dev);
600
601 return snprintf(buf, PAGE_SIZE,
602 "%d %d %d %d %d\n", g->pmu.aelpg_param[0],
603 g->pmu.aelpg_param[1], g->pmu.aelpg_param[2],
604 g->pmu.aelpg_param[3], g->pmu.aelpg_param[4]);
605}
606
607static DEVICE_ATTR(aelpg_param, ROOTRW,
608 aelpg_param_read, aelpg_param_store);
609
/*
 * aelpg_enable (root read/write): "1"/"0" enables/disables adaptive
 * ELPG by posting enable/disable commands to the PMU. Only acts on an
 * actual state change, and only when the PMU has finished booting.
 * NOTE(review): the command status returned by nvgpu_pmu_ap_send_command
 * is captured but never checked — a failed PMU command still reports
 * success to the writer; confirm if intentional.
 */
static ssize_t aelpg_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int status = 0;
	union pmu_ap_cmd ap_cmd;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	if (g->pmu.pmu_ready) {
		if (val && !g->aelpg_enabled) {
			g->aelpg_enabled = true;
			/* Enable AELPG */
			ap_cmd.enable_ctrl.cmd_id = PMU_AP_CMD_ID_ENABLE_CTRL;
			ap_cmd.enable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
			status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
		} else if (!val && g->aelpg_enabled) {
			g->aelpg_enabled = false;
			/* Disable AELPG */
			ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
			ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
			status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
		}
	} else {
		dev_info(dev, "PMU is not ready, AELPG request failed\n");
	}
	gk20a_idle(g);

	dev_info(dev, "AELPG is %s.\n", g->aelpg_enabled ? "enabled" :
			"disabled");

	return count;
}

/* Report the cached AELPG state as "1" or "0". */
static ssize_t aelpg_enable_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->aelpg_enabled ? 1 : 0);
}

static DEVICE_ATTR(aelpg_enable, ROOTRW,
		aelpg_enable_read, aelpg_enable_store);
661
662
663static ssize_t allow_all_enable_read(struct device *dev,
664 struct device_attribute *attr, char *buf)
665{
666 struct gk20a *g = get_gk20a(dev);
667
668 return snprintf(buf, PAGE_SIZE, "%d\n", g->allow_all ? 1 : 0);
669}
670
671static ssize_t allow_all_enable_store(struct device *dev,
672 struct device_attribute *attr, const char *buf, size_t count)
673{
674 struct gk20a *g = get_gk20a(dev);
675 unsigned long val = 0;
676 int err;
677
678 if (kstrtoul(buf, 10, &val) < 0)
679 return -EINVAL;
680
681 err = gk20a_busy(g);
682 g->allow_all = (val ? true : false);
683 gk20a_idle(g);
684
685 return count;
686}
687
688static DEVICE_ATTR(allow_all, ROOTRW,
689 allow_all_enable_read, allow_all_enable_store);
690
691static ssize_t emc3d_ratio_store(struct device *dev,
692 struct device_attribute *attr, const char *buf, size_t count)
693{
694 struct gk20a *g = get_gk20a(dev);
695 unsigned long val = 0;
696
697 if (kstrtoul(buf, 10, &val) < 0)
698 return -EINVAL;
699
700 g->emc3d_ratio = val;
701
702 return count;
703}
704
705static ssize_t emc3d_ratio_read(struct device *dev,
706 struct device_attribute *attr, char *buf)
707{
708 struct gk20a *g = get_gk20a(dev);
709
710 return snprintf(buf, PAGE_SIZE, "%d\n", g->emc3d_ratio);
711}
712
713static DEVICE_ATTR(emc3d_ratio, ROOTRW, emc3d_ratio_read, emc3d_ratio_store);
714
/*
 * fmax_at_vmin_safe (read-only): maximum safe GPU frequency (Hz) at
 * minimum voltage, queried from the Tegra DVFS layer.
 * NOTE(review): tegra_dvfs_get_fmax_at_vmin_safe_t() is declared in
 * <soc/tegra/tegra-dvfs.h>, which this file includes only for kernels
 * >= 4.4 — confirm this attribute is not built on older kernels.
 */
static ssize_t fmax_at_vmin_safe_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long gpu_fmax_at_vmin_hz = 0;
	struct clk *clk = g->clk.tegra_clk;

	gpu_fmax_at_vmin_hz = tegra_dvfs_get_fmax_at_vmin_safe_t(clk);

	return snprintf(buf, PAGE_SIZE, "%d\n", (int)(gpu_fmax_at_vmin_hz));
}

static DEVICE_ATTR(fmax_at_vmin_safe, S_IRUGO, fmax_at_vmin_safe_read, NULL);
728
729#ifdef CONFIG_PM
730static ssize_t force_idle_store(struct device *dev,
731 struct device_attribute *attr, const char *buf, size_t count)
732{
733 struct gk20a *g = get_gk20a(dev);
734 unsigned long val = 0;
735 int err = 0;
736
737 if (kstrtoul(buf, 10, &val) < 0)
738 return -EINVAL;
739
740 if (val) {
741 if (g->forced_idle)
742 return count; /* do nothing */
743 else {
744 err = __gk20a_do_idle(g, false);
745 if (!err) {
746 g->forced_idle = 1;
747 dev_info(dev, "gpu is idle : %d\n",
748 g->forced_idle);
749 }
750 }
751 } else {
752 if (!g->forced_idle)
753 return count; /* do nothing */
754 else {
755 err = __gk20a_do_unidle(g);
756 if (!err) {
757 g->forced_idle = 0;
758 dev_info(dev, "gpu is idle : %d\n",
759 g->forced_idle);
760 }
761 }
762 }
763
764 return count;
765}
766
767static ssize_t force_idle_read(struct device *dev,
768 struct device_attribute *attr, char *buf)
769{
770 struct gk20a *g = get_gk20a(dev);
771
772 return snprintf(buf, PAGE_SIZE, "%d\n", g->forced_idle ? 1 : 0);
773}
774
775static DEVICE_ATTR(force_idle, ROOTRW, force_idle_read, force_idle_store);
776#endif
777
/*
 * tpc_fs_mask (root read/write): write a non-zero TPC floorsweeping
 * mask for GPC 0. A changed mask invalidates the cached golden context
 * image so it is rebuilt with the new configuration on next use — the
 * ordering (program mask, then drop golden image, then clear sw_ready)
 * must be preserved.
 */
static ssize_t tpc_fs_mask_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (!g->gr.gpc_tpc_mask)
		return -ENODEV;

	/* Only act on a real change, and only if the HAL can program it. */
	if (val && val != g->gr.gpc_tpc_mask[0] && g->ops.gr.set_gpc_tpc_mask) {
		g->gr.gpc_tpc_mask[0] = val;
		g->tpc_fs_mask_user = val;

		g->ops.gr.set_gpc_tpc_mask(g, 0);

		/* Invalidate the golden context image built for the old
		 * TPC configuration. */
		nvgpu_vfree(g, g->gr.ctx_vars.local_golden_image);
		g->gr.ctx_vars.local_golden_image = NULL;
		g->gr.ctx_vars.golden_image_initialized = false;
		g->gr.ctx_vars.golden_image_size = 0;
		g->gr.sw_ready = false;
	}

	return count;
}

/*
 * Read back the combined TPC mask across all GPCs, packing each GPC's
 * mask at (max_tpc_per_gpc_count * gpc_index) bits. Needs a busy ref
 * because the mask is read from HW via the HAL.
 */
static ssize_t tpc_fs_mask_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	struct gr_gk20a *gr = &g->gr;
	u32 gpc_index;
	u32 tpc_fs_mask = 0;
	int err = 0;

	err = gk20a_busy(g);
	if (err)
		return err;

	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
		if (g->ops.gr.get_gpc_tpc_mask)
			tpc_fs_mask |=
				g->ops.gr.get_gpc_tpc_mask(g, gpc_index) <<
				(gr->max_tpc_per_gpc_count * gpc_index);
	}

	gk20a_idle(g);

	return snprintf(buf, PAGE_SIZE, "0x%x\n", tpc_fs_mask);
}

static DEVICE_ATTR(tpc_fs_mask, ROOTRW, tpc_fs_mask_read, tpc_fs_mask_store);
832
833static ssize_t min_timeslice_us_read(struct device *dev,
834 struct device_attribute *attr, char *buf)
835{
836 struct gk20a *g = get_gk20a(dev);
837
838 return snprintf(buf, PAGE_SIZE, "%u\n", g->min_timeslice_us);
839}
840
841static ssize_t min_timeslice_us_store(struct device *dev,
842 struct device_attribute *attr, const char *buf, size_t count)
843{
844 struct gk20a *g = get_gk20a(dev);
845 unsigned long val;
846
847 if (kstrtoul(buf, 10, &val) < 0)
848 return -EINVAL;
849
850 if (val > g->max_timeslice_us)
851 return -EINVAL;
852
853 g->min_timeslice_us = val;
854
855 return count;
856}
857
858static DEVICE_ATTR(min_timeslice_us, ROOTRW, min_timeslice_us_read,
859 min_timeslice_us_store);
860
861static ssize_t max_timeslice_us_read(struct device *dev,
862 struct device_attribute *attr, char *buf)
863{
864 struct gk20a *g = get_gk20a(dev);
865
866 return snprintf(buf, PAGE_SIZE, "%u\n", g->max_timeslice_us);
867}
868
869static ssize_t max_timeslice_us_store(struct device *dev,
870 struct device_attribute *attr, const char *buf, size_t count)
871{
872 struct gk20a *g = get_gk20a(dev);
873 unsigned long val;
874
875 if (kstrtoul(buf, 10, &val) < 0)
876 return -EINVAL;
877
878 if (val < g->min_timeslice_us)
879 return -EINVAL;
880
881 g->max_timeslice_us = val;
882
883 return count;
884}
885
886static DEVICE_ATTR(max_timeslice_us, ROOTRW, max_timeslice_us_read,
887 max_timeslice_us_store);
888
889
/*
 * Remove every sysfs attribute created by gk20a_create_sysfs(), the
 * nvhost symlink, and (for secondary GPU nodes) the "gpu.0" symlink in
 * the parent's sysfs directory. The removal list must stay in sync with
 * the creation list below.
 */
void gk20a_remove_sysfs(struct device *dev)
{
	device_remove_file(dev, &dev_attr_elcg_enable);
	device_remove_file(dev, &dev_attr_blcg_enable);
	device_remove_file(dev, &dev_attr_slcg_enable);
	device_remove_file(dev, &dev_attr_ptimer_scale_factor);
	device_remove_file(dev, &dev_attr_ptimer_ref_freq);
	device_remove_file(dev, &dev_attr_ptimer_src_freq);
	device_remove_file(dev, &dev_attr_elpg_enable);
	device_remove_file(dev, &dev_attr_mscg_enable);
	device_remove_file(dev, &dev_attr_emc3d_ratio);
	device_remove_file(dev, &dev_attr_fmax_at_vmin_safe);
	device_remove_file(dev, &dev_attr_counters);
	device_remove_file(dev, &dev_attr_counters_reset);
	device_remove_file(dev, &dev_attr_load);
	device_remove_file(dev, &dev_attr_railgate_delay);
	device_remove_file(dev, &dev_attr_is_railgated);
#ifdef CONFIG_PM
	device_remove_file(dev, &dev_attr_force_idle);
	device_remove_file(dev, &dev_attr_railgate_enable);
#endif
	device_remove_file(dev, &dev_attr_aelpg_param);
	device_remove_file(dev, &dev_attr_aelpg_enable);
	device_remove_file(dev, &dev_attr_allow_all);
	device_remove_file(dev, &dev_attr_tpc_fs_mask);
	device_remove_file(dev, &dev_attr_min_timeslice_us);
	device_remove_file(dev, &dev_attr_max_timeslice_us);

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	nvgpu_nvhost_remove_symlink(get_gk20a(dev));
#endif

	/* Non-"gpu.0" devices got a compatibility symlink at creation;
	 * tear it down from the parent directory. */
	if (strcmp(dev_name(dev), "gpu.0")) {
		struct kobject *kobj = &dev->kobj;
		struct device *parent = container_of((kobj->parent),
					struct device, kobj);
		sysfs_remove_link(&parent->kobj, "gpu.0");
	}
}
929
/*
 * Create all gk20a sysfs attributes on the device, an nvhost symlink,
 * and — for devices not named "gpu.0" — a legacy "gpu.0" symlink in the
 * parent directory so old userspace paths keep working. Failures are
 * only logged; the device is still usable without sysfs.
 * NOTE(review): error codes are accumulated with |=, which detects
 * failure but scrambles the individual negative errno values — confirm
 * that only the boolean outcome is intended to matter here.
 */
void gk20a_create_sysfs(struct device *dev)
{
	int error = 0;

	error |= device_create_file(dev, &dev_attr_elcg_enable);
	error |= device_create_file(dev, &dev_attr_blcg_enable);
	error |= device_create_file(dev, &dev_attr_slcg_enable);
	error |= device_create_file(dev, &dev_attr_ptimer_scale_factor);
	error |= device_create_file(dev, &dev_attr_ptimer_ref_freq);
	error |= device_create_file(dev, &dev_attr_ptimer_src_freq);
	error |= device_create_file(dev, &dev_attr_elpg_enable);
	error |= device_create_file(dev, &dev_attr_mscg_enable);
	error |= device_create_file(dev, &dev_attr_emc3d_ratio);
	error |= device_create_file(dev, &dev_attr_fmax_at_vmin_safe);
	error |= device_create_file(dev, &dev_attr_counters);
	error |= device_create_file(dev, &dev_attr_counters_reset);
	error |= device_create_file(dev, &dev_attr_load);
	error |= device_create_file(dev, &dev_attr_railgate_delay);
	error |= device_create_file(dev, &dev_attr_is_railgated);
#ifdef CONFIG_PM
	error |= device_create_file(dev, &dev_attr_force_idle);
	error |= device_create_file(dev, &dev_attr_railgate_enable);
#endif
	error |= device_create_file(dev, &dev_attr_aelpg_param);
	error |= device_create_file(dev, &dev_attr_aelpg_enable);
	error |= device_create_file(dev, &dev_attr_allow_all);
	error |= device_create_file(dev, &dev_attr_tpc_fs_mask);
	error |= device_create_file(dev, &dev_attr_min_timeslice_us);
	error |= device_create_file(dev, &dev_attr_max_timeslice_us);

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	error |= nvgpu_nvhost_create_symlink(get_gk20a(dev));
#endif

	/* Legacy path: expose non-"gpu.0" devices under the parent's
	 * "gpu.0" name as well. */
	if (strcmp(dev_name(dev), "gpu.0")) {
		struct kobject *kobj = &dev->kobj;
		struct device *parent = container_of((kobj->parent),
					struct device, kobj);
		error |= sysfs_create_link(&parent->kobj,
				&dev->kobj, "gpu.0");
	}

	if (error)
		dev_err(dev, "Failed to create sysfs attributes!\n");

}