aboutsummaryrefslogtreecommitdiffstats
path: root/include/os/linux/sysfs.c
diff options
context:
space:
mode:
authorJoshua Bakita <bakitajoshua@gmail.com>2024-09-25 16:09:09 -0400
committerJoshua Bakita <bakitajoshua@gmail.com>2024-09-25 16:09:09 -0400
commitf347fde22f1297e4f022600d201780d5ead78114 (patch)
tree76be305d6187003a1e0486ff6e91efb1062ae118 /include/os/linux/sysfs.c
parent8340d234d78a7d0f46c11a584de538148b78b7cb (diff)
Delete no-longer-needed nvgpu headersHEADmasterjbakita-wip
The dependency on these was removed in commit 8340d234.
Diffstat (limited to 'include/os/linux/sysfs.c')
-rw-r--r--include/os/linux/sysfs.c1275
1 files changed, 0 insertions, 1275 deletions
diff --git a/include/os/linux/sysfs.c b/include/os/linux/sysfs.c
deleted file mode 100644
index 221ea0c..0000000
--- a/include/os/linux/sysfs.c
+++ /dev/null
@@ -1,1275 +0,0 @@
1/*
2 * Copyright (c) 2011-2019, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/device.h>
18#include <linux/pm_runtime.h>
19#include <linux/fb.h>
20
21#include <nvgpu/kmem.h>
22#include <nvgpu/nvhost.h>
23#include <nvgpu/ptimer.h>
24#include <nvgpu/power_features/cg.h>
25#include <nvgpu/power_features/pg.h>
26
27#include "os_linux.h"
28#include "sysfs.h"
29#include "platform_gk20a.h"
30#include "gk20a/gr_gk20a.h"
31#include "gv11b/gr_gv11b.h"
32
33#define PTIMER_FP_FACTOR 1000000
34
35#define ROOTRW (S_IRWXU|S_IRGRP|S_IROTH)
36
37#define TPC_MASK_FOR_ALL_ACTIVE_TPCs (u32) 0x0
38
39static ssize_t elcg_enable_store(struct device *dev,
40 struct device_attribute *attr, const char *buf, size_t count)
41{
42 struct gk20a *g = get_gk20a(dev);
43 unsigned long val = 0;
44 int err;
45
46 if (kstrtoul(buf, 10, &val) < 0)
47 return -EINVAL;
48
49 err = gk20a_busy(g);
50 if (err)
51 return err;
52
53 if (val) {
54 nvgpu_cg_elcg_set_elcg_enabled(g, true);
55 } else {
56 nvgpu_cg_elcg_set_elcg_enabled(g, false);
57 }
58
59 gk20a_idle(g);
60
61 nvgpu_info(g, "ELCG is %s.", val ? "enabled" :
62 "disabled");
63
64 return count;
65}
66
67static ssize_t elcg_enable_read(struct device *dev,
68 struct device_attribute *attr, char *buf)
69{
70 struct gk20a *g = get_gk20a(dev);
71
72 return snprintf(buf, PAGE_SIZE, "%d\n", g->elcg_enabled ? 1 : 0);
73}
74
75static DEVICE_ATTR(elcg_enable, ROOTRW, elcg_enable_read, elcg_enable_store);
76
77static ssize_t blcg_enable_store(struct device *dev,
78 struct device_attribute *attr, const char *buf, size_t count)
79{
80 struct gk20a *g = get_gk20a(dev);
81 unsigned long val = 0;
82 int err;
83
84 if (kstrtoul(buf, 10, &val) < 0)
85 return -EINVAL;
86
87 err = gk20a_busy(g);
88 if (err)
89 return err;
90
91 if (val) {
92 nvgpu_cg_blcg_set_blcg_enabled(g, true);
93 } else {
94 nvgpu_cg_blcg_set_blcg_enabled(g, false);
95 }
96
97 gk20a_idle(g);
98
99 nvgpu_info(g, "BLCG is %s.", val ? "enabled" :
100 "disabled");
101
102 return count;
103}
104
105static ssize_t blcg_enable_read(struct device *dev,
106 struct device_attribute *attr, char *buf)
107{
108 struct gk20a *g = get_gk20a(dev);
109
110 return snprintf(buf, PAGE_SIZE, "%d\n", g->blcg_enabled ? 1 : 0);
111}
112
113
114static DEVICE_ATTR(blcg_enable, ROOTRW, blcg_enable_read, blcg_enable_store);
115
116static ssize_t slcg_enable_store(struct device *dev,
117 struct device_attribute *attr, const char *buf, size_t count)
118{
119 struct gk20a *g = get_gk20a(dev);
120 unsigned long val = 0;
121 int err;
122
123 if (kstrtoul(buf, 10, &val) < 0)
124 return -EINVAL;
125
126 err = gk20a_busy(g);
127 if (err) {
128 return err;
129 }
130
131 if (val) {
132 nvgpu_cg_slcg_set_slcg_enabled(g, true);
133 } else {
134 nvgpu_cg_slcg_set_slcg_enabled(g, false);
135 }
136
137 /*
138 * TODO: slcg_therm_load_gating is not enabled anywhere during
139 * init. Therefore, it would be incongruous to add it here. Once
140 * it is added to init, we should add it here too.
141 */
142 gk20a_idle(g);
143
144 nvgpu_info(g, "SLCG is %s.", val ? "enabled" :
145 "disabled");
146
147 return count;
148}
149
150static ssize_t slcg_enable_read(struct device *dev,
151 struct device_attribute *attr, char *buf)
152{
153 struct gk20a *g = get_gk20a(dev);
154
155 return snprintf(buf, PAGE_SIZE, "%d\n", g->slcg_enabled ? 1 : 0);
156}
157
158static DEVICE_ATTR(slcg_enable, ROOTRW, slcg_enable_read, slcg_enable_store);
159
160static ssize_t ptimer_scale_factor_show(struct device *dev,
161 struct device_attribute *attr,
162 char *buf)
163{
164 struct gk20a *g = get_gk20a(dev);
165 struct gk20a_platform *platform = dev_get_drvdata(dev);
166 u32 src_freq_hz = platform->ptimer_src_freq;
167 u32 scaling_factor_fp;
168 ssize_t res;
169
170 if (!src_freq_hz) {
171 nvgpu_err(g, "reference clk_m rate is not set correctly");
172 return -EINVAL;
173 }
174
175 scaling_factor_fp = (u32)(PTIMER_REF_FREQ_HZ) /
176 ((u32)(src_freq_hz) /
177 (u32)(PTIMER_FP_FACTOR));
178 res = snprintf(buf,
179 PAGE_SIZE,
180 "%u.%u\n",
181 scaling_factor_fp / PTIMER_FP_FACTOR,
182 scaling_factor_fp % PTIMER_FP_FACTOR);
183
184 return res;
185
186}
187
188static DEVICE_ATTR(ptimer_scale_factor,
189 S_IRUGO,
190 ptimer_scale_factor_show,
191 NULL);
192
193static ssize_t ptimer_ref_freq_show(struct device *dev,
194 struct device_attribute *attr,
195 char *buf)
196{
197 struct gk20a *g = get_gk20a(dev);
198 struct gk20a_platform *platform = dev_get_drvdata(dev);
199 u32 src_freq_hz = platform->ptimer_src_freq;
200 ssize_t res;
201
202 if (!src_freq_hz) {
203 nvgpu_err(g, "reference clk_m rate is not set correctly");
204 return -EINVAL;
205 }
206
207 res = snprintf(buf, PAGE_SIZE, "%u\n", PTIMER_REF_FREQ_HZ);
208
209 return res;
210
211}
212
213static DEVICE_ATTR(ptimer_ref_freq,
214 S_IRUGO,
215 ptimer_ref_freq_show,
216 NULL);
217
218static ssize_t ptimer_src_freq_show(struct device *dev,
219 struct device_attribute *attr,
220 char *buf)
221{
222 struct gk20a *g = get_gk20a(dev);
223 struct gk20a_platform *platform = dev_get_drvdata(dev);
224 u32 src_freq_hz = platform->ptimer_src_freq;
225 ssize_t res;
226
227 if (!src_freq_hz) {
228 nvgpu_err(g, "reference clk_m rate is not set correctly");
229 return -EINVAL;
230 }
231
232 res = snprintf(buf, PAGE_SIZE, "%u\n", src_freq_hz);
233
234 return res;
235
236}
237
238static DEVICE_ATTR(ptimer_src_freq,
239 S_IRUGO,
240 ptimer_src_freq_show,
241 NULL);
242
243
244static ssize_t gpu_powered_on_show(struct device *dev,
245 struct device_attribute *attr,
246 char *buf)
247{
248 struct gk20a *g = get_gk20a(dev);
249
250 return snprintf(buf, PAGE_SIZE, "%u\n", g->power_on);
251}
252
253static DEVICE_ATTR(gpu_powered_on, S_IRUGO, gpu_powered_on_show, NULL);
254
#if defined(CONFIG_PM)
/*
 * Enable or disable runtime railgating. Writing the current state is a
 * successful no-op. The runtime-PM autosuspend delay is updated and the
 * GPU is cycled once so the new setting takes effect immediately.
 */
static ssize_t railgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long want = 0;
	/* dev is guaranteed to be valid here. Ok to de-reference */
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	bool currently_enabled = nvgpu_is_enabled(g, NVGPU_CAN_RAILGATE);
	int err;

	if (kstrtoul(buf, 10, &want) < 0)
		return -EINVAL;

	/* normalize to boolean */
	want = !!want;

	/* writing same value should be treated as nop and successful */
	if (want == currently_enabled)
		goto out;

	if (!platform->can_railgate_init) {
		nvgpu_err(g, "Railgating is not supported");
		return -EINVAL;
	}

	__nvgpu_set_enabled(g, NVGPU_CAN_RAILGATE, want != 0);
	pm_runtime_set_autosuspend_delay(dev,
			want ? g->railgate_delay : -1);

	/* wake-up system to make rail-gating setting effective */
	err = gk20a_busy(g);
	if (err)
		return err;
	gk20a_idle(g);

out:
	nvgpu_info(g, "railgate is %s.",
		nvgpu_is_enabled(g, NVGPU_CAN_RAILGATE) ?
		"enabled" : "disabled");

	return count;
}

/* Report whether railgating is currently permitted (0/1). */
static ssize_t railgate_enable_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			nvgpu_is_enabled(g, NVGPU_CAN_RAILGATE) ? 1 : 0);
}

static DEVICE_ATTR(railgate_enable, ROOTRW, railgate_enable_read,
			railgate_enable_store);
#endif
314
315static ssize_t railgate_delay_store(struct device *dev,
316 struct device_attribute *attr,
317 const char *buf, size_t count)
318{
319 int railgate_delay = 0, ret = 0;
320 struct gk20a *g = get_gk20a(dev);
321 int err;
322
323 if (!nvgpu_is_enabled(g, NVGPU_CAN_RAILGATE)) {
324 nvgpu_info(g, "does not support power-gating");
325 return count;
326 }
327
328 ret = sscanf(buf, "%d", &railgate_delay);
329 if (ret == 1 && railgate_delay >= 0) {
330 g->railgate_delay = railgate_delay;
331 pm_runtime_set_autosuspend_delay(dev, g->railgate_delay);
332 } else
333 nvgpu_err(g, "Invalid powergate delay");
334
335 /* wake-up system to make rail-gating delay effective immediately */
336 err = gk20a_busy(g);
337 if (err)
338 return err;
339 gk20a_idle(g);
340
341 return count;
342}
343static ssize_t railgate_delay_show(struct device *dev,
344 struct device_attribute *attr, char *buf)
345{
346 struct gk20a *g = get_gk20a(dev);
347
348 return snprintf(buf, PAGE_SIZE, "%d\n", g->railgate_delay);
349}
350static DEVICE_ATTR(railgate_delay, ROOTRW, railgate_delay_show,
351 railgate_delay_store);
352
353static ssize_t is_railgated_show(struct device *dev,
354 struct device_attribute *attr, char *buf)
355{
356 struct gk20a_platform *platform = dev_get_drvdata(dev);
357 bool is_railgated = 0;
358
359 if (platform->is_railgated)
360 is_railgated = platform->is_railgated(dev);
361
362 return snprintf(buf, PAGE_SIZE, "%s\n", is_railgated ? "yes" : "no");
363}
364static DEVICE_ATTR(is_railgated, S_IRUGO, is_railgated_show, NULL);
365
366static ssize_t counters_show(struct device *dev,
367 struct device_attribute *attr, char *buf)
368{
369 struct gk20a *g = get_gk20a(dev);
370 u32 busy_cycles, total_cycles;
371 ssize_t res;
372
373 nvgpu_pmu_get_load_counters(g, &busy_cycles, &total_cycles);
374
375 res = snprintf(buf, PAGE_SIZE, "%u %u\n", busy_cycles, total_cycles);
376
377 return res;
378}
379static DEVICE_ATTR(counters, S_IRUGO, counters_show, NULL);
380
381static ssize_t counters_show_reset(struct device *dev,
382 struct device_attribute *attr, char *buf)
383{
384 ssize_t res = counters_show(dev, attr, buf);
385 struct gk20a *g = get_gk20a(dev);
386
387 nvgpu_pmu_reset_load_counters(g);
388
389 return res;
390}
391static DEVICE_ATTR(counters_reset, S_IRUGO, counters_show_reset, NULL);
392
393static ssize_t gk20a_load_show(struct device *dev,
394 struct device_attribute *attr,
395 char *buf)
396{
397 struct gk20a *g = get_gk20a(dev);
398 u32 busy_time;
399 ssize_t res;
400 int err;
401
402 if (!g->power_on) {
403 busy_time = 0;
404 } else {
405 err = gk20a_busy(g);
406 if (err)
407 return err;
408
409 nvgpu_pmu_load_update(g);
410 nvgpu_pmu_load_norm(g, &busy_time);
411 gk20a_idle(g);
412 }
413
414 res = snprintf(buf, PAGE_SIZE, "%u\n", busy_time);
415
416 return res;
417}
418static DEVICE_ATTR(load, S_IRUGO, gk20a_load_show, NULL);
419
420static ssize_t elpg_enable_store(struct device *dev,
421 struct device_attribute *attr, const char *buf, size_t count)
422{
423 struct gk20a *g = get_gk20a(dev);
424 unsigned long val = 0;
425 int err;
426
427 if (kstrtoul(buf, 10, &val) < 0)
428 return -EINVAL;
429
430 if (!g->power_on) {
431 return -EINVAL;
432 } else {
433 err = gk20a_busy(g);
434 if (err)
435 return -EAGAIN;
436 /*
437 * Since elpg is refcounted, we should not unnecessarily call
438 * enable/disable if it is already so.
439 */
440 if (val != 0) {
441 nvgpu_pg_elpg_set_elpg_enabled(g, true);
442 } else {
443 nvgpu_pg_elpg_set_elpg_enabled(g, false);
444 }
445 gk20a_idle(g);
446 }
447 nvgpu_info(g, "ELPG is %s.", val ? "enabled" :
448 "disabled");
449
450 return count;
451}
452
453static ssize_t elpg_enable_read(struct device *dev,
454 struct device_attribute *attr, char *buf)
455{
456 struct gk20a *g = get_gk20a(dev);
457
458 return snprintf(buf, PAGE_SIZE, "%d\n",
459 nvgpu_pg_elpg_is_enabled(g) ? 1 : 0);
460}
461
462static DEVICE_ATTR(elpg_enable, ROOTRW, elpg_enable_read, elpg_enable_store);
463
464static ssize_t ldiv_slowdown_factor_store(struct device *dev,
465 struct device_attribute *attr, const char *buf, size_t count)
466{
467 struct gk20a *g = get_gk20a(dev);
468 unsigned long val = 0;
469 int err;
470
471 if (kstrtoul(buf, 10, &val) < 0) {
472 nvgpu_err(g, "parse error for input SLOWDOWN factor\n");
473 return -EINVAL;
474 }
475
476 if (val >= SLOWDOWN_FACTOR_FPDIV_BYMAX) {
477 nvgpu_err(g, "Invalid SLOWDOWN factor\n");
478 return -EINVAL;
479 }
480
481 if (val == g->ldiv_slowdown_factor)
482 return count;
483
484 if (!g->power_on) {
485 g->ldiv_slowdown_factor = val;
486 } else {
487 err = gk20a_busy(g);
488 if (err)
489 return -EAGAIN;
490
491 g->ldiv_slowdown_factor = val;
492
493 if (g->ops.pmu.pmu_pg_init_param)
494 g->ops.pmu.pmu_pg_init_param(g,
495 PMU_PG_ELPG_ENGINE_ID_GRAPHICS);
496
497 gk20a_idle(g);
498 }
499
500 nvgpu_info(g, "ldiv_slowdown_factor is %x\n", g->ldiv_slowdown_factor);
501
502 return count;
503}
504
505static ssize_t ldiv_slowdown_factor_read(struct device *dev,
506 struct device_attribute *attr, char *buf)
507{
508 struct gk20a *g = get_gk20a(dev);
509
510 return snprintf(buf, PAGE_SIZE, "%d\n", g->ldiv_slowdown_factor);
511}
512
513static DEVICE_ATTR(ldiv_slowdown_factor, ROOTRW,
514 ldiv_slowdown_factor_read, ldiv_slowdown_factor_store);
515
516static ssize_t mscg_enable_store(struct device *dev,
517 struct device_attribute *attr, const char *buf, size_t count)
518{
519 struct gk20a *g = get_gk20a(dev);
520 struct nvgpu_pmu *pmu = &g->pmu;
521 unsigned long val = 0;
522 int err;
523
524 if (kstrtoul(buf, 10, &val) < 0)
525 return -EINVAL;
526
527 if (!g->power_on) {
528 g->mscg_enabled = val ? true : false;
529 } else {
530 err = gk20a_busy(g);
531 if (err)
532 return -EAGAIN;
533 /*
534 * Since elpg is refcounted, we should not unnecessarily call
535 * enable/disable if it is already so.
536 */
537 if (val && !g->mscg_enabled) {
538 g->mscg_enabled = true;
539 if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
540 PMU_PG_LPWR_FEATURE_MSCG)) {
541 if (!ACCESS_ONCE(pmu->mscg_stat)) {
542 WRITE_ONCE(pmu->mscg_stat,
543 PMU_MSCG_ENABLED);
544 /* make status visible */
545 smp_mb();
546 }
547 }
548
549 } else if (!val && g->mscg_enabled) {
550 if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
551 PMU_PG_LPWR_FEATURE_MSCG)) {
552 nvgpu_pmu_pg_global_enable(g, false);
553 WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
554 /* make status visible */
555 smp_mb();
556 g->mscg_enabled = false;
557 if (nvgpu_pg_elpg_is_enabled(g)) {
558 nvgpu_pg_elpg_enable(g);
559 }
560 }
561 g->mscg_enabled = false;
562 }
563 gk20a_idle(g);
564 }
565 nvgpu_info(g, "MSCG is %s.", g->mscg_enabled ? "enabled" :
566 "disabled");
567
568 return count;
569}
570
571static ssize_t mscg_enable_read(struct device *dev,
572 struct device_attribute *attr, char *buf)
573{
574 struct gk20a *g = get_gk20a(dev);
575
576 return snprintf(buf, PAGE_SIZE, "%d\n", g->mscg_enabled ? 1 : 0);
577}
578
579static DEVICE_ATTR(mscg_enable, ROOTRW, mscg_enable_read, mscg_enable_store);
580
581static ssize_t aelpg_param_store(struct device *dev,
582 struct device_attribute *attr, const char *buf, size_t count)
583{
584 struct gk20a *g = get_gk20a(dev);
585 int status = 0;
586 union pmu_ap_cmd ap_cmd;
587 int *paramlist = (int *)g->pmu.aelpg_param;
588 u32 defaultparam[5] = {
589 APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US,
590 APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US,
591 APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US,
592 APCTRL_POWER_BREAKEVEN_DEFAULT_US,
593 APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT
594 };
595
596 /* Get each parameter value from input string*/
597 sscanf(buf, "%d %d %d %d %d", &paramlist[0], &paramlist[1],
598 &paramlist[2], &paramlist[3], &paramlist[4]);
599
600 /* If parameter value is 0 then reset to SW default values*/
601 if ((paramlist[0] | paramlist[1] | paramlist[2]
602 | paramlist[3] | paramlist[4]) == 0x00) {
603 memcpy(paramlist, defaultparam, sizeof(defaultparam));
604 }
605
606 /* If aelpg is enabled & pmu is ready then post values to
607 * PMU else store then post later
608 */
609 if (g->aelpg_enabled && g->pmu.pmu_ready) {
610 /* Disable AELPG */
611 ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
612 ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
613 status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
614
615 /* Enable AELPG */
616 nvgpu_aelpg_init(g);
617 nvgpu_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
618 }
619
620 return count;
621}
622
623static ssize_t aelpg_param_read(struct device *dev,
624 struct device_attribute *attr, char *buf)
625{
626 struct gk20a *g = get_gk20a(dev);
627
628 return snprintf(buf, PAGE_SIZE,
629 "%d %d %d %d %d\n", g->pmu.aelpg_param[0],
630 g->pmu.aelpg_param[1], g->pmu.aelpg_param[2],
631 g->pmu.aelpg_param[3], g->pmu.aelpg_param[4]);
632}
633
634static DEVICE_ATTR(aelpg_param, ROOTRW,
635 aelpg_param_read, aelpg_param_store);
636
637static ssize_t aelpg_enable_store(struct device *dev,
638 struct device_attribute *attr, const char *buf, size_t count)
639{
640 struct gk20a *g = get_gk20a(dev);
641 unsigned long val = 0;
642 int status = 0;
643 union pmu_ap_cmd ap_cmd;
644 int err;
645
646 if (kstrtoul(buf, 10, &val) < 0)
647 return -EINVAL;
648
649 err = gk20a_busy(g);
650 if (err)
651 return err;
652
653 if (g->pmu.pmu_ready) {
654 if (val && !g->aelpg_enabled) {
655 g->aelpg_enabled = true;
656 /* Enable AELPG */
657 ap_cmd.enable_ctrl.cmd_id = PMU_AP_CMD_ID_ENABLE_CTRL;
658 ap_cmd.enable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
659 status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
660 } else if (!val && g->aelpg_enabled) {
661 g->aelpg_enabled = false;
662 /* Disable AELPG */
663 ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
664 ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
665 status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
666 }
667 } else {
668 nvgpu_info(g, "PMU is not ready, AELPG request failed");
669 }
670 gk20a_idle(g);
671
672 nvgpu_info(g, "AELPG is %s.", g->aelpg_enabled ? "enabled" :
673 "disabled");
674
675 return count;
676}
677
678static ssize_t aelpg_enable_read(struct device *dev,
679 struct device_attribute *attr, char *buf)
680{
681 struct gk20a *g = get_gk20a(dev);
682
683 return snprintf(buf, PAGE_SIZE, "%d\n", g->aelpg_enabled ? 1 : 0);
684}
685
686static DEVICE_ATTR(aelpg_enable, ROOTRW,
687 aelpg_enable_read, aelpg_enable_store);
688
689
690static ssize_t allow_all_enable_read(struct device *dev,
691 struct device_attribute *attr, char *buf)
692{
693 struct gk20a *g = get_gk20a(dev);
694
695 return snprintf(buf, PAGE_SIZE, "%d\n", g->allow_all ? 1 : 0);
696}
697
698static ssize_t allow_all_enable_store(struct device *dev,
699 struct device_attribute *attr, const char *buf, size_t count)
700{
701 struct gk20a *g = get_gk20a(dev);
702 unsigned long val = 0;
703 int err;
704
705 if (kstrtoul(buf, 10, &val) < 0)
706 return -EINVAL;
707
708 err = gk20a_busy(g);
709 g->allow_all = (val ? true : false);
710 gk20a_idle(g);
711
712 return count;
713}
714
715static DEVICE_ATTR(allow_all, ROOTRW,
716 allow_all_enable_read, allow_all_enable_store);
717
718static ssize_t emc3d_ratio_store(struct device *dev,
719 struct device_attribute *attr, const char *buf, size_t count)
720{
721 struct gk20a *g = get_gk20a(dev);
722 unsigned long val = 0;
723
724 if (kstrtoul(buf, 10, &val) < 0)
725 return -EINVAL;
726
727 g->emc3d_ratio = val;
728
729 return count;
730}
731
732static ssize_t emc3d_ratio_read(struct device *dev,
733 struct device_attribute *attr, char *buf)
734{
735 struct gk20a *g = get_gk20a(dev);
736
737 return snprintf(buf, PAGE_SIZE, "%d\n", g->emc3d_ratio);
738}
739
740static DEVICE_ATTR(emc3d_ratio, ROOTRW, emc3d_ratio_read, emc3d_ratio_store);
741
742static ssize_t fmax_at_vmin_safe_read(struct device *dev,
743 struct device_attribute *attr, char *buf)
744{
745 struct gk20a *g = get_gk20a(dev);
746 unsigned long gpu_fmax_at_vmin_hz = 0;
747
748 if (g->ops.clk.get_fmax_at_vmin_safe)
749 gpu_fmax_at_vmin_hz = g->ops.clk.get_fmax_at_vmin_safe(g);
750
751 return snprintf(buf, PAGE_SIZE, "%d\n", (int)(gpu_fmax_at_vmin_hz));
752}
753
754static DEVICE_ATTR(fmax_at_vmin_safe, S_IRUGO, fmax_at_vmin_safe_read, NULL);
755
#ifdef CONFIG_PM
/*
 * Force the GPU idle (write 1) or release a previously forced idle
 * (write 0). Redundant writes are successful no-ops.
 */
static ssize_t force_idle_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err = 0;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val) {
		/* already forced idle: do nothing */
		if (g->forced_idle)
			return count;

		err = __gk20a_do_idle(g, false);
		if (!err) {
			g->forced_idle = 1;
			nvgpu_info(g, "gpu is idle : %d", g->forced_idle);
		}
	} else {
		/* not forced idle: do nothing */
		if (!g->forced_idle)
			return count;

		err = __gk20a_do_unidle(g);
		if (!err) {
			g->forced_idle = 0;
			nvgpu_info(g, "gpu is idle : %d", g->forced_idle);
		}
	}

	return count;
}

/* Report the forced-idle state as 0/1. */
static ssize_t force_idle_read(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->forced_idle ? 1 : 0);
}

static DEVICE_ATTR(force_idle, ROOTRW, force_idle_read, force_idle_store);
#endif
804
805static bool is_tpc_mask_valid(struct gk20a *g, u32 tpc_mask)
806{
807 u32 i;
808 bool valid = false;
809
810 for (i = 0; i < MAX_TPC_PG_CONFIGS; i++) {
811 if (tpc_mask == g->valid_tpc_mask[i]) {
812 valid = true;
813 break;
814 }
815 }
816 return valid;
817}
818
819static ssize_t tpc_pg_mask_read(struct device *dev,
820 struct device_attribute *attr, char *buf)
821{
822 struct gk20a *g = get_gk20a(dev);
823
824 return snprintf(buf, PAGE_SIZE, "%d\n", g->tpc_pg_mask);
825}
826
827static ssize_t tpc_pg_mask_store(struct device *dev,
828 struct device_attribute *attr, const char *buf, size_t count)
829{
830 struct gk20a *g = get_gk20a(dev);
831 struct gr_gk20a *gr = &g->gr;
832 unsigned long val = 0;
833
834 nvgpu_mutex_acquire(&g->tpc_pg_lock);
835
836 if (kstrtoul(buf, 10, &val) < 0) {
837 nvgpu_err(g, "invalid value");
838 nvgpu_mutex_release(&g->tpc_pg_lock);
839 return -EINVAL;
840 }
841
842 if (val == g->tpc_pg_mask) {
843 nvgpu_info(g, "no value change, same mask already set");
844 goto exit;
845 }
846
847 if (gr->ctx_vars.golden_image_size) {
848 nvgpu_err(g, "golden image size already initialized");
849 nvgpu_mutex_release(&g->tpc_pg_lock);
850 return -ENODEV;
851 }
852
853 /* checking that the value from userspace is within
854 * the possible valid TPC configurations.
855 */
856 if (is_tpc_mask_valid(g, (u32)val)) {
857 g->tpc_pg_mask = val;
858 } else {
859 nvgpu_err(g, "TPC-PG mask is invalid");
860 nvgpu_mutex_release(&g->tpc_pg_lock);
861 return -EINVAL;
862 }
863exit:
864 nvgpu_mutex_release(&g->tpc_pg_lock);
865
866 return count;
867}
868
869static DEVICE_ATTR(tpc_pg_mask, ROOTRW, tpc_pg_mask_read, tpc_pg_mask_store);
870
871static ssize_t tpc_fs_mask_store(struct device *dev,
872 struct device_attribute *attr, const char *buf, size_t count)
873{
874 struct gk20a *g = get_gk20a(dev);
875 unsigned long val = 0;
876
877 if (kstrtoul(buf, 10, &val) < 0)
878 return -EINVAL;
879
880 if (!g->gr.gpc_tpc_mask)
881 return -ENODEV;
882
883 if (val && val != g->gr.gpc_tpc_mask[0] && g->ops.gr.set_gpc_tpc_mask) {
884 g->gr.gpc_tpc_mask[0] = val;
885 g->tpc_fs_mask_user = val;
886
887 g->ops.gr.set_gpc_tpc_mask(g, 0);
888
889 nvgpu_vfree(g, g->gr.ctx_vars.local_golden_image);
890 g->gr.ctx_vars.local_golden_image = NULL;
891 g->gr.ctx_vars.golden_image_initialized = false;
892 g->gr.ctx_vars.golden_image_size = 0;
893 /* Cause next poweron to reinit just gr */
894 g->gr.sw_ready = false;
895 }
896
897 return count;
898}
899
900static ssize_t tpc_fs_mask_read(struct device *dev,
901 struct device_attribute *attr, char *buf)
902{
903 struct gk20a *g = get_gk20a(dev);
904 struct gr_gk20a *gr = &g->gr;
905 u32 gpc_index;
906 u32 tpc_fs_mask = 0;
907 int err = 0;
908
909 err = gk20a_busy(g);
910 if (err)
911 return err;
912
913 for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
914 if (g->ops.gr.get_gpc_tpc_mask)
915 tpc_fs_mask |=
916 g->ops.gr.get_gpc_tpc_mask(g, gpc_index) <<
917 (gr->max_tpc_per_gpc_count * gpc_index);
918 }
919
920 gk20a_idle(g);
921
922 return snprintf(buf, PAGE_SIZE, "0x%x\n", tpc_fs_mask);
923}
924
925static DEVICE_ATTR(tpc_fs_mask, ROOTRW, tpc_fs_mask_read, tpc_fs_mask_store);
926
927static ssize_t min_timeslice_us_read(struct device *dev,
928 struct device_attribute *attr, char *buf)
929{
930 struct gk20a *g = get_gk20a(dev);
931
932 return snprintf(buf, PAGE_SIZE, "%u\n", g->min_timeslice_us);
933}
934
935static ssize_t min_timeslice_us_store(struct device *dev,
936 struct device_attribute *attr, const char *buf, size_t count)
937{
938 struct gk20a *g = get_gk20a(dev);
939 unsigned long val;
940
941 if (kstrtoul(buf, 10, &val) < 0)
942 return -EINVAL;
943
944 if (val > g->max_timeslice_us)
945 return -EINVAL;
946
947 g->min_timeslice_us = val;
948
949 return count;
950}
951
952static DEVICE_ATTR(min_timeslice_us, ROOTRW, min_timeslice_us_read,
953 min_timeslice_us_store);
954
955static ssize_t max_timeslice_us_read(struct device *dev,
956 struct device_attribute *attr, char *buf)
957{
958 struct gk20a *g = get_gk20a(dev);
959
960 return snprintf(buf, PAGE_SIZE, "%u\n", g->max_timeslice_us);
961}
962
963static ssize_t max_timeslice_us_store(struct device *dev,
964 struct device_attribute *attr, const char *buf, size_t count)
965{
966 struct gk20a *g = get_gk20a(dev);
967 unsigned long val;
968
969 if (kstrtoul(buf, 10, &val) < 0)
970 return -EINVAL;
971
972 if (val < g->min_timeslice_us)
973 return -EINVAL;
974
975 g->max_timeslice_us = val;
976
977 return count;
978}
979
980static DEVICE_ATTR(max_timeslice_us, ROOTRW, max_timeslice_us_read,
981 max_timeslice_us_store);
982
983static ssize_t czf_bypass_store(struct device *dev,
984 struct device_attribute *attr, const char *buf, size_t count)
985{
986 struct gk20a *g = get_gk20a(dev);
987 unsigned long val;
988
989 if (kstrtoul(buf, 10, &val) < 0)
990 return -EINVAL;
991
992 if (val >= 4)
993 return -EINVAL;
994
995 g->gr.czf_bypass = val;
996
997 return count;
998}
999
1000static ssize_t czf_bypass_read(struct device *dev,
1001 struct device_attribute *attr, char *buf)
1002{
1003 struct gk20a *g = get_gk20a(dev);
1004
1005 return sprintf(buf, "%d\n", g->gr.czf_bypass);
1006}
1007
1008static DEVICE_ATTR(czf_bypass, ROOTRW, czf_bypass_read, czf_bypass_store);
1009
1010static ssize_t pd_max_batches_store(struct device *dev,
1011 struct device_attribute *attr, const char *buf, size_t count)
1012{
1013 struct gk20a *g = get_gk20a(dev);
1014 unsigned long val;
1015
1016 if (kstrtoul(buf, 10, &val) < 0)
1017 return -EINVAL;
1018
1019 if (val > 64)
1020 return -EINVAL;
1021
1022 g->gr.pd_max_batches = val;
1023
1024 return count;
1025}
1026
1027static ssize_t pd_max_batches_read(struct device *dev,
1028 struct device_attribute *attr, char *buf)
1029{
1030 struct gk20a *g = get_gk20a(dev);
1031
1032 return sprintf(buf, "%d\n", g->gr.pd_max_batches);
1033}
1034
1035static DEVICE_ATTR(pd_max_batches, ROOTRW, pd_max_batches_read, pd_max_batches_store);
1036
1037static ssize_t gfxp_wfi_timeout_count_store(struct device *dev,
1038 struct device_attribute *attr, const char *buf, size_t count)
1039{
1040 struct gk20a *g = get_gk20a(dev);
1041 struct gr_gk20a *gr = &g->gr;
1042 unsigned long val = 0;
1043 int err = -1;
1044
1045 if (kstrtoul(buf, 10, &val) < 0)
1046 return -EINVAL;
1047
1048 if (g->ops.gr.get_max_gfxp_wfi_timeout_count) {
1049 if (val >= g->ops.gr.get_max_gfxp_wfi_timeout_count(g))
1050 return -EINVAL;
1051 }
1052
1053 gr->gfxp_wfi_timeout_count = val;
1054
1055 if (g->ops.gr.init_preemption_state && g->power_on) {
1056 err = gk20a_busy(g);
1057 if (err)
1058 return err;
1059
1060 err = gr_gk20a_elpg_protected_call(g,
1061 g->ops.gr.init_preemption_state(g));
1062
1063 gk20a_idle(g);
1064
1065 if (err)
1066 return err;
1067 }
1068 return count;
1069}
1070
1071static ssize_t gfxp_wfi_timeout_unit_store(struct device *dev,
1072 struct device_attribute *attr, const char *buf, size_t count)
1073{
1074 struct gk20a *g = get_gk20a(dev);
1075 struct gr_gk20a *gr = &g->gr;
1076 int err = -1;
1077
1078 if (count > 0 && buf[0] == 's')
1079 /* sysclk */
1080 gr->gfxp_wfi_timeout_unit = GFXP_WFI_TIMEOUT_UNIT_SYSCLK;
1081 else
1082 /* usec */
1083 gr->gfxp_wfi_timeout_unit = GFXP_WFI_TIMEOUT_UNIT_USEC;
1084
1085 if (g->ops.gr.init_preemption_state && g->power_on) {
1086 err = gk20a_busy(g);
1087 if (err)
1088 return err;
1089
1090 err = gr_gk20a_elpg_protected_call(g,
1091 g->ops.gr.init_preemption_state(g));
1092
1093 gk20a_idle(g);
1094
1095 if (err)
1096 return err;
1097 }
1098
1099 return count;
1100}
1101
1102static ssize_t gfxp_wfi_timeout_count_read(struct device *dev,
1103 struct device_attribute *attr, char *buf)
1104{
1105 struct gk20a *g = get_gk20a(dev);
1106 struct gr_gk20a *gr = &g->gr;
1107 u32 val = gr->gfxp_wfi_timeout_count;
1108
1109 return snprintf(buf, PAGE_SIZE, "%d\n", val);
1110}
1111
1112static ssize_t gfxp_wfi_timeout_unit_read(struct device *dev,
1113 struct device_attribute *attr, char *buf)
1114{
1115 struct gk20a *g = get_gk20a(dev);
1116 struct gr_gk20a *gr = &g->gr;
1117
1118 if (gr->gfxp_wfi_timeout_unit == GFXP_WFI_TIMEOUT_UNIT_USEC)
1119 return snprintf(buf, PAGE_SIZE, "usec\n");
1120 else
1121 return snprintf(buf, PAGE_SIZE, "sysclk\n");
1122}
1123
1124static DEVICE_ATTR(gfxp_wfi_timeout_count, (S_IRWXU|S_IRGRP|S_IROTH),
1125 gfxp_wfi_timeout_count_read, gfxp_wfi_timeout_count_store);
1126
1127static DEVICE_ATTR(gfxp_wfi_timeout_unit, (S_IRWXU|S_IRGRP|S_IROTH),
1128 gfxp_wfi_timeout_unit_read, gfxp_wfi_timeout_unit_store);
1129
1130static ssize_t comptag_mem_deduct_store(struct device *dev,
1131 struct device_attribute *attr,
1132 const char *buf, size_t count)
1133{
1134 struct gk20a *g = get_gk20a(dev);
1135 unsigned long val;
1136
1137 if (kstrtoul(buf, 10, &val) < 0)
1138 return -EINVAL;
1139
1140 if (val >= totalram_size_in_mb) {
1141 dev_err(dev, "comptag_mem_deduct can not be set above %lu",
1142 totalram_size_in_mb);
1143 return -EINVAL;
1144 }
1145
1146 g->gr.comptag_mem_deduct = val;
1147 /* Deduct the part taken by the running system */
1148 g->gr.max_comptag_mem -= val;
1149
1150 return count;
1151}
1152
1153static ssize_t comptag_mem_deduct_show(struct device *dev,
1154 struct device_attribute *attr, char *buf)
1155{
1156 struct gk20a *g = get_gk20a(dev);
1157
1158 return sprintf(buf, "%d\n", g->gr.comptag_mem_deduct);
1159}
1160
1161static DEVICE_ATTR(comptag_mem_deduct, ROOTRW,
1162 comptag_mem_deduct_show, comptag_mem_deduct_store);
1163
1164void nvgpu_remove_sysfs(struct device *dev)
1165{
1166 device_remove_file(dev, &dev_attr_elcg_enable);
1167 device_remove_file(dev, &dev_attr_blcg_enable);
1168 device_remove_file(dev, &dev_attr_slcg_enable);
1169 device_remove_file(dev, &dev_attr_ptimer_scale_factor);
1170 device_remove_file(dev, &dev_attr_ptimer_ref_freq);
1171 device_remove_file(dev, &dev_attr_ptimer_src_freq);
1172 device_remove_file(dev, &dev_attr_elpg_enable);
1173 device_remove_file(dev, &dev_attr_mscg_enable);
1174 device_remove_file(dev, &dev_attr_emc3d_ratio);
1175 device_remove_file(dev, &dev_attr_ldiv_slowdown_factor);
1176
1177 device_remove_file(dev, &dev_attr_fmax_at_vmin_safe);
1178
1179 device_remove_file(dev, &dev_attr_counters);
1180 device_remove_file(dev, &dev_attr_counters_reset);
1181 device_remove_file(dev, &dev_attr_load);
1182 device_remove_file(dev, &dev_attr_railgate_delay);
1183 device_remove_file(dev, &dev_attr_is_railgated);
1184#ifdef CONFIG_PM
1185 device_remove_file(dev, &dev_attr_force_idle);
1186 device_remove_file(dev, &dev_attr_railgate_enable);
1187#endif
1188 device_remove_file(dev, &dev_attr_aelpg_param);
1189 device_remove_file(dev, &dev_attr_aelpg_enable);
1190 device_remove_file(dev, &dev_attr_allow_all);
1191 device_remove_file(dev, &dev_attr_tpc_fs_mask);
1192 device_remove_file(dev, &dev_attr_tpc_pg_mask);
1193 device_remove_file(dev, &dev_attr_min_timeslice_us);
1194 device_remove_file(dev, &dev_attr_max_timeslice_us);
1195
1196#ifdef CONFIG_TEGRA_GK20A_NVHOST
1197 nvgpu_nvhost_remove_symlink(get_gk20a(dev));
1198#endif
1199
1200 device_remove_file(dev, &dev_attr_czf_bypass);
1201 device_remove_file(dev, &dev_attr_pd_max_batches);
1202 device_remove_file(dev, &dev_attr_gfxp_wfi_timeout_count);
1203 device_remove_file(dev, &dev_attr_gfxp_wfi_timeout_unit);
1204 device_remove_file(dev, &dev_attr_gpu_powered_on);
1205
1206 device_remove_file(dev, &dev_attr_comptag_mem_deduct);
1207
1208 if (strcmp(dev_name(dev), "gpu.0")) {
1209 struct kobject *kobj = &dev->kobj;
1210 struct device *parent = container_of((kobj->parent),
1211 struct device, kobj);
1212 sysfs_remove_link(&parent->kobj, "gpu.0");
1213 }
1214}
1215
1216int nvgpu_create_sysfs(struct device *dev)
1217{
1218 struct gk20a *g = get_gk20a(dev);
1219 int error = 0;
1220
1221 error |= device_create_file(dev, &dev_attr_elcg_enable);
1222 error |= device_create_file(dev, &dev_attr_blcg_enable);
1223 error |= device_create_file(dev, &dev_attr_slcg_enable);
1224 error |= device_create_file(dev, &dev_attr_ptimer_scale_factor);
1225 error |= device_create_file(dev, &dev_attr_ptimer_ref_freq);
1226 error |= device_create_file(dev, &dev_attr_ptimer_src_freq);
1227 error |= device_create_file(dev, &dev_attr_elpg_enable);
1228 error |= device_create_file(dev, &dev_attr_mscg_enable);
1229 error |= device_create_file(dev, &dev_attr_emc3d_ratio);
1230 error |= device_create_file(dev, &dev_attr_ldiv_slowdown_factor);
1231
1232 error |= device_create_file(dev, &dev_attr_fmax_at_vmin_safe);
1233
1234 error |= device_create_file(dev, &dev_attr_counters);
1235 error |= device_create_file(dev, &dev_attr_counters_reset);
1236 error |= device_create_file(dev, &dev_attr_load);
1237 error |= device_create_file(dev, &dev_attr_railgate_delay);
1238 error |= device_create_file(dev, &dev_attr_is_railgated);
1239#ifdef CONFIG_PM
1240 error |= device_create_file(dev, &dev_attr_force_idle);
1241 error |= device_create_file(dev, &dev_attr_railgate_enable);
1242#endif
1243 error |= device_create_file(dev, &dev_attr_aelpg_param);
1244 error |= device_create_file(dev, &dev_attr_aelpg_enable);
1245 error |= device_create_file(dev, &dev_attr_allow_all);
1246 error |= device_create_file(dev, &dev_attr_tpc_fs_mask);
1247 error |= device_create_file(dev, &dev_attr_tpc_pg_mask);
1248 error |= device_create_file(dev, &dev_attr_min_timeslice_us);
1249 error |= device_create_file(dev, &dev_attr_max_timeslice_us);
1250
1251#ifdef CONFIG_TEGRA_GK20A_NVHOST
1252 error |= nvgpu_nvhost_create_symlink(g);
1253#endif
1254
1255 error |= device_create_file(dev, &dev_attr_czf_bypass);
1256 error |= device_create_file(dev, &dev_attr_pd_max_batches);
1257 error |= device_create_file(dev, &dev_attr_gfxp_wfi_timeout_count);
1258 error |= device_create_file(dev, &dev_attr_gfxp_wfi_timeout_unit);
1259 error |= device_create_file(dev, &dev_attr_gpu_powered_on);
1260
1261 error |= device_create_file(dev, &dev_attr_comptag_mem_deduct);
1262
1263 if (strcmp(dev_name(dev), "gpu.0")) {
1264 struct kobject *kobj = &dev->kobj;
1265 struct device *parent = container_of((kobj->parent),
1266 struct device, kobj);
1267 error |= sysfs_create_link(&parent->kobj,
1268 &dev->kobj, "gpu.0");
1269 }
1270
1271 if (error)
1272 nvgpu_err(g, "Failed to create sysfs attributes!\n");
1273
1274 return error;
1275}