Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/sysfs.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/sysfs.c | 1099
1 file changed, 1099 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/sysfs.c b/drivers/gpu/nvgpu/common/linux/sysfs.c
new file mode 100644
index 00000000..b1e7d1ed
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/sysfs.c
@@ -0,0 +1,1099 @@
/*
 * Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/fb.h>
#include <soc/tegra/tegra-dvfs.h>

#include <nvgpu/kmem.h>
#include <nvgpu/nvhost.h>

#include "sysfs.h"
#include "platform_gk20a.h"
#include "gk20a/pmu_gk20a.h"
#include "gk20a/gr_gk20a.h"

#define PTIMER_FP_FACTOR	1000000

#define ROOTRW (S_IRWXU|S_IRGRP|S_IROTH)

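/*
 * Note: ROOTRW expands to mode 0744 (S_IRWXU is 0700, S_IRGRP 0040,
 * S_IROTH 0004): read/write for root, read-only for everyone else. The
 * owner execute bit is meaningless for a sysfs attribute but harmless.
 */
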
static ssize_t elcg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	if (val) {
		g->elcg_enabled = true;
		gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_AUTO);
	} else {
		g->elcg_enabled = false;
		gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
	}

	gk20a_idle(g);

	nvgpu_info(g, "ELCG is %s.", g->elcg_enabled ? "enabled" :
			"disabled");

	return count;
}

static ssize_t elcg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->elcg_enabled ? 1 : 0);
}

static DEVICE_ATTR(elcg_enable, ROOTRW, elcg_enable_read, elcg_enable_store);

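/*
 * Example usage from user space (the sysfs path is platform-dependent;
 * /sys/devices/gpu.0 is only illustrative):
 *
 *   echo 1 > /sys/devices/gpu.0/elcg_enable    # engine-level CG -> ELCG_AUTO
 *   echo 0 > /sys/devices/gpu.0/elcg_enable    # engine-level CG -> ELCG_RUN
 *   cat /sys/devices/gpu.0/elcg_enable
 */
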
static ssize_t blcg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val)
		g->blcg_enabled = true;
	else
		g->blcg_enabled = false;

	err = gk20a_busy(g);
	if (err)
		return err;

	if (g->ops.clock_gating.blcg_bus_load_gating_prod)
		g->ops.clock_gating.blcg_bus_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_ce_load_gating_prod)
		g->ops.clock_gating.blcg_ce_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod)
		g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_fb_load_gating_prod)
		g->ops.clock_gating.blcg_fb_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_fifo_load_gating_prod)
		g->ops.clock_gating.blcg_fifo_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_gr_load_gating_prod)
		g->ops.clock_gating.blcg_gr_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_ltc_load_gating_prod)
		g->ops.clock_gating.blcg_ltc_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
		g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_xbar_load_gating_prod)
		g->ops.clock_gating.blcg_xbar_load_gating_prod(g,
				g->blcg_enabled);
	gk20a_idle(g);

	nvgpu_info(g, "BLCG is %s.", g->blcg_enabled ? "enabled" :
			"disabled");

	return count;
}

static ssize_t blcg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->blcg_enabled ? 1 : 0);
}

static DEVICE_ATTR(blcg_enable, ROOTRW, blcg_enable_read, blcg_enable_store);

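/*
 * The blcg_*_load_gating_prod hooks are per-chip HAL callbacks; each is
 * invoked only when the chip's HAL actually implements it, so a single
 * store updates every unit (bus, CE, FB, FIFO, GR, LTC, PMU, XBAR) that
 * supports block-level clock gating on this GPU. The slcg path below
 * follows the same pattern for second-level clock gating.
 */
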
static ssize_t slcg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val)
		g->slcg_enabled = true;
	else
		g->slcg_enabled = false;

	/*
	 * TODO: slcg_therm_load_gating is not enabled anywhere during
	 * init. Therefore, it would be incongruous to add it here. Once
	 * it is added to init, we should add it here too.
	 */
	err = gk20a_busy(g);
	if (err)
		return err;

	if (g->ops.clock_gating.slcg_bus_load_gating_prod)
		g->ops.clock_gating.slcg_bus_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_ce2_load_gating_prod)
		g->ops.clock_gating.slcg_ce2_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_chiplet_load_gating_prod)
		g->ops.clock_gating.slcg_chiplet_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod)
		g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_fb_load_gating_prod)
		g->ops.clock_gating.slcg_fb_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_fifo_load_gating_prod)
		g->ops.clock_gating.slcg_fifo_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_gr_load_gating_prod)
		g->ops.clock_gating.slcg_gr_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_ltc_load_gating_prod)
		g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_perf_load_gating_prod)
		g->ops.clock_gating.slcg_perf_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_priring_load_gating_prod)
		g->ops.clock_gating.slcg_priring_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
		g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_xbar_load_gating_prod)
		g->ops.clock_gating.slcg_xbar_load_gating_prod(g,
				g->slcg_enabled);
	gk20a_idle(g);

	nvgpu_info(g, "SLCG is %s.", g->slcg_enabled ? "enabled" :
			"disabled");

	return count;
}

static ssize_t slcg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->slcg_enabled ? 1 : 0);
}

static DEVICE_ATTR(slcg_enable, ROOTRW, slcg_enable_read, slcg_enable_store);

static ssize_t ptimer_scale_factor_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	u32 src_freq_hz = platform->ptimer_src_freq;
	u32 scaling_factor_fp;
	ssize_t res;

	if (!src_freq_hz) {
		nvgpu_err(g, "reference clk_m rate is not set correctly");
		return -EINVAL;
	}

	scaling_factor_fp = (u32)(PTIMER_REF_FREQ_HZ) /
				((u32)(src_freq_hz) /
				(u32)(PTIMER_FP_FACTOR));
	res = snprintf(buf,
		       PAGE_SIZE,
		       "%u.%u\n",
		       scaling_factor_fp / PTIMER_FP_FACTOR,
		       scaling_factor_fp % PTIMER_FP_FACTOR);

	return res;
}

static DEVICE_ATTR(ptimer_scale_factor,
		   S_IRUGO,
		   ptimer_scale_factor_show,
		   NULL);

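/*
 * The scale factor is PTIMER_REF_FREQ_HZ / src_freq_hz in fixed point
 * with six fractional digits. Worked example, assuming for illustration
 * that PTIMER_REF_FREQ_HZ (defined elsewhere) is 31250000 and the
 * source clock is 19200000 Hz: 19200000 / 1000000 = 19, and
 * 31250000 / 19 = 1644736, which prints as "1.644736". The integer
 * division of the source frequency truncates sub-MHz precision.
 */
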
static ssize_t ptimer_ref_freq_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	u32 src_freq_hz = platform->ptimer_src_freq;
	ssize_t res;

	if (!src_freq_hz) {
		nvgpu_err(g, "reference clk_m rate is not set correctly");
		return -EINVAL;
	}

	res = snprintf(buf, PAGE_SIZE, "%u\n", PTIMER_REF_FREQ_HZ);

	return res;
}

static DEVICE_ATTR(ptimer_ref_freq,
		   S_IRUGO,
		   ptimer_ref_freq_show,
		   NULL);

static ssize_t ptimer_src_freq_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	u32 src_freq_hz = platform->ptimer_src_freq;
	ssize_t res;

	if (!src_freq_hz) {
		nvgpu_err(g, "reference clk_m rate is not set correctly");
		return -EINVAL;
	}

	res = snprintf(buf, PAGE_SIZE, "%u\n", src_freq_hz);

	return res;
}

static DEVICE_ATTR(ptimer_src_freq,
		   S_IRUGO,
		   ptimer_src_freq_show,
		   NULL);

#if defined(CONFIG_PM)
static ssize_t railgate_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long railgate_enable = 0;
	/* dev is guaranteed to be valid here. Ok to de-reference */
	struct gk20a *g = get_gk20a(dev);
	int err = 0;

	if (kstrtoul(buf, 10, &railgate_enable) < 0)
		return -EINVAL;

	if (railgate_enable && !g->can_railgate) {
		/* release extra ref count */
		gk20a_idle(g);
		g->can_railgate = true;
		g->user_railgate_disabled = false;
	} else if (railgate_enable == 0 && g->can_railgate) {
		/* take extra ref count */
		err = gk20a_busy(g);
		if (err)
			return err;
		g->can_railgate = false;
		g->user_railgate_disabled = true;
	}

	nvgpu_info(g, "railgate is %s.", g->can_railgate ?
			"enabled" : "disabled");

	return count;
}

static ssize_t railgate_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->can_railgate ? 1 : 0);
}

static DEVICE_ATTR(railgate_enable, ROOTRW, railgate_enable_read,
		railgate_enable_store);
#endif
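/*
 * Disabling railgating works by holding one extra runtime PM reference
 * (gk20a_busy() without a matching gk20a_idle()), so autosuspend can
 * never bring the rail down; re-enabling drops that reference again.
 */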

static ssize_t railgate_delay_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	int railgate_delay = 0, ret = 0;
	struct gk20a *g = get_gk20a(dev);
	int err;

	if (!g->can_railgate) {
		nvgpu_info(g, "does not support power-gating");
		return count;
	}

	ret = sscanf(buf, "%d", &railgate_delay);
	if (ret == 1 && railgate_delay >= 0) {
		g->railgate_delay = railgate_delay;
		pm_runtime_set_autosuspend_delay(dev, g->railgate_delay);
	} else
		nvgpu_err(g, "Invalid powergate delay");

	/* wake up the system to make the rail-gating delay effective now */
	err = gk20a_busy(g);
	if (err)
		return err;
	gk20a_idle(g);

	return count;
}

static ssize_t railgate_delay_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->railgate_delay);
}

static DEVICE_ATTR(railgate_delay, ROOTRW, railgate_delay_show,
		railgate_delay_store);

static ssize_t is_railgated_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	bool is_railgated = false;

	if (platform->is_railgated)
		is_railgated = platform->is_railgated(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", is_railgated ? "yes" : "no");
}

static DEVICE_ATTR(is_railgated, S_IRUGO, is_railgated_show, NULL);

static ssize_t counters_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	u32 busy_cycles, total_cycles;
	ssize_t res;

	nvgpu_pmu_get_load_counters(g, &busy_cycles, &total_cycles);

	res = snprintf(buf, PAGE_SIZE, "%u %u\n", busy_cycles, total_cycles);

	return res;
}

static DEVICE_ATTR(counters, S_IRUGO, counters_show, NULL);

static ssize_t counters_show_reset(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	ssize_t res = counters_show(dev, attr, buf);
	struct gk20a *g = get_gk20a(dev);

	nvgpu_pmu_reset_load_counters(g);

	return res;
}

static DEVICE_ATTR(counters_reset, S_IRUGO, counters_show_reset, NULL);

static ssize_t gk20a_load_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	u32 busy_time;
	ssize_t res;
	int err;

	if (!g->power_on) {
		busy_time = 0;
	} else {
		err = gk20a_busy(g);
		if (err)
			return err;

		nvgpu_pmu_load_update(g);
		nvgpu_pmu_load_norm(g, &busy_time);
		gk20a_idle(g);
	}

	res = snprintf(buf, PAGE_SIZE, "%u\n", busy_time);

	return res;
}

static DEVICE_ATTR(load, S_IRUGO, gk20a_load_show, NULL);

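/*
 * "load" reports the PMU's normalized busy figure; "counters" reports
 * the raw "<busy_cycles> <total_cycles>" pair, and "counters_reset"
 * reads the same values and then zeroes them. A sampling loop can thus
 * poll counters_reset at a fixed interval and compute utilization as
 * busy_cycles / total_cycles per window.
 */
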
static ssize_t elpg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (!g->power_on) {
		g->elpg_enabled = val ? true : false;
	} else {
		err = gk20a_busy(g);
		if (err)
			return -EAGAIN;
		/*
		 * Since elpg is refcounted, we should not unnecessarily call
		 * enable/disable if it is already so.
		 */
		if (val && !g->elpg_enabled) {
			g->elpg_enabled = true;
			nvgpu_pmu_pg_global_enable(g, true);
		} else if (!val && g->elpg_enabled) {
			if (g->ops.pmu.pmu_pg_engines_feature_list &&
				g->ops.pmu.pmu_pg_engines_feature_list(g,
					PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
				PMU_PG_FEATURE_GR_POWER_GATING_ENABLED) {
				nvgpu_pmu_pg_global_enable(g, false);
				g->elpg_enabled = false;
			} else {
				g->elpg_enabled = false;
				nvgpu_pmu_pg_global_enable(g, false);
			}
		}
		gk20a_idle(g);
	}
	nvgpu_info(g, "ELPG is %s.", g->elpg_enabled ? "enabled" :
			"disabled");

	return count;
}

static ssize_t elpg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->elpg_enabled ? 1 : 0);
}

static DEVICE_ATTR(elpg_enable, ROOTRW, elpg_enable_read, elpg_enable_store);

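/*
 * Example: turning ELPG off for a profiling run (path illustrative):
 *   echo 0 > /sys/devices/gpu.0/elpg_enable
 * Writes made while the GPU is powered off only latch the flag; the
 * PG state is applied from the flag on the next power-up.
 */
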
static ssize_t mscg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	struct nvgpu_pmu *pmu = &g->pmu;
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (!g->power_on) {
		g->mscg_enabled = val ? true : false;
	} else {
		err = gk20a_busy(g);
		if (err)
			return -EAGAIN;
		/*
		 * Since elpg is refcounted, we should not unnecessarily call
		 * enable/disable if it is already so.
		 */
		if (val && !g->mscg_enabled) {
			g->mscg_enabled = true;
			if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
					PMU_PG_LPWR_FEATURE_MSCG)) {
				if (!ACCESS_ONCE(pmu->mscg_stat)) {
					WRITE_ONCE(pmu->mscg_stat,
						PMU_MSCG_ENABLED);
					/* make status visible */
					smp_mb();
				}
			}
		} else if (!val && g->mscg_enabled) {
			if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
					PMU_PG_LPWR_FEATURE_MSCG)) {
				nvgpu_pmu_pg_global_enable(g, false);
				WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
				/* make status visible */
				smp_mb();
				g->mscg_enabled = false;
				if (g->elpg_enabled)
					nvgpu_pmu_pg_global_enable(g, true);
			}
			g->mscg_enabled = false;
		}
		gk20a_idle(g);
	}
	nvgpu_info(g, "MSCG is %s.", g->mscg_enabled ? "enabled" :
			"disabled");

	return count;
}

static ssize_t mscg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->mscg_enabled ? 1 : 0);
}

static DEVICE_ATTR(mscg_enable, ROOTRW, mscg_enable_read, mscg_enable_store);

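/*
 * pmu->mscg_stat is read concurrently by the PG code, hence the
 * ACCESS_ONCE/WRITE_ONCE pair and the smp_mb() after each update: the
 * barrier ensures the new status is visible to other CPUs before any
 * subsequent PG traffic is issued.
 */
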
static ssize_t aelpg_param_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	int status = 0;
	union pmu_ap_cmd ap_cmd;
	int *paramlist = (int *)g->pmu.aelpg_param;
	int tmp[5];
	u32 defaultparam[5] = {
			APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US,
			APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US,
			APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US,
			APCTRL_POWER_BREAKEVEN_DEFAULT_US,
			APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT
	};

	/* Get each parameter value from the input string */
	if (sscanf(buf, "%d %d %d %d %d", &tmp[0], &tmp[1],
			&tmp[2], &tmp[3], &tmp[4]) != 5)
		return -EINVAL;

	memcpy(paramlist, tmp, sizeof(tmp));

	/* If all parameter values are 0, reset to the SW default values */
	if ((paramlist[0] | paramlist[1] | paramlist[2]
		| paramlist[3] | paramlist[4]) == 0x00) {
		memcpy(paramlist, defaultparam, sizeof(defaultparam));
	}

	/*
	 * If aelpg is enabled and the PMU is ready, post the values to
	 * the PMU now; otherwise they are stored and posted later.
	 */
	if (g->aelpg_enabled && g->pmu.pmu_ready) {
		/* Disable AELPG */
		ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
		ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
		status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);

		/* Re-enable AELPG with the new parameters */
		nvgpu_aelpg_init(g);
		nvgpu_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
	}

	return count;
}

static ssize_t aelpg_param_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE,
		"%d %d %d %d %d\n", g->pmu.aelpg_param[0],
		g->pmu.aelpg_param[1], g->pmu.aelpg_param[2],
		g->pmu.aelpg_param[3], g->pmu.aelpg_param[4]);
}

static DEVICE_ATTR(aelpg_param, ROOTRW,
		aelpg_param_read, aelpg_param_store);

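/*
 * Judging by the APCTRL_* default names, the five fields are, in order:
 * sampling period, minimum idle filter, minimum target saving and power
 * break-even (all in microseconds), and max cycles per sample. Writing
 * five zeros restores the defaults, e.g. (path illustrative):
 *   echo "0 0 0 0 0" > /sys/devices/gpu.0/aelpg_param
 */
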
static ssize_t aelpg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int status = 0;
	union pmu_ap_cmd ap_cmd;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	if (g->pmu.pmu_ready) {
		if (val && !g->aelpg_enabled) {
			g->aelpg_enabled = true;
			/* Enable AELPG */
			ap_cmd.enable_ctrl.cmd_id = PMU_AP_CMD_ID_ENABLE_CTRL;
			ap_cmd.enable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
			status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
		} else if (!val && g->aelpg_enabled) {
			g->aelpg_enabled = false;
			/* Disable AELPG */
			ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
			ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
			status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
		}
	} else {
		nvgpu_info(g, "PMU is not ready, AELPG request failed");
	}
	gk20a_idle(g);

	nvgpu_info(g, "AELPG is %s.", g->aelpg_enabled ? "enabled" :
			"disabled");

	return count;
}

static ssize_t aelpg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->aelpg_enabled ? 1 : 0);
}

static DEVICE_ATTR(aelpg_enable, ROOTRW,
		aelpg_enable_read, aelpg_enable_store);

static ssize_t allow_all_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->allow_all ? 1 : 0);
}

static ssize_t allow_all_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	g->allow_all = (val ? true : false);
	gk20a_idle(g);

	return count;
}

static DEVICE_ATTR(allow_all, ROOTRW,
		allow_all_enable_read, allow_all_enable_store);

static ssize_t emc3d_ratio_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	g->emc3d_ratio = val;

	return count;
}

static ssize_t emc3d_ratio_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->emc3d_ratio);
}

static DEVICE_ATTR(emc3d_ratio, ROOTRW, emc3d_ratio_read, emc3d_ratio_store);

static ssize_t fmax_at_vmin_safe_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long gpu_fmax_at_vmin_hz = 0;
	struct clk *clk = g->clk.tegra_clk;

	gpu_fmax_at_vmin_hz = tegra_dvfs_get_fmax_at_vmin_safe_t(clk);

	return snprintf(buf, PAGE_SIZE, "%d\n", (int)(gpu_fmax_at_vmin_hz));
}

static DEVICE_ATTR(fmax_at_vmin_safe, S_IRUGO, fmax_at_vmin_safe_read, NULL);

#ifdef CONFIG_PM
static ssize_t force_idle_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err = 0;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val) {
		if (g->forced_idle)
			return count; /* do nothing */
		else {
			err = __gk20a_do_idle(g, false);
			if (!err) {
				g->forced_idle = 1;
				nvgpu_info(g, "gpu is idle : %d",
					g->forced_idle);
			}
		}
	} else {
		if (!g->forced_idle)
			return count; /* do nothing */
		else {
			err = __gk20a_do_unidle(g);
			if (!err) {
				g->forced_idle = 0;
				nvgpu_info(g, "gpu is idle : %d",
					g->forced_idle);
			}
		}
	}

	return count;
}

static ssize_t force_idle_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->forced_idle ? 1 : 0);
}

static DEVICE_ATTR(force_idle, ROOTRW, force_idle_read, force_idle_store);
#endif

static ssize_t tpc_fs_mask_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (!g->gr.gpc_tpc_mask)
		return -ENODEV;

	if (val && val != g->gr.gpc_tpc_mask[0] && g->ops.gr.set_gpc_tpc_mask) {
		g->gr.gpc_tpc_mask[0] = val;
		g->tpc_fs_mask_user = val;

		g->ops.gr.set_gpc_tpc_mask(g, 0);

		nvgpu_vfree(g, g->gr.ctx_vars.local_golden_image);
		g->gr.ctx_vars.local_golden_image = NULL;
		g->gr.ctx_vars.golden_image_initialized = false;
		g->gr.ctx_vars.golden_image_size = 0;
		g->gr.sw_ready = false;
	}

	return count;
}

static ssize_t tpc_fs_mask_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	struct gr_gk20a *gr = &g->gr;
	u32 gpc_index;
	u32 tpc_fs_mask = 0;
	int err = 0;

	err = gk20a_busy(g);
	if (err)
		return err;

	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
		if (g->ops.gr.get_gpc_tpc_mask)
			tpc_fs_mask |=
				g->ops.gr.get_gpc_tpc_mask(g, gpc_index) <<
				(gr->max_tpc_per_gpc_count * gpc_index);
	}

	gk20a_idle(g);

	return snprintf(buf, PAGE_SIZE, "0x%x\n", tpc_fs_mask);
}

static DEVICE_ATTR(tpc_fs_mask, ROOTRW, tpc_fs_mask_read, tpc_fs_mask_store);

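/*
 * The read side packs one floorsweeping mask per GPC, shifted by
 * max_tpc_per_gpc_count bits per GPC index. For example, with two GPCs,
 * four TPCs per GPC, and all TPCs present, each per-GPC mask is 0xf and
 * the combined value reads back as 0xff. Writing a new mask invalidates
 * the golden context image so it is rebuilt for the new configuration.
 */
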
static ssize_t min_timeslice_us_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", g->min_timeslice_us);
}

static ssize_t min_timeslice_us_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val > g->max_timeslice_us)
		return -EINVAL;

	g->min_timeslice_us = val;

	return count;
}

static DEVICE_ATTR(min_timeslice_us, ROOTRW, min_timeslice_us_read,
		min_timeslice_us_store);

static ssize_t max_timeslice_us_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", g->max_timeslice_us);
}

static ssize_t max_timeslice_us_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val < g->min_timeslice_us)
		return -EINVAL;

	g->max_timeslice_us = val;

	return count;
}

static DEVICE_ATTR(max_timeslice_us, ROOTRW, max_timeslice_us_read,
		max_timeslice_us_store);

static ssize_t czf_bypass_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val >= 4)
		return -EINVAL;

	g->gr.czf_bypass = val;

	return count;
}

static ssize_t czf_bypass_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return sprintf(buf, "%d\n", g->gr.czf_bypass);
}

static DEVICE_ATTR(czf_bypass, ROOTRW, czf_bypass_read, czf_bypass_store);

static ssize_t pd_max_batches_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val > 64)
		return -EINVAL;

	g->gr.pd_max_batches = val;

	return count;
}

static ssize_t pd_max_batches_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return sprintf(buf, "%d\n", g->gr.pd_max_batches);
}

static DEVICE_ATTR(pd_max_batches, ROOTRW,
		pd_max_batches_read, pd_max_batches_store);

static ssize_t gfxp_wfi_timeout_count_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	struct gr_gk20a *gr = &g->gr;
	unsigned long val = 0;
	int err = -1;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val >= 100*1000*1000) /* 100ms @ 1Ghz */
		return -EINVAL;

	gr->gfxp_wfi_timeout_count = val;

	if (g->ops.gr.init_preemption_state && g->power_on) {
		err = gk20a_busy(g);
		if (err)
			return err;

		err = gr_gk20a_elpg_protected_call(g,
			g->ops.gr.init_preemption_state(g));

		gk20a_idle(g);

		if (err)
			return err;
	}

	return count;
}

static ssize_t gfxp_wfi_timeout_count_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	struct gr_gk20a *gr = &g->gr;
	u32 val = gr->gfxp_wfi_timeout_count;

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static DEVICE_ATTR(gfxp_wfi_timeout_count, ROOTRW,
		gfxp_wfi_timeout_count_read, gfxp_wfi_timeout_count_store);

void nvgpu_remove_sysfs(struct device *dev)
{
	device_remove_file(dev, &dev_attr_elcg_enable);
	device_remove_file(dev, &dev_attr_blcg_enable);
	device_remove_file(dev, &dev_attr_slcg_enable);
	device_remove_file(dev, &dev_attr_ptimer_scale_factor);
	device_remove_file(dev, &dev_attr_ptimer_ref_freq);
	device_remove_file(dev, &dev_attr_ptimer_src_freq);
	device_remove_file(dev, &dev_attr_elpg_enable);
	device_remove_file(dev, &dev_attr_mscg_enable);
	device_remove_file(dev, &dev_attr_emc3d_ratio);
	device_remove_file(dev, &dev_attr_fmax_at_vmin_safe);
	device_remove_file(dev, &dev_attr_counters);
	device_remove_file(dev, &dev_attr_counters_reset);
	device_remove_file(dev, &dev_attr_load);
	device_remove_file(dev, &dev_attr_railgate_delay);
	device_remove_file(dev, &dev_attr_is_railgated);
#ifdef CONFIG_PM
	device_remove_file(dev, &dev_attr_force_idle);
	device_remove_file(dev, &dev_attr_railgate_enable);
#endif
	device_remove_file(dev, &dev_attr_aelpg_param);
	device_remove_file(dev, &dev_attr_aelpg_enable);
	device_remove_file(dev, &dev_attr_allow_all);
	device_remove_file(dev, &dev_attr_tpc_fs_mask);
	device_remove_file(dev, &dev_attr_min_timeslice_us);
	device_remove_file(dev, &dev_attr_max_timeslice_us);

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	nvgpu_nvhost_remove_symlink(get_gk20a(dev));
#endif

	device_remove_file(dev, &dev_attr_czf_bypass);
	device_remove_file(dev, &dev_attr_pd_max_batches);
	device_remove_file(dev, &dev_attr_gfxp_wfi_timeout_count);

	if (strcmp(dev_name(dev), "gpu.0")) {
		struct kobject *kobj = &dev->kobj;
		struct device *parent = container_of((kobj->parent),
					struct device, kobj);
		sysfs_remove_link(&parent->kobj, "gpu.0");
	}
}

int nvgpu_create_sysfs(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	int error = 0;

	error |= device_create_file(dev, &dev_attr_elcg_enable);
	error |= device_create_file(dev, &dev_attr_blcg_enable);
	error |= device_create_file(dev, &dev_attr_slcg_enable);
	error |= device_create_file(dev, &dev_attr_ptimer_scale_factor);
	error |= device_create_file(dev, &dev_attr_ptimer_ref_freq);
	error |= device_create_file(dev, &dev_attr_ptimer_src_freq);
	error |= device_create_file(dev, &dev_attr_elpg_enable);
	error |= device_create_file(dev, &dev_attr_mscg_enable);
	error |= device_create_file(dev, &dev_attr_emc3d_ratio);
	error |= device_create_file(dev, &dev_attr_fmax_at_vmin_safe);
	error |= device_create_file(dev, &dev_attr_counters);
	error |= device_create_file(dev, &dev_attr_counters_reset);
	error |= device_create_file(dev, &dev_attr_load);
	error |= device_create_file(dev, &dev_attr_railgate_delay);
	error |= device_create_file(dev, &dev_attr_is_railgated);
#ifdef CONFIG_PM
	error |= device_create_file(dev, &dev_attr_force_idle);
	error |= device_create_file(dev, &dev_attr_railgate_enable);
#endif
	error |= device_create_file(dev, &dev_attr_aelpg_param);
	error |= device_create_file(dev, &dev_attr_aelpg_enable);
	error |= device_create_file(dev, &dev_attr_allow_all);
	error |= device_create_file(dev, &dev_attr_tpc_fs_mask);
	error |= device_create_file(dev, &dev_attr_min_timeslice_us);
	error |= device_create_file(dev, &dev_attr_max_timeslice_us);

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	error |= nvgpu_nvhost_create_symlink(g);
#endif

	error |= device_create_file(dev, &dev_attr_czf_bypass);
	error |= device_create_file(dev, &dev_attr_pd_max_batches);
	error |= device_create_file(dev, &dev_attr_gfxp_wfi_timeout_count);

	if (strcmp(dev_name(dev), "gpu.0")) {
		struct kobject *kobj = &dev->kobj;
		struct device *parent = container_of((kobj->parent),
					struct device, kobj);
		error |= sysfs_create_link(&parent->kobj,
					&dev->kobj, "gpu.0");
	}

	if (error)
		nvgpu_err(g, "Failed to create sysfs attributes!");

	return error;
}
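
/*
 * nvgpu_create_sysfs() is expected to be called once from the platform
 * probe path after the gk20a device is registered, paired with
 * nvgpu_remove_sysfs() on teardown. Sketch of a hypothetical caller:
 *
 *	err = nvgpu_create_sysfs(dev);
 *	if (err)
 *		goto fail_sysfs;
 *	...
 *	nvgpu_remove_sysfs(dev);
 *
 * The strcmp(dev_name(dev), "gpu.0") check adds a compatibility symlink
 * so tools expecting the canonical gpu.0 sysfs node keep working when
 * the device has been given a different name.
 */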