author	Terje Bergstrom <tbergstrom@nvidia.com>	2018-04-18 15:59:00 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2018-06-15 20:47:31 -0400
commit	2a2c16af5f9f1ccfc93a13e820d5381e5c881e92 (patch)
tree	2e5d7b042270a649978e5bb540857012c85fb5b5	/drivers/gpu/nvgpu/common/linux/sysfs.c
parent	98d996f4ffb0137d119b5849cae46d7b7e5693e1 (diff)
gpu: nvgpu: Move Linux files away from common

Move all Linux source code files to drivers/gpu/nvgpu/os/linux from
drivers/gpu/nvgpu/common/linux. This changes the meaning of common to
be OS independent.

JIRA NVGPU-598
JIRA NVGPU-601

Change-Id: Ib7f2a43d3688bb0d0b7dcc48469a6783fd988ce9
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1747714
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/sysfs.c')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/sysfs.c	1205
1 file changed, 0 insertions(+), 1205 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/sysfs.c b/drivers/gpu/nvgpu/common/linux/sysfs.c
deleted file mode 100644
index e5995bb8..00000000
--- a/drivers/gpu/nvgpu/common/linux/sysfs.c
+++ /dev/null
@@ -1,1205 +0,0 @@
/*
 * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/fb.h>

#include <nvgpu/kmem.h>
#include <nvgpu/nvhost.h>

#include "sysfs.h"
#include "platform_gk20a.h"
#include "gk20a/pmu_gk20a.h"
#include "gk20a/gr_gk20a.h"
#include "gv11b/gr_gv11b.h"

#define PTIMER_FP_FACTOR	1000000

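/*
 * Mode for the root-writable attributes below: read/write/execute for
 * the owner (root), read-only for group and others. The owner execute
 * bit comes along with S_IRWXU but has no practical effect on a sysfs
 * attribute file.
 */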
#define ROOTRW (S_IRWXU|S_IRGRP|S_IROTH)

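/*
 * Each *_enable attribute below follows the same pattern: the store
 * callback parses a 0/1 value, takes a power reference with gk20a_busy(),
 * applies the setting, and drops the reference with gk20a_idle(). From a
 * root shell the attributes are driven with echo/cat, e.g. (the exact
 * sysfs path is illustrative):
 *
 *   echo 1 > /sys/devices/.../gpu.0/elcg_enable
 *   cat /sys/devices/.../gpu.0/elcg_enable
 */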
static ssize_t elcg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	if (val) {
		g->elcg_enabled = true;
		gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_AUTO);
	} else {
		g->elcg_enabled = false;
		gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN);
	}

	gk20a_idle(g);

	nvgpu_info(g, "ELCG is %s.", g->elcg_enabled ? "enabled" :
			"disabled");

	return count;
}

static ssize_t elcg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->elcg_enabled ? 1 : 0);
}

static DEVICE_ATTR(elcg_enable, ROOTRW, elcg_enable_read, elcg_enable_store);

static ssize_t blcg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val)
		g->blcg_enabled = true;
	else
		g->blcg_enabled = false;

	err = gk20a_busy(g);
	if (err)
		return err;

	if (g->ops.clock_gating.blcg_bus_load_gating_prod)
		g->ops.clock_gating.blcg_bus_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_ce_load_gating_prod)
		g->ops.clock_gating.blcg_ce_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod)
		g->ops.clock_gating.blcg_ctxsw_firmware_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_fb_load_gating_prod)
		g->ops.clock_gating.blcg_fb_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_fifo_load_gating_prod)
		g->ops.clock_gating.blcg_fifo_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_gr_load_gating_prod)
		g->ops.clock_gating.blcg_gr_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_ltc_load_gating_prod)
		g->ops.clock_gating.blcg_ltc_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_pmu_load_gating_prod)
		g->ops.clock_gating.blcg_pmu_load_gating_prod(g,
				g->blcg_enabled);
	if (g->ops.clock_gating.blcg_xbar_load_gating_prod)
		g->ops.clock_gating.blcg_xbar_load_gating_prod(g,
				g->blcg_enabled);
	gk20a_idle(g);

	nvgpu_info(g, "BLCG is %s.", g->blcg_enabled ? "enabled" :
			"disabled");

	return count;
}

static ssize_t blcg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->blcg_enabled ? 1 : 0);
}

static DEVICE_ATTR(blcg_enable, ROOTRW, blcg_enable_read, blcg_enable_store);

static ssize_t slcg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val)
		g->slcg_enabled = true;
	else
		g->slcg_enabled = false;

	/*
	 * TODO: slcg_therm_load_gating is not enabled anywhere during
	 * init. Therefore, it would be incongruous to add it here. Once
	 * it is added to init, we should add it here too.
	 */
	err = gk20a_busy(g);
	if (err)
		return err;

	if (g->ops.clock_gating.slcg_bus_load_gating_prod)
		g->ops.clock_gating.slcg_bus_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_ce2_load_gating_prod)
		g->ops.clock_gating.slcg_ce2_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_chiplet_load_gating_prod)
		g->ops.clock_gating.slcg_chiplet_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod)
		g->ops.clock_gating.slcg_ctxsw_firmware_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_fb_load_gating_prod)
		g->ops.clock_gating.slcg_fb_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_fifo_load_gating_prod)
		g->ops.clock_gating.slcg_fifo_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_gr_load_gating_prod)
		g->ops.clock_gating.slcg_gr_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_ltc_load_gating_prod)
		g->ops.clock_gating.slcg_ltc_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_perf_load_gating_prod)
		g->ops.clock_gating.slcg_perf_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_priring_load_gating_prod)
		g->ops.clock_gating.slcg_priring_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_pmu_load_gating_prod)
		g->ops.clock_gating.slcg_pmu_load_gating_prod(g,
				g->slcg_enabled);
	if (g->ops.clock_gating.slcg_xbar_load_gating_prod)
		g->ops.clock_gating.slcg_xbar_load_gating_prod(g,
				g->slcg_enabled);
	gk20a_idle(g);

	nvgpu_info(g, "SLCG is %s.", g->slcg_enabled ? "enabled" :
			"disabled");

	return count;
}

static ssize_t slcg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->slcg_enabled ? 1 : 0);
}

static DEVICE_ATTR(slcg_enable, ROOTRW, slcg_enable_read, slcg_enable_store);

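/*
 * ptimer_scale_factor reports PTIMER_REF_FREQ_HZ / src_freq_hz as a
 * fixed-point value with PTIMER_FP_FACTOR (10^6) fractional resolution;
 * the integer quotient and remainder of scaling_factor_fp are printed
 * as "<int>.<frac>". As a worked example, assuming a 31.25 MHz
 * PTIMER_REF_FREQ_HZ and a 19.2 MHz source clock (both illustrative),
 * scaling_factor_fp = 31250000 / (19200000 / 1000000) = 1644736,
 * which is shown as "1.644736".
 */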
static ssize_t ptimer_scale_factor_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	u32 src_freq_hz = platform->ptimer_src_freq;
	u32 scaling_factor_fp;
	ssize_t res;

	if (!src_freq_hz) {
		nvgpu_err(g, "reference clk_m rate is not set correctly");
		return -EINVAL;
	}

	scaling_factor_fp = (u32)(PTIMER_REF_FREQ_HZ) /
				((u32)(src_freq_hz) /
				(u32)(PTIMER_FP_FACTOR));
	res = snprintf(buf, PAGE_SIZE, "%u.%u\n",
			scaling_factor_fp / PTIMER_FP_FACTOR,
			scaling_factor_fp % PTIMER_FP_FACTOR);

	return res;
}

static DEVICE_ATTR(ptimer_scale_factor, S_IRUGO,
		ptimer_scale_factor_show, NULL);

static ssize_t ptimer_ref_freq_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	u32 src_freq_hz = platform->ptimer_src_freq;
	ssize_t res;

	if (!src_freq_hz) {
		nvgpu_err(g, "reference clk_m rate is not set correctly");
		return -EINVAL;
	}

	res = snprintf(buf, PAGE_SIZE, "%u\n", PTIMER_REF_FREQ_HZ);

	return res;
}

static DEVICE_ATTR(ptimer_ref_freq, S_IRUGO, ptimer_ref_freq_show, NULL);

static ssize_t ptimer_src_freq_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	u32 src_freq_hz = platform->ptimer_src_freq;
	ssize_t res;

	if (!src_freq_hz) {
		nvgpu_err(g, "reference clk_m rate is not set correctly");
		return -EINVAL;
	}

	res = snprintf(buf, PAGE_SIZE, "%u\n", src_freq_hz);

	return res;
}

static DEVICE_ATTR(ptimer_src_freq, S_IRUGO, ptimer_src_freq_show, NULL);

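/*
 * Railgating support: writing 1 to railgate_enable arms runtime PM
 * autosuspend with the current railgate_delay; writing 0 sets the
 * autosuspend delay to -1 so the rail is never gated. The control only
 * exists when runtime PM is configured.
 */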
#if defined(CONFIG_PM)
static ssize_t railgate_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long railgate_enable = 0;
	/* dev is guaranteed to be valid here. Ok to de-reference */
	struct gk20a *g = get_gk20a(dev);
	int err;

	if (kstrtoul(buf, 10, &railgate_enable) < 0)
		return -EINVAL;

	if (railgate_enable && !g->can_railgate) {
		g->can_railgate = true;
		pm_runtime_set_autosuspend_delay(dev, g->railgate_delay);
	} else if (railgate_enable == 0 && g->can_railgate) {
		g->can_railgate = false;
		pm_runtime_set_autosuspend_delay(dev, -1);
	}
	/* wake-up system to make rail-gating setting effective */
	err = gk20a_busy(g);
	if (err)
		return err;
	gk20a_idle(g);

	nvgpu_info(g, "railgate is %s.", g->can_railgate ?
		"enabled" : "disabled");

	return count;
}

static ssize_t railgate_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->can_railgate ? 1 : 0);
}

static DEVICE_ATTR(railgate_enable, ROOTRW, railgate_enable_read,
		railgate_enable_store);
#endif

static ssize_t railgate_delay_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	int railgate_delay = 0, ret = 0;
	struct gk20a *g = get_gk20a(dev);
	int err;

	if (!g->can_railgate) {
		nvgpu_info(g, "does not support power-gating");
		return count;
	}

	ret = sscanf(buf, "%d", &railgate_delay);
	if (ret == 1 && railgate_delay >= 0) {
		g->railgate_delay = railgate_delay;
		pm_runtime_set_autosuspend_delay(dev, g->railgate_delay);
	} else {
		nvgpu_err(g, "Invalid powergate delay");
	}

	/* wake-up system to make rail-gating delay effective immediately */
	err = gk20a_busy(g);
	if (err)
		return err;
	gk20a_idle(g);

	return count;
}

static ssize_t railgate_delay_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->railgate_delay);
}

static DEVICE_ATTR(railgate_delay, ROOTRW, railgate_delay_show,
		railgate_delay_store);

static ssize_t is_railgated_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	bool is_railgated = false;

	if (platform->is_railgated)
		is_railgated = platform->is_railgated(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", is_railgated ? "yes" : "no");
}

static DEVICE_ATTR(is_railgated, S_IRUGO, is_railgated_show, NULL);

static ssize_t counters_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	u32 busy_cycles, total_cycles;
	ssize_t res;

	nvgpu_pmu_get_load_counters(g, &busy_cycles, &total_cycles);

	res = snprintf(buf, PAGE_SIZE, "%u %u\n", busy_cycles, total_cycles);

	return res;
}

static DEVICE_ATTR(counters, S_IRUGO, counters_show, NULL);

static ssize_t counters_show_reset(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t res = counters_show(dev, attr, buf);
	struct gk20a *g = get_gk20a(dev);

	nvgpu_pmu_reset_load_counters(g);

	return res;
}

static DEVICE_ATTR(counters_reset, S_IRUGO, counters_show_reset, NULL);

static ssize_t gk20a_load_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	u32 busy_time;
	ssize_t res;
	int err;

	if (!g->power_on) {
		busy_time = 0;
	} else {
		err = gk20a_busy(g);
		if (err)
			return err;

		nvgpu_pmu_load_update(g);
		nvgpu_pmu_load_norm(g, &busy_time);
		gk20a_idle(g);
	}

	res = snprintf(buf, PAGE_SIZE, "%u\n", busy_time);

	return res;
}

static DEVICE_ATTR(load, S_IRUGO, gk20a_load_show, NULL);

static ssize_t elpg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (!g->power_on) {
		g->elpg_enabled = val ? true : false;
	} else {
		err = gk20a_busy(g);
		if (err)
			return -EAGAIN;
		/*
		 * Since elpg is refcounted, we should not unnecessarily call
		 * enable/disable if it is already so.
		 */
		if (val && !g->elpg_enabled) {
			g->elpg_enabled = true;
			nvgpu_pmu_pg_global_enable(g, true);
		} else if (!val && g->elpg_enabled) {
			if (g->ops.pmu.pmu_pg_engines_feature_list &&
				g->ops.pmu.pmu_pg_engines_feature_list(g,
					PMU_PG_ELPG_ENGINE_ID_GRAPHICS) !=
				NVGPU_PMU_GR_FEATURE_MASK_POWER_GATING) {
				nvgpu_pmu_pg_global_enable(g, false);
				g->elpg_enabled = false;
			} else {
				g->elpg_enabled = false;
				nvgpu_pmu_pg_global_enable(g, false);
			}
		}
		gk20a_idle(g);
	}
	nvgpu_info(g, "ELPG is %s.", g->elpg_enabled ? "enabled" :
			"disabled");

	return count;
}

static ssize_t elpg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->elpg_enabled ? 1 : 0);
}

static DEVICE_ATTR(elpg_enable, ROOTRW, elpg_enable_read, elpg_enable_store);

static ssize_t ldiv_slowdown_factor_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0) {
		nvgpu_err(g, "parse error for input SLOWDOWN factor\n");
		return -EINVAL;
	}

	if (val >= SLOWDOWN_FACTOR_FPDIV_BYMAX) {
		nvgpu_err(g, "Invalid SLOWDOWN factor\n");
		return -EINVAL;
	}

	if (val == g->ldiv_slowdown_factor)
		return count;

	if (!g->power_on) {
		g->ldiv_slowdown_factor = val;
	} else {
		err = gk20a_busy(g);
		if (err)
			return -EAGAIN;

		g->ldiv_slowdown_factor = val;

		if (g->ops.pmu.pmu_pg_init_param)
			g->ops.pmu.pmu_pg_init_param(g,
				PMU_PG_ELPG_ENGINE_ID_GRAPHICS);

		gk20a_idle(g);
	}

	nvgpu_info(g, "ldiv_slowdown_factor is %x\n", g->ldiv_slowdown_factor);

	return count;
}

static ssize_t ldiv_slowdown_factor_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->ldiv_slowdown_factor);
}

static DEVICE_ATTR(ldiv_slowdown_factor, ROOTRW,
		ldiv_slowdown_factor_read, ldiv_slowdown_factor_store);

static ssize_t mscg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	struct nvgpu_pmu *pmu = &g->pmu;
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (!g->power_on) {
		g->mscg_enabled = val ? true : false;
	} else {
		err = gk20a_busy(g);
		if (err)
			return -EAGAIN;
		/*
		 * Since elpg is refcounted, we should not unnecessarily call
		 * enable/disable if it is already so.
		 */
		if (val && !g->mscg_enabled) {
			g->mscg_enabled = true;
			if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
					PMU_PG_LPWR_FEATURE_MSCG)) {
				if (!ACCESS_ONCE(pmu->mscg_stat)) {
					WRITE_ONCE(pmu->mscg_stat,
						PMU_MSCG_ENABLED);
					/* make status visible */
					smp_mb();
				}
			}
		} else if (!val && g->mscg_enabled) {
			if (g->ops.pmu.pmu_is_lpwr_feature_supported(g,
					PMU_PG_LPWR_FEATURE_MSCG)) {
				nvgpu_pmu_pg_global_enable(g, false);
				WRITE_ONCE(pmu->mscg_stat, PMU_MSCG_DISABLED);
				/* make status visible */
				smp_mb();
				g->mscg_enabled = false;
				if (g->elpg_enabled)
					nvgpu_pmu_pg_global_enable(g, true);
			}
			g->mscg_enabled = false;
		}
		gk20a_idle(g);
	}
	nvgpu_info(g, "MSCG is %s.", g->mscg_enabled ? "enabled" :
			"disabled");

	return count;
}

static ssize_t mscg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->mscg_enabled ? 1 : 0);
}

static DEVICE_ATTR(mscg_enable, ROOTRW, mscg_enable_read, mscg_enable_store);

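/*
 * aelpg_param takes the five adaptive ELPG controller parameters in
 * order: sampling period, minimum idle filter, minimum target saving
 * and power breakeven (in microseconds, per the APCTRL_*_US defaults),
 * plus max cycles per sample. Writing all zeroes restores the APCTRL_*
 * software defaults, e.g.:
 *
 *   echo "0 0 0 0 0" > aelpg_param
 */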
static ssize_t aelpg_param_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	int status = 0;
	union pmu_ap_cmd ap_cmd;
	int *paramlist = (int *)g->pmu.aelpg_param;
	u32 defaultparam[5] = {
		APCTRL_SAMPLING_PERIOD_PG_DEFAULT_US,
		APCTRL_MINIMUM_IDLE_FILTER_DEFAULT_US,
		APCTRL_MINIMUM_TARGET_SAVING_DEFAULT_US,
		APCTRL_POWER_BREAKEVEN_DEFAULT_US,
		APCTRL_CYCLES_PER_SAMPLE_MAX_DEFAULT
	};

	/* Get each parameter value from the input string */
	sscanf(buf, "%d %d %d %d %d", &paramlist[0], &paramlist[1],
		&paramlist[2], &paramlist[3], &paramlist[4]);

	/* If all parameter values are 0, reset to the SW default values */
	if ((paramlist[0] | paramlist[1] | paramlist[2]
		| paramlist[3] | paramlist[4]) == 0x00) {
		memcpy(paramlist, defaultparam, sizeof(defaultparam));
	}

	/*
	 * If AELPG is enabled and the PMU is ready, post the values to the
	 * PMU now; otherwise they are stored here and posted later.
	 */
	if (g->aelpg_enabled && g->pmu.pmu_ready) {
		/* Disable AELPG */
		ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
		ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
		status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);

		/* Enable AELPG */
		nvgpu_aelpg_init(g);
		nvgpu_aelpg_init_and_enable(g, PMU_AP_CTRL_ID_GRAPHICS);
	}

	return count;
}

static ssize_t aelpg_param_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE,
		"%d %d %d %d %d\n", g->pmu.aelpg_param[0],
		g->pmu.aelpg_param[1], g->pmu.aelpg_param[2],
		g->pmu.aelpg_param[3], g->pmu.aelpg_param[4]);
}

static DEVICE_ATTR(aelpg_param, ROOTRW,
		aelpg_param_read, aelpg_param_store);

static ssize_t aelpg_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int status = 0;
	union pmu_ap_cmd ap_cmd;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	if (g->pmu.pmu_ready) {
		if (val && !g->aelpg_enabled) {
			g->aelpg_enabled = true;
			/* Enable AELPG */
			ap_cmd.enable_ctrl.cmd_id = PMU_AP_CMD_ID_ENABLE_CTRL;
			ap_cmd.enable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
			status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
		} else if (!val && g->aelpg_enabled) {
			g->aelpg_enabled = false;
			/* Disable AELPG */
			ap_cmd.disable_ctrl.cmd_id = PMU_AP_CMD_ID_DISABLE_CTRL;
			ap_cmd.disable_ctrl.ctrl_id = PMU_AP_CTRL_ID_GRAPHICS;
			status = nvgpu_pmu_ap_send_command(g, &ap_cmd, false);
		}
	} else {
		nvgpu_info(g, "PMU is not ready, AELPG request failed");
	}
	gk20a_idle(g);

	nvgpu_info(g, "AELPG is %s.", g->aelpg_enabled ? "enabled" :
			"disabled");

	return count;
}

static ssize_t aelpg_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->aelpg_enabled ? 1 : 0);
}

static DEVICE_ATTR(aelpg_enable, ROOTRW,
		aelpg_enable_read, aelpg_enable_store);

static ssize_t allow_all_enable_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->allow_all ? 1 : 0);
}

static ssize_t allow_all_enable_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	g->allow_all = (val ? true : false);
	gk20a_idle(g);

	return count;
}

static DEVICE_ATTR(allow_all, ROOTRW,
		allow_all_enable_read, allow_all_enable_store);

static ssize_t emc3d_ratio_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	g->emc3d_ratio = val;

	return count;
}

static ssize_t emc3d_ratio_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->emc3d_ratio);
}

static DEVICE_ATTR(emc3d_ratio, ROOTRW, emc3d_ratio_read, emc3d_ratio_store);

static ssize_t fmax_at_vmin_safe_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long gpu_fmax_at_vmin_hz = 0;

	if (g->ops.clk.get_fmax_at_vmin_safe)
		gpu_fmax_at_vmin_hz = g->ops.clk.get_fmax_at_vmin_safe(g);

	return snprintf(buf, PAGE_SIZE, "%d\n", (int)(gpu_fmax_at_vmin_hz));
}

static DEVICE_ATTR(fmax_at_vmin_safe, S_IRUGO, fmax_at_vmin_safe_read, NULL);

#ifdef CONFIG_PM
static ssize_t force_idle_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;
	int err = 0;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val) {
		if (g->forced_idle)
			return count; /* do nothing */

		err = __gk20a_do_idle(g, false);
		if (!err) {
			g->forced_idle = 1;
			nvgpu_info(g, "gpu is idle : %d", g->forced_idle);
		}
	} else {
		if (!g->forced_idle)
			return count; /* do nothing */

		err = __gk20a_do_unidle(g);
		if (!err) {
			g->forced_idle = 0;
			nvgpu_info(g, "gpu is idle : %d", g->forced_idle);
		}
	}

	return count;
}

static ssize_t force_idle_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->forced_idle ? 1 : 0);
}

static DEVICE_ATTR(force_idle, ROOTRW, force_idle_read, force_idle_store);
#endif

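/*
 * Writing a new TPC floorsweeping mask invalidates the golden context
 * image: the cached copy is freed and gr.sw_ready is cleared so that
 * the GR unit is reinitialized with the new mask on the next power-on.
 */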
static ssize_t tpc_fs_mask_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val = 0;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (!g->gr.gpc_tpc_mask)
		return -ENODEV;

	if (val && val != g->gr.gpc_tpc_mask[0] && g->ops.gr.set_gpc_tpc_mask) {
		g->gr.gpc_tpc_mask[0] = val;
		g->tpc_fs_mask_user = val;

		g->ops.gr.set_gpc_tpc_mask(g, 0);

		nvgpu_vfree(g, g->gr.ctx_vars.local_golden_image);
		g->gr.ctx_vars.local_golden_image = NULL;
		g->gr.ctx_vars.golden_image_initialized = false;
		g->gr.ctx_vars.golden_image_size = 0;
		/* Cause next poweron to reinit just gr */
		g->gr.sw_ready = false;
	}

	return count;
}

static ssize_t tpc_fs_mask_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	struct gr_gk20a *gr = &g->gr;
	u32 gpc_index;
	u32 tpc_fs_mask = 0;
	int err = 0;

	err = gk20a_busy(g);
	if (err)
		return err;

	for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) {
		if (g->ops.gr.get_gpc_tpc_mask)
			tpc_fs_mask |=
				g->ops.gr.get_gpc_tpc_mask(g, gpc_index) <<
				(gr->max_tpc_per_gpc_count * gpc_index);
	}

	gk20a_idle(g);

	return snprintf(buf, PAGE_SIZE, "0x%x\n", tpc_fs_mask);
}

static DEVICE_ATTR(tpc_fs_mask, ROOTRW, tpc_fs_mask_read, tpc_fs_mask_store);

static ssize_t min_timeslice_us_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", g->min_timeslice_us);
}

static ssize_t min_timeslice_us_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val > g->max_timeslice_us)
		return -EINVAL;

	g->min_timeslice_us = val;

	return count;
}

static DEVICE_ATTR(min_timeslice_us, ROOTRW, min_timeslice_us_read,
		min_timeslice_us_store);

static ssize_t max_timeslice_us_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%u\n", g->max_timeslice_us);
}

static ssize_t max_timeslice_us_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val < g->min_timeslice_us)
		return -EINVAL;

	g->max_timeslice_us = val;

	return count;
}

static DEVICE_ATTR(max_timeslice_us, ROOTRW, max_timeslice_us_read,
		max_timeslice_us_store);

static ssize_t czf_bypass_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val >= 4)
		return -EINVAL;

	g->gr.czf_bypass = val;

	return count;
}

static ssize_t czf_bypass_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->gr.czf_bypass);
}

static DEVICE_ATTR(czf_bypass, ROOTRW, czf_bypass_read, czf_bypass_store);

static ssize_t pd_max_batches_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	unsigned long val;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (val > 64)
		return -EINVAL;

	g->gr.pd_max_batches = val;

	return count;
}

static ssize_t pd_max_batches_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", g->gr.pd_max_batches);
}

static DEVICE_ATTR(pd_max_batches, ROOTRW,
		pd_max_batches_read, pd_max_batches_store);

static ssize_t gfxp_wfi_timeout_count_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	struct gr_gk20a *gr = &g->gr;
	unsigned long val = 0;
	int err = -1;

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	if (g->ops.gr.get_max_gfxp_wfi_timeout_count) {
		if (val >= g->ops.gr.get_max_gfxp_wfi_timeout_count(g))
			return -EINVAL;
	}

	gr->gfxp_wfi_timeout_count = val;

	if (g->ops.gr.init_preemption_state && g->power_on) {
		err = gk20a_busy(g);
		if (err)
			return err;

		err = gr_gk20a_elpg_protected_call(g,
			g->ops.gr.init_preemption_state(g));

		gk20a_idle(g);

		if (err)
			return err;
	}
	return count;
}

static ssize_t gfxp_wfi_timeout_unit_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct gk20a *g = get_gk20a(dev);
	struct gr_gk20a *gr = &g->gr;
	int err = -1;

	if (count > 0 && buf[0] == 's')
		/* sysclk */
		gr->gfxp_wfi_timeout_unit = GFXP_WFI_TIMEOUT_UNIT_SYSCLK;
	else
		/* usec */
		gr->gfxp_wfi_timeout_unit = GFXP_WFI_TIMEOUT_UNIT_USEC;

	if (g->ops.gr.init_preemption_state && g->power_on) {
		err = gk20a_busy(g);
		if (err)
			return err;

		err = gr_gk20a_elpg_protected_call(g,
			g->ops.gr.init_preemption_state(g));

		gk20a_idle(g);

		if (err)
			return err;
	}

	return count;
}

static ssize_t gfxp_wfi_timeout_count_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	struct gr_gk20a *gr = &g->gr;
	u32 val = gr->gfxp_wfi_timeout_count;

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t gfxp_wfi_timeout_unit_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct gk20a *g = get_gk20a(dev);
	struct gr_gk20a *gr = &g->gr;

	if (gr->gfxp_wfi_timeout_unit == GFXP_WFI_TIMEOUT_UNIT_USEC)
		return snprintf(buf, PAGE_SIZE, "usec\n");
	else
		return snprintf(buf, PAGE_SIZE, "sysclk\n");
}

static DEVICE_ATTR(gfxp_wfi_timeout_count, ROOTRW,
		gfxp_wfi_timeout_count_read, gfxp_wfi_timeout_count_store);

static DEVICE_ATTR(gfxp_wfi_timeout_unit, ROOTRW,
		gfxp_wfi_timeout_unit_read, gfxp_wfi_timeout_unit_store);

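/*
 * nvgpu_create_sysfs() and nvgpu_remove_sysfs() must stay in sync:
 * every attribute created below has a matching device_remove_file()
 * call, and any device not named "gpu.0" also maintains a "gpu.0"
 * compatibility symlink on its parent.
 */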
void nvgpu_remove_sysfs(struct device *dev)
{
	device_remove_file(dev, &dev_attr_elcg_enable);
	device_remove_file(dev, &dev_attr_blcg_enable);
	device_remove_file(dev, &dev_attr_slcg_enable);
	device_remove_file(dev, &dev_attr_ptimer_scale_factor);
	device_remove_file(dev, &dev_attr_ptimer_ref_freq);
	device_remove_file(dev, &dev_attr_ptimer_src_freq);
	device_remove_file(dev, &dev_attr_elpg_enable);
	device_remove_file(dev, &dev_attr_mscg_enable);
	device_remove_file(dev, &dev_attr_emc3d_ratio);
	device_remove_file(dev, &dev_attr_ldiv_slowdown_factor);

	device_remove_file(dev, &dev_attr_fmax_at_vmin_safe);

	device_remove_file(dev, &dev_attr_counters);
	device_remove_file(dev, &dev_attr_counters_reset);
	device_remove_file(dev, &dev_attr_load);
	device_remove_file(dev, &dev_attr_railgate_delay);
	device_remove_file(dev, &dev_attr_is_railgated);
#ifdef CONFIG_PM
	device_remove_file(dev, &dev_attr_force_idle);
	device_remove_file(dev, &dev_attr_railgate_enable);
#endif
	device_remove_file(dev, &dev_attr_aelpg_param);
	device_remove_file(dev, &dev_attr_aelpg_enable);
	device_remove_file(dev, &dev_attr_allow_all);
	device_remove_file(dev, &dev_attr_tpc_fs_mask);
	device_remove_file(dev, &dev_attr_min_timeslice_us);
	device_remove_file(dev, &dev_attr_max_timeslice_us);

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	nvgpu_nvhost_remove_symlink(get_gk20a(dev));
#endif

	device_remove_file(dev, &dev_attr_czf_bypass);
	device_remove_file(dev, &dev_attr_pd_max_batches);
	device_remove_file(dev, &dev_attr_gfxp_wfi_timeout_count);
	device_remove_file(dev, &dev_attr_gfxp_wfi_timeout_unit);

	if (strcmp(dev_name(dev), "gpu.0")) {
		struct kobject *kobj = &dev->kobj;
		struct device *parent = container_of((kobj->parent),
					struct device, kobj);
		sysfs_remove_link(&parent->kobj, "gpu.0");
	}
}

int nvgpu_create_sysfs(struct device *dev)
{
	struct gk20a *g = get_gk20a(dev);
	int error = 0;

	error |= device_create_file(dev, &dev_attr_elcg_enable);
	error |= device_create_file(dev, &dev_attr_blcg_enable);
	error |= device_create_file(dev, &dev_attr_slcg_enable);
	error |= device_create_file(dev, &dev_attr_ptimer_scale_factor);
	error |= device_create_file(dev, &dev_attr_ptimer_ref_freq);
	error |= device_create_file(dev, &dev_attr_ptimer_src_freq);
	error |= device_create_file(dev, &dev_attr_elpg_enable);
	error |= device_create_file(dev, &dev_attr_mscg_enable);
	error |= device_create_file(dev, &dev_attr_emc3d_ratio);
	error |= device_create_file(dev, &dev_attr_ldiv_slowdown_factor);

	error |= device_create_file(dev, &dev_attr_fmax_at_vmin_safe);

	error |= device_create_file(dev, &dev_attr_counters);
	error |= device_create_file(dev, &dev_attr_counters_reset);
	error |= device_create_file(dev, &dev_attr_load);
	error |= device_create_file(dev, &dev_attr_railgate_delay);
	error |= device_create_file(dev, &dev_attr_is_railgated);
#ifdef CONFIG_PM
	error |= device_create_file(dev, &dev_attr_force_idle);
	error |= device_create_file(dev, &dev_attr_railgate_enable);
#endif
	error |= device_create_file(dev, &dev_attr_aelpg_param);
	error |= device_create_file(dev, &dev_attr_aelpg_enable);
	error |= device_create_file(dev, &dev_attr_allow_all);
	error |= device_create_file(dev, &dev_attr_tpc_fs_mask);
	error |= device_create_file(dev, &dev_attr_min_timeslice_us);
	error |= device_create_file(dev, &dev_attr_max_timeslice_us);

#ifdef CONFIG_TEGRA_GK20A_NVHOST
	error |= nvgpu_nvhost_create_symlink(g);
#endif

	error |= device_create_file(dev, &dev_attr_czf_bypass);
	error |= device_create_file(dev, &dev_attr_pd_max_batches);
	error |= device_create_file(dev, &dev_attr_gfxp_wfi_timeout_count);
	error |= device_create_file(dev, &dev_attr_gfxp_wfi_timeout_unit);

	if (strcmp(dev_name(dev), "gpu.0")) {
		struct kobject *kobj = &dev->kobj;
		struct device *parent = container_of((kobj->parent),
					struct device, kobj);
		error |= sysfs_create_link(&parent->kobj,
				&dev->kobj, "gpu.0");
	}

	if (error)
		nvgpu_err(g, "Failed to create sysfs attributes!\n");

	return error;
}