aboutsummaryrefslogtreecommitdiffstats
path: root/include/os/linux/debug.c
diff options
context:
space:
mode:
authorJoshua Bakita <bakitajoshua@gmail.com>2023-06-28 18:24:25 -0400
committerJoshua Bakita <bakitajoshua@gmail.com>2023-06-28 18:24:25 -0400
commit01e6fac4d61fdd7fff5433942ec93fc2ea1e4df1 (patch)
tree4ef34501728a087be24f4ba0af90f91486bf780b /include/os/linux/debug.c
parent306a03d18b305e4e573be3b2931978fa10679eb9 (diff)
Include nvgpu headers
These are needed to build on NVIDIA's Jetson boards for the time being. Only a couple structs are required, so it should be fairly easy to remove this dependency at some point in the future.
Diffstat (limited to 'include/os/linux/debug.c')
-rw-r--r--  include/os/linux/debug.c  457
1 files changed, 457 insertions, 0 deletions
diff --git a/include/os/linux/debug.c b/include/os/linux/debug.c
new file mode 100644
index 0000000..5f0703c
--- /dev/null
+++ b/include/os/linux/debug.c
@@ -0,0 +1,457 @@
1/*
2 * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#include "debug_cde.h"
16#include "debug_ce.h"
17#include "debug_fifo.h"
18#include "debug_gr.h"
19#include "debug_allocator.h"
20#include "debug_kmem.h"
21#include "debug_pmu.h"
22#include "debug_sched.h"
23#include "debug_hal.h"
24#include "debug_xve.h"
25#include "debug_ltc.h"
26#include "debug_bios.h"
27#include "os_linux.h"
28#include "platform_gk20a.h"
29
30#include <nvgpu/gk20a.h>
31
32#include <linux/debugfs.h>
33#include <linux/seq_file.h>
34#include <linux/uaccess.h>
35
36#include <nvgpu/debug.h>
37
38unsigned int gk20a_debug_trace_cmdbuf;
39
40static inline void gk20a_debug_write_printk(void *ctx, const char *str,
41 size_t len)
42{
43 pr_info("%s", str);
44}
45
46static inline void gk20a_debug_write_to_seqfile(void *ctx, const char *str,
47 size_t len)
48{
49 seq_write((struct seq_file *)ctx, str, len);
50}
51
52void gk20a_debug_output(struct gk20a_debug_output *o,
53 const char *fmt, ...)
54{
55 va_list args;
56 int len;
57
58 va_start(args, fmt);
59 len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
60 va_end(args);
61 o->fn(o->ctx, o->buf, len);
62}
63
64static int gk20a_gr_dump_regs(struct gk20a *g,
65 struct gk20a_debug_output *o)
66{
67 if (g->ops.gr.dump_gr_regs)
68 gr_gk20a_elpg_protected_call(g, g->ops.gr.dump_gr_regs(g, o));
69
70 return 0;
71}
72
73int gk20a_gr_debug_dump(struct gk20a *g)
74{
75 struct gk20a_debug_output o = {
76 .fn = gk20a_debug_write_printk
77 };
78
79 gk20a_gr_dump_regs(g, &o);
80
81 return 0;
82}
83
84static int gk20a_gr_debug_show(struct seq_file *s, void *unused)
85{
86 struct device *dev = s->private;
87 struct gk20a *g = gk20a_get_platform(dev)->g;
88 struct gk20a_debug_output o = {
89 .fn = gk20a_debug_write_to_seqfile,
90 .ctx = s,
91 };
92 int err;
93
94 err = gk20a_busy(g);
95 if (err) {
96 nvgpu_err(g, "failed to power on gpu: %d", err);
97 return -EINVAL;
98 }
99
100 gk20a_gr_dump_regs(g, &o);
101
102 gk20a_idle(g);
103
104 return 0;
105}
106
107void gk20a_debug_dump(struct gk20a *g)
108{
109 struct gk20a_platform *platform = gk20a_get_platform(dev_from_gk20a(g));
110 struct gk20a_debug_output o = {
111 .fn = gk20a_debug_write_printk
112 };
113
114 if (platform->dump_platform_dependencies)
115 platform->dump_platform_dependencies(dev_from_gk20a(g));
116
117 /* HAL only initialized after 1st power-on */
118 if (g->ops.debug.show_dump)
119 g->ops.debug.show_dump(g, &o);
120}
121
122static int gk20a_debug_show(struct seq_file *s, void *unused)
123{
124 struct device *dev = s->private;
125 struct gk20a_debug_output o = {
126 .fn = gk20a_debug_write_to_seqfile,
127 .ctx = s,
128 };
129 struct gk20a *g;
130 int err;
131
132 g = gk20a_get_platform(dev)->g;
133
134 err = gk20a_busy(g);
135 if (err) {
136 nvgpu_err(g, "failed to power on gpu: %d", err);
137 return -EFAULT;
138 }
139
140 /* HAL only initialized after 1st power-on */
141 if (g->ops.debug.show_dump)
142 g->ops.debug.show_dump(g, &o);
143
144 gk20a_idle(g);
145 return 0;
146}
147
148static int gk20a_gr_debug_open(struct inode *inode, struct file *file)
149{
150 return single_open(file, gk20a_gr_debug_show, inode->i_private);
151}
152
153static int gk20a_debug_open(struct inode *inode, struct file *file)
154{
155 return single_open(file, gk20a_debug_show, inode->i_private);
156}
157
/* File operations for the read-only "gr_status" debugfs node. */
static const struct file_operations gk20a_gr_debug_fops = {
	.open = gk20a_gr_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
164
/* File operations for the read-only "status" debugfs node. */
static const struct file_operations gk20a_debug_fops = {
	.open = gk20a_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
171
/*
 * Default debug.show_dump HAL implementation: dumps PBDMA and engine
 * status, then per-channel RAMFC state, into @o.
 *
 * NOTE(review): the fifo ops are called unconditionally — callers are
 * expected to invoke this only after the HAL is initialized (see the
 * "HAL only initialized after 1st power-on" guards at the call sites).
 */
void gk20a_debug_show_dump(struct gk20a *g, struct gk20a_debug_output *o)
{
	g->ops.fifo.dump_pbdma_status(g, o);
	g->ops.fifo.dump_eng_status(g, o);

	gk20a_debug_dump_all_channel_status_ramfc(g, o);
}
179
180static ssize_t disable_bigpage_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos)
181{
182 char buf[3];
183 struct gk20a *g = file->private_data;
184
185 if (g->mm.disable_bigpage)
186 buf[0] = 'Y';
187 else
188 buf[0] = 'N';
189 buf[1] = '\n';
190 buf[2] = 0x00;
191 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
192}
193
194static ssize_t disable_bigpage_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos)
195{
196 char buf[32];
197 int buf_size;
198 bool bv;
199 struct gk20a *g = file->private_data;
200
201 buf_size = min(count, (sizeof(buf)-1));
202 if (copy_from_user(buf, user_buf, buf_size))
203 return -EFAULT;
204
205 if (strtobool(buf, &bv) == 0) {
206 g->mm.disable_bigpage = bv;
207 gk20a_init_gpu_characteristics(g);
208 }
209
210 return count;
211}
212
213static struct file_operations disable_bigpage_fops = {
214 .open = simple_open,
215 .read = disable_bigpage_read,
216 .write = disable_bigpage_write,
217};
218
219static int railgate_residency_show(struct seq_file *s, void *data)
220{
221 struct gk20a *g = s->private;
222 struct gk20a_platform *platform = dev_get_drvdata(dev_from_gk20a(g));
223 unsigned long time_since_last_state_transition_ms;
224 unsigned long total_rail_gate_time_ms;
225 unsigned long total_rail_ungate_time_ms;
226
227 if (platform->is_railgated(dev_from_gk20a(g))) {
228 time_since_last_state_transition_ms =
229 jiffies_to_msecs(jiffies -
230 g->pstats.last_rail_gate_complete);
231 total_rail_ungate_time_ms = g->pstats.total_rail_ungate_time_ms;
232 total_rail_gate_time_ms =
233 g->pstats.total_rail_gate_time_ms +
234 time_since_last_state_transition_ms;
235 } else {
236 time_since_last_state_transition_ms =
237 jiffies_to_msecs(jiffies -
238 g->pstats.last_rail_ungate_complete);
239 total_rail_gate_time_ms = g->pstats.total_rail_gate_time_ms;
240 total_rail_ungate_time_ms =
241 g->pstats.total_rail_ungate_time_ms +
242 time_since_last_state_transition_ms;
243 }
244
245 seq_printf(s, "Time with Rails Gated: %lu ms\n"
246 "Time with Rails UnGated: %lu ms\n"
247 "Total railgating cycles: %lu\n",
248 total_rail_gate_time_ms,
249 total_rail_ungate_time_ms,
250 g->pstats.railgating_cycle_count - 1);
251 return 0;
252
253}
254
255static int railgate_residency_open(struct inode *inode, struct file *file)
256{
257 return single_open(file, railgate_residency_show, inode->i_private);
258}
259
/* File operations for the read-only "railgate_residency" debugfs node. */
static const struct file_operations railgate_residency_fops = {
	.open		= railgate_residency_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
266
267static int gk20a_railgating_debugfs_init(struct gk20a *g)
268{
269 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
270 struct dentry *d;
271
272 d = debugfs_create_file(
273 "railgate_residency", S_IRUGO|S_IWUSR, l->debugfs, g,
274 &railgate_residency_fops);
275 if (!d)
276 return -ENOMEM;
277
278 return 0;
279}
280static ssize_t timeouts_enabled_read(struct file *file,
281 char __user *user_buf, size_t count, loff_t *ppos)
282{
283 char buf[3];
284 struct gk20a *g = file->private_data;
285
286 if (nvgpu_is_timeouts_enabled(g))
287 buf[0] = 'Y';
288 else
289 buf[0] = 'N';
290 buf[1] = '\n';
291 buf[2] = 0x00;
292 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
293}
294
295static ssize_t timeouts_enabled_write(struct file *file,
296 const char __user *user_buf, size_t count, loff_t *ppos)
297{
298 char buf[3];
299 int buf_size;
300 bool timeouts_enabled;
301 struct gk20a *g = file->private_data;
302
303 buf_size = min(count, (sizeof(buf)-1));
304 if (copy_from_user(buf, user_buf, buf_size))
305 return -EFAULT;
306
307 if (strtobool(buf, &timeouts_enabled) == 0) {
308 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
309 if (timeouts_enabled == false) {
310 /* requesting to disable timeouts */
311 if (g->timeouts_disabled_by_user == false) {
312 nvgpu_atomic_inc(&g->timeouts_disabled_refcount);
313 g->timeouts_disabled_by_user = true;
314 }
315 } else {
316 /* requesting to enable timeouts */
317 if (g->timeouts_disabled_by_user == true) {
318 nvgpu_atomic_dec(&g->timeouts_disabled_refcount);
319 g->timeouts_disabled_by_user = false;
320 }
321 }
322 nvgpu_mutex_release(&g->dbg_sessions_lock);
323 }
324
325 return count;
326}
327
/* File operations for the read/write "timeouts_enabled" debugfs node. */
static const struct file_operations timeouts_enabled_fops = {
	.open = simple_open,
	.read = timeouts_enabled_read,
	.write = timeouts_enabled_write,
};
333
/*
 * Create the per-device debugfs hierarchy and hand off to each
 * subsystem's own debugfs init.
 *
 * @g:               the GPU instance
 * @debugfs_symlink: optional name for a top-level symlink aliasing the
 *                   per-device directory (skipped when NULL)
 *
 * All failures are soft: if the root directory cannot be created we
 * simply return, and individual node-creation results are not checked
 * (debugfs is best-effort by convention).
 */
void gk20a_debug_init(struct gk20a *g, const char *debugfs_symlink)
{
	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
	struct device *dev = dev_from_gk20a(g);

	/* Root directory named after the device; everything hangs off it. */
	l->debugfs = debugfs_create_dir(dev_name(dev), NULL);
	if (!l->debugfs)
		return;

	if (debugfs_symlink)
		l->debugfs_alias =
			debugfs_create_symlink(debugfs_symlink,
					NULL, dev_name(dev));

	/* Full and GR-only state dumps (seq_file backed). */
	debugfs_create_file("status", S_IRUGO, l->debugfs,
		dev, &gk20a_debug_fops);
	debugfs_create_file("gr_status", S_IRUGO, l->debugfs,
		dev, &gk20a_gr_debug_fops);
	debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR,
		l->debugfs, &gk20a_debug_trace_cmdbuf);

	debugfs_create_u32("ch_wdt_timeout_ms", S_IRUGO|S_IWUSR,
		l->debugfs, &g->ch_wdt_timeout_ms);

	debugfs_create_u32("disable_syncpoints", S_IRUGO,
		l->debugfs, &g->disable_syncpoints);

	/* New debug logging API. */
	debugfs_create_u64("log_mask", S_IRUGO|S_IWUSR,
		l->debugfs, &g->log_mask);
	debugfs_create_u32("log_trace", S_IRUGO|S_IWUSR,
		l->debugfs, &g->log_trace);

	l->debugfs_ltc_enabled =
		debugfs_create_bool("ltc_enabled", S_IRUGO|S_IWUSR,
				 l->debugfs,
				 &g->mm.ltc_enabled_target);

	/* Timeout controls: default idle timeout plus a Y/N toggle that
	 * adjusts the timeouts-disabled refcount (see timeouts_enabled_fops). */
	l->debugfs_gr_idle_timeout_default =
		debugfs_create_u32("gr_idle_timeout_default_us",
				S_IRUGO|S_IWUSR, l->debugfs,
				 &g->gr_idle_timeout_default);
	l->debugfs_timeouts_enabled =
		debugfs_create_file("timeouts_enabled",
				S_IRUGO|S_IWUSR,
				l->debugfs,
				g,
				&timeouts_enabled_fops);

	l->debugfs_disable_bigpage =
		debugfs_create_file("disable_bigpage",
				S_IRUGO|S_IWUSR,
				l->debugfs,
				g,
				&disable_bigpage_fops);

	/* Scheduling knobs: per-priority timeslices and runlist interleave. */
	l->debugfs_timeslice_low_priority_us =
		debugfs_create_u32("timeslice_low_priority_us",
				S_IRUGO|S_IWUSR,
				l->debugfs,
				&g->timeslice_low_priority_us);
	l->debugfs_timeslice_medium_priority_us =
		debugfs_create_u32("timeslice_medium_priority_us",
				S_IRUGO|S_IWUSR,
				l->debugfs,
				&g->timeslice_medium_priority_us);
	l->debugfs_timeslice_high_priority_us =
		debugfs_create_u32("timeslice_high_priority_us",
				S_IRUGO|S_IWUSR,
				l->debugfs,
				&g->timeslice_high_priority_us);
	l->debugfs_runlist_interleave =
		debugfs_create_bool("runlist_interleave",
				S_IRUGO|S_IWUSR,
				l->debugfs,
				&g->runlist_interleave);
	l->debugfs_force_preemption_gfxp =
		debugfs_create_bool("force_preemption_gfxp", S_IRUGO|S_IWUSR,
				l->debugfs,
				&g->gr.ctx_vars.force_preemption_gfxp);

	l->debugfs_force_preemption_cilp =
		debugfs_create_bool("force_preemption_cilp", S_IRUGO|S_IWUSR,
				l->debugfs,
				&g->gr.ctx_vars.force_preemption_cilp);

	l->debugfs_dump_ctxsw_stats =
		debugfs_create_bool("dump_ctxsw_stats_on_channel_close",
				S_IRUGO|S_IWUSR, l->debugfs,
				&g->gr.ctx_vars.dump_ctxsw_stats_on_channel_close);

	/* Per-subsystem debugfs trees under the same root. */
	gr_gk20a_debugfs_init(g);
	gk20a_pmu_debugfs_init(g);
	gk20a_railgating_debugfs_init(g);
#ifdef CONFIG_NVGPU_SUPPORT_CDE
	gk20a_cde_debugfs_init(g);
#endif
	gk20a_ce_debugfs_init(g);
	nvgpu_alloc_debugfs_init(g);
	nvgpu_hal_debugfs_init(g);
	gk20a_fifo_debugfs_init(g);
	gk20a_sched_debugfs_init(g);
#ifdef CONFIG_NVGPU_TRACK_MEM_USAGE
	nvgpu_kmem_debugfs_init(g);
#endif
	nvgpu_ltc_debugfs_init(g);
	/* PCI-only nodes (vendor id is zero on integrated parts). */
	if (g->pci_vendor_id) {
		nvgpu_xve_debugfs_init(g);
		nvgpu_bios_debugfs_init(g);
	}
}
445
446void gk20a_debug_deinit(struct gk20a *g)
447{
448 struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
449
450 if (!l->debugfs)
451 return;
452
453 gk20a_fifo_debugfs_deinit(g);
454
455 debugfs_remove_recursive(l->debugfs);
456 debugfs_remove(l->debugfs_alias);
457}