summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/debug_gk20a.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/debug_gk20a.c425
1 files changed, 0 insertions, 425 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c b/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
deleted file mode 100644
index ac435046..00000000
--- a/drivers/gpu/nvgpu/gk20a/debug_gk20a.c
+++ /dev/null
@@ -1,425 +0,0 @@
1/*
2 * Copyright (C) 2011-2017 NVIDIA Corporation. All rights reserved.
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#ifdef CONFIG_DEBUG_FS
16#include <linux/debugfs.h>
17#endif
18#include <linux/seq_file.h>
19#include <linux/io.h>
20#include <linux/fs.h>
21
22#include <nvgpu/log.h>
23#include <nvgpu/kmem.h>
24#include <nvgpu/semaphore.h>
25#include <nvgpu/log.h>
26
27#include "gk20a.h"
28#include "gk20a/platform_gk20a.h"
29#include "debug_gk20a.h"
30
31#include <nvgpu/hw/gk20a/hw_ram_gk20a.h>
32#include <nvgpu/hw/gk20a/hw_fifo_gk20a.h>
33#include <nvgpu/hw/gk20a/hw_ccsr_gk20a.h>
34#include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
35
/* Exposed via debugfs ("trace_cmdbuf", see gk20a_debug_init); non-zero
 * enables command-buffer tracing elsewhere in the driver. */
unsigned int gk20a_debug_trace_cmdbuf;
37
38static inline void gk20a_debug_write_printk(void *ctx, const char *str,
39 size_t len)
40{
41 pr_info("%s", str);
42}
43
44static inline void gk20a_debug_write_to_seqfile(void *ctx, const char *str,
45 size_t len)
46{
47 seq_write((struct seq_file *)ctx, str, len);
48}
49
50void gk20a_debug_output(struct gk20a_debug_output *o,
51 const char *fmt, ...)
52{
53 va_list args;
54 int len;
55
56 va_start(args, fmt);
57 len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
58 va_end(args);
59 o->fn(o->ctx, o->buf, len);
60}
61
/*
 * Dump channel/RAMFC status for every channel in the fifo.
 *
 * Three passes: (1) take a reference on each live channel and allocate a
 * snapshot buffer, (2) copy each referenced channel's instance block out
 * and drop the reference, (3) format the snapshots. Formatting from the
 * snapshot rather than live state keeps the reference-hold window short.
 */
static void gk20a_debug_dump_all_channel_status_ramfc(struct gk20a *g,
		 struct gk20a_debug_output *o)
{
	struct fifo_gk20a *f = &g->fifo;
	u32 chid;
	struct ch_state **ch_state;

	/* Zeroed pointer array: a NULL entry later means "dead channel or
	 * failed alloc, skip". */
	ch_state = nvgpu_kzalloc(g, sizeof(*ch_state) * f->num_channels);
	if (!ch_state) {
		gk20a_debug_output(o, "cannot alloc memory for channels\n");
		return;
	}

	for (chid = 0; chid < f->num_channels; chid++) {
		struct channel_gk20a *ch = &f->channel[chid];
		if (gk20a_channel_get(ch)) {
			/* Snapshot buffer: ch_state header plus the raw
			 * instance block contents. */
			ch_state[chid] =
				nvgpu_kmalloc(g, sizeof(struct ch_state) +
					      ram_in_alloc_size_v());
			/* ref taken stays to below loop with
			 * successful allocs */
			if (!ch_state[chid])
				gk20a_channel_put(ch);
		}
	}

	for (chid = 0; chid < f->num_channels; chid++) {
		struct channel_gk20a *ch = &f->channel[chid];
		if (!ch_state[chid])
			continue;

		ch_state[chid]->pid = ch->pid;
		ch_state[chid]->refs = atomic_read(&ch->ref_count);
		/* Copy the instance block out of GPU-accessible memory,
		 * then release the reference taken in the first loop. */
		nvgpu_mem_rd_n(g, &ch->inst_block, 0,
			       &ch_state[chid]->inst_block[0],
			       ram_in_alloc_size_v());
		gk20a_channel_put(ch);
	}
	for (chid = 0; chid < f->num_channels; chid++) {
		if (ch_state[chid]) {
			g->ops.fifo.dump_channel_status_ramfc(g, o, chid,
							      ch_state[chid]);
			nvgpu_kfree(g, ch_state[chid]);
		}
	}
	nvgpu_kfree(g, ch_state);
}
109
110void gk20a_debug_show_dump(struct gk20a *g, struct gk20a_debug_output *o)
111{
112 g->ops.fifo.dump_pbdma_status(g, o);
113 g->ops.fifo.dump_eng_status(g, o);
114
115 gk20a_debug_dump_all_channel_status_ramfc(g, o);
116}
117
118static int gk20a_gr_dump_regs(struct device *dev,
119 struct gk20a_debug_output *o)
120{
121 struct gk20a_platform *platform = gk20a_get_platform(dev);
122 struct gk20a *g = platform->g;
123
124 if (g->ops.gr.dump_gr_regs)
125 gr_gk20a_elpg_protected_call(g, g->ops.gr.dump_gr_regs(g, o));
126
127 return 0;
128}
129
130int gk20a_gr_debug_dump(struct device *dev)
131{
132 struct gk20a_debug_output o = {
133 .fn = gk20a_debug_write_printk
134 };
135
136 gk20a_gr_dump_regs(dev, &o);
137
138 return 0;
139}
140
141static int gk20a_gr_debug_show(struct seq_file *s, void *unused)
142{
143 struct device *dev = s->private;
144 struct gk20a *g = gk20a_get_platform(dev)->g;
145 struct gk20a_debug_output o = {
146 .fn = gk20a_debug_write_to_seqfile,
147 .ctx = s,
148 };
149 int err;
150
151 err = gk20a_busy(g);
152 if (err) {
153 nvgpu_err(g, "failed to power on gpu: %d", err);
154 return -EINVAL;
155 }
156
157 gk20a_gr_dump_regs(dev, &o);
158
159 gk20a_idle(g);
160
161 return 0;
162}
163
164void gk20a_debug_dump(struct device *dev)
165{
166 struct gk20a_platform *platform = gk20a_get_platform(dev);
167 struct gk20a *g = platform->g;
168 struct gk20a_debug_output o = {
169 .fn = gk20a_debug_write_printk
170 };
171
172 if (platform->dump_platform_dependencies)
173 platform->dump_platform_dependencies(dev);
174
175 /* HAL only initialized after 1st power-on */
176 if (g->ops.debug.show_dump)
177 g->ops.debug.show_dump(g, &o);
178}
179
180static int gk20a_debug_show(struct seq_file *s, void *unused)
181{
182 struct device *dev = s->private;
183 struct gk20a_debug_output o = {
184 .fn = gk20a_debug_write_to_seqfile,
185 .ctx = s,
186 };
187 struct gk20a *g;
188 int err;
189
190 g = gk20a_get_platform(dev)->g;
191
192 err = gk20a_busy(g);
193 if (err) {
194 nvgpu_err(g, "failed to power on gpu: %d", err);
195 return -EFAULT;
196 }
197
198 /* HAL only initialized after 1st power-on */
199 if (g->ops.debug.show_dump)
200 g->ops.debug.show_dump(g, &o);
201
202 gk20a_idle(g);
203 return 0;
204}
205
206static int gk20a_gr_debug_open(struct inode *inode, struct file *file)
207{
208 return single_open(file, gk20a_gr_debug_show, inode->i_private);
209}
210
211static int gk20a_debug_open(struct inode *inode, struct file *file)
212{
213 return single_open(file, gk20a_debug_show, inode->i_private);
214}
215
/* File operations for the "gr_status" debugfs node (read-only dump). */
static const struct file_operations gk20a_gr_debug_fops = {
	.open		= gk20a_gr_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
222
/* File operations for the "status" debugfs node (read-only dump). */
static const struct file_operations gk20a_debug_fops = {
	.open		= gk20a_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
229
/* Install the gk20a implementation of the debug-dump HAL hook. */
void gk20a_init_debug_ops(struct gpu_ops *gops)
{
	gops->debug.show_dump = gk20a_debug_show_dump;
}
234
235#ifdef CONFIG_DEBUG_FS
236static int railgate_residency_show(struct seq_file *s, void *data)
237{
238 struct device *dev = s->private;
239 struct gk20a_platform *platform = dev_get_drvdata(dev);
240 struct gk20a *g = get_gk20a(dev);
241 unsigned long time_since_last_state_transition_ms;
242 unsigned long total_rail_gate_time_ms;
243 unsigned long total_rail_ungate_time_ms;
244
245 if (platform->is_railgated(dev)) {
246 time_since_last_state_transition_ms =
247 jiffies_to_msecs(jiffies -
248 g->pstats.last_rail_gate_complete);
249 total_rail_ungate_time_ms = g->pstats.total_rail_ungate_time_ms;
250 total_rail_gate_time_ms =
251 g->pstats.total_rail_gate_time_ms +
252 time_since_last_state_transition_ms;
253 } else {
254 time_since_last_state_transition_ms =
255 jiffies_to_msecs(jiffies -
256 g->pstats.last_rail_ungate_complete);
257 total_rail_gate_time_ms = g->pstats.total_rail_gate_time_ms;
258 total_rail_ungate_time_ms =
259 g->pstats.total_rail_ungate_time_ms +
260 time_since_last_state_transition_ms;
261 }
262
263 seq_printf(s, "Time with Rails Gated: %lu ms\n"
264 "Time with Rails UnGated: %lu ms\n"
265 "Total railgating cycles: %lu\n",
266 total_rail_gate_time_ms,
267 total_rail_ungate_time_ms,
268 g->pstats.railgating_cycle_count - 1);
269 return 0;
270
271}
272
273static int railgate_residency_open(struct inode *inode, struct file *file)
274{
275 return single_open(file, railgate_residency_show, inode->i_private);
276}
277
/* File operations for the "railgate_residency" debugfs node. */
static const struct file_operations railgate_residency_fops = {
	.open		= railgate_residency_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
284
285int gk20a_railgating_debugfs_init(struct device *dev)
286{
287 struct dentry *d;
288 struct gk20a_platform *platform = dev_get_drvdata(dev);
289 struct gk20a *g = get_gk20a(dev);
290
291 if (!g->can_railgate)
292 return 0;
293
294 d = debugfs_create_file(
295 "railgate_residency", S_IRUGO|S_IWUSR, platform->debugfs, dev,
296 &railgate_residency_fops);
297 if (!d)
298 return -ENOMEM;
299
300 return 0;
301}
302#endif
303
/*
 * Create the per-device debugfs directory and populate it with the
 * driver's debug knobs, then invoke the per-subsystem debugfs init
 * hooks. Compiles to a no-op body when CONFIG_DEBUG_FS is off.
 *
 * @dev:             the GPU platform device
 * @debugfs_symlink: optional alias name; when non-NULL a symlink with
 *                   this name is created pointing at the device dir
 */
void gk20a_debug_init(struct device *dev, const char *debugfs_symlink)
{
#ifdef CONFIG_DEBUG_FS
	struct gk20a_platform *platform = dev_get_drvdata(dev);
	struct gk20a *g = platform->g;

	platform->debugfs = debugfs_create_dir(dev_name(dev), NULL);
	if (!platform->debugfs)
		return;

	if (debugfs_symlink)
		platform->debugfs_alias =
			debugfs_create_symlink(debugfs_symlink,
					NULL, dev_name(dev));

	/* Dump nodes backed by the seq_file show callbacks above. */
	debugfs_create_file("status", S_IRUGO, platform->debugfs,
		dev, &gk20a_debug_fops);
	debugfs_create_file("gr_status", S_IRUGO, platform->debugfs,
		dev, &gk20a_gr_debug_fops);
	debugfs_create_u32("trace_cmdbuf", S_IRUGO|S_IWUSR,
		platform->debugfs, &gk20a_debug_trace_cmdbuf);

	debugfs_create_u32("ch_wdt_timeout_ms", S_IRUGO|S_IWUSR,
		platform->debugfs, &g->ch_wdt_timeout_ms);

	debugfs_create_u32("disable_syncpoints", S_IRUGO|S_IWUSR,
		platform->debugfs, &g->disable_syncpoints);

	/* Legacy debugging API. */
	debugfs_create_u32("dbg_mask", S_IRUGO|S_IWUSR,
		platform->debugfs, &nvgpu_dbg_mask);

	/* New debug logging API. */
	debugfs_create_u32("log_mask", S_IRUGO|S_IWUSR,
		platform->debugfs, &g->log_mask);
	debugfs_create_u32("log_trace", S_IRUGO|S_IWUSR,
		platform->debugfs, &g->log_trace);

	nvgpu_spinlock_init(&g->debugfs_lock);

	g->mm.ltc_enabled = true;
	g->mm.ltc_enabled_debug = true;

	/* Writable tunables; the returned dentries are kept so other code
	 * can remove or reference the nodes later. */
	g->debugfs_ltc_enabled =
		debugfs_create_bool("ltc_enabled", S_IRUGO|S_IWUSR,
				 platform->debugfs,
				 &g->mm.ltc_enabled_debug);

	g->debugfs_gr_idle_timeout_default =
		debugfs_create_u32("gr_idle_timeout_default_us",
				S_IRUGO|S_IWUSR, platform->debugfs,
				 &g->gr_idle_timeout_default);
	g->debugfs_timeouts_enabled =
		debugfs_create_bool("timeouts_enabled",
				S_IRUGO|S_IWUSR,
				platform->debugfs,
				&g->timeouts_enabled);

	g->debugfs_bypass_smmu =
		debugfs_create_bool("bypass_smmu",
				S_IRUGO|S_IWUSR,
				platform->debugfs,
				&g->mm.bypass_smmu);
	g->debugfs_disable_bigpage =
		debugfs_create_bool("disable_bigpage",
				S_IRUGO|S_IWUSR,
				platform->debugfs,
				&g->mm.disable_bigpage);

	g->debugfs_timeslice_low_priority_us =
		debugfs_create_u32("timeslice_low_priority_us",
				S_IRUGO|S_IWUSR,
				platform->debugfs,
				&g->timeslice_low_priority_us);
	g->debugfs_timeslice_medium_priority_us =
		debugfs_create_u32("timeslice_medium_priority_us",
				S_IRUGO|S_IWUSR,
				platform->debugfs,
				&g->timeslice_medium_priority_us);
	g->debugfs_timeslice_high_priority_us =
		debugfs_create_u32("timeslice_high_priority_us",
				S_IRUGO|S_IWUSR,
				platform->debugfs,
				&g->timeslice_high_priority_us);
	g->debugfs_runlist_interleave =
		debugfs_create_bool("runlist_interleave",
				S_IRUGO|S_IWUSR,
				platform->debugfs,
				&g->runlist_interleave);
#ifdef CONFIG_ARCH_TEGRA_18x_SOC
	/* t18x-only preemption debug knobs. */
	g->gr.t18x.ctx_vars.debugfs_force_preemption_gfxp =
		debugfs_create_bool("force_preemption_gfxp", S_IRUGO|S_IWUSR,
				platform->debugfs,
				&g->gr.t18x.ctx_vars.force_preemption_gfxp);

	g->gr.t18x.ctx_vars.debugfs_force_preemption_cilp =
		debugfs_create_bool("force_preemption_cilp", S_IRUGO|S_IWUSR,
				platform->debugfs,
				&g->gr.t18x.ctx_vars.force_preemption_cilp);

	g->gr.t18x.ctx_vars.debugfs_dump_ctxsw_stats =
		debugfs_create_bool("dump_ctxsw_stats_on_channel_close",
				S_IRUGO|S_IWUSR, platform->debugfs,
				&g->gr.t18x.
				ctx_vars.dump_ctxsw_stats_on_channel_close);
#endif

	/* Per-subsystem debugfs initializers hang their own nodes under
	 * platform->debugfs. */
	gr_gk20a_debugfs_init(g);
	gk20a_pmu_debugfs_init(g->dev);
	gk20a_railgating_debugfs_init(g->dev);
	gk20a_cde_debugfs_init(g->dev);
	gk20a_ce_debugfs_init(g->dev);
	nvgpu_alloc_debugfs_init(g->dev);
	gk20a_mm_debugfs_init(g->dev);
	gk20a_fifo_debugfs_init(g->dev);
	gk20a_sched_debugfs_init(g->dev);
#ifdef CONFIG_NVGPU_TRACK_MEM_USAGE
	nvgpu_kmem_debugfs_init(g->dev);
#endif
#endif

}