Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_dbg.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_dbg.c  1730
 1 file changed, 1730 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
new file mode 100644
index 00000000..56edc11b
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
@@ -0,0 +1,1730 @@
1/*
2 * Tegra GK20A GPU Debugger/Profiler Driver
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/fs.h>
20#include <linux/file.h>
21#include <linux/cdev.h>
22#include <linux/uaccess.h>
23#include <linux/dma-buf.h>
24#include <uapi/linux/nvgpu.h>
25
26#include <nvgpu/kmem.h>
27#include <nvgpu/log.h>
28#include <nvgpu/vm.h>
29#include <nvgpu/atomic.h>
30#include <nvgpu/cond.h>
31#include <nvgpu/linux/vidmem.h>
32
33#include "gk20a/gk20a.h"
34#include "gk20a/platform_gk20a.h"
35#include "gk20a/gr_gk20a.h"
36#include "gk20a/regops_gk20a.h"
37#include "gk20a/dbg_gpu_gk20a.h"
38#include "os_linux.h"
39#include "ioctl_dbg.h"
40
41#include "vm_priv.h"
42
43/* silly allocator - just increment id */
44static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0);
45static int generate_unique_id(void)
46{
47 return nvgpu_atomic_add_return(1, &unique_id);
48}
49
50static int alloc_profiler(struct gk20a *g,
51 struct dbg_profiler_object_data **_prof)
52{
53 struct dbg_profiler_object_data *prof;
54 *_prof = NULL;
55
56 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
57
58 prof = nvgpu_kzalloc(g, sizeof(*prof));
59 if (!prof)
60 return -ENOMEM;
61
62 prof->prof_handle = generate_unique_id();
63 *_prof = prof;
64 return 0;
65}
66
67static int alloc_session(struct gk20a *g, struct dbg_session_gk20a_linux **_dbg_s_linux)
68{
69 struct dbg_session_gk20a_linux *dbg_s_linux;
70 *_dbg_s_linux = NULL;
71
72 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
73
74 dbg_s_linux = nvgpu_kzalloc(g, sizeof(*dbg_s_linux));
75 if (!dbg_s_linux)
76 return -ENOMEM;
77
78 dbg_s_linux->dbg_s.id = generate_unique_id();
79 *_dbg_s_linux = dbg_s_linux;
80 return 0;
81}
82
83static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
84 struct gr_gk20a *gr);
85
86static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset);
87
88static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
89 struct nvgpu_dbg_gpu_exec_reg_ops_args *args);
90
91static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
92 struct nvgpu_dbg_gpu_powergate_args *args);
93
94static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
95 struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args);
96
97static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
98 struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args);
99
100static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
101 struct dbg_session_gk20a *dbg_s,
102 struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args);
103
104static int nvgpu_ioctl_allocate_profiler_object(struct dbg_session_gk20a_linux *dbg_s,
105 struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args);
106
107static int nvgpu_ioctl_free_profiler_object(struct dbg_session_gk20a_linux *dbg_s_linux,
108 struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args);
109
110static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s,
111 struct nvgpu_dbg_gpu_profiler_reserve_args *args);
112
113static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
114 struct nvgpu_dbg_gpu_perfbuf_map_args *args);
115
116static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
117 struct nvgpu_dbg_gpu_perfbuf_unmap_args *args);
118
119static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
120 int timeout_mode);
121
122static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
123 u32 profiler_handle);
124
125static void gk20a_dbg_session_nvgpu_mutex_acquire(struct dbg_session_gk20a *dbg_s);
126
127static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_s);
128
129static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
130 u32 profiler_handle);
131
132static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s);
133
134static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
135 struct file *filp, bool is_profiler);
136
137unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
138{
139 unsigned int mask = 0;
140 struct dbg_session_gk20a_linux *dbg_session_linux = filep->private_data;
141 struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
142
143 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
144
145 poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait);
146
147 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
148
149 if (dbg_s->dbg_events.events_enabled &&
150 dbg_s->dbg_events.num_pending_events > 0) {
151 gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d",
152 dbg_s->id);
153 gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
154 dbg_s->dbg_events.num_pending_events);
155 mask = (POLLPRI | POLLIN);
156 }
157
158 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
159
160 return mask;
161}
162
163int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
164{
165 struct dbg_session_gk20a_linux *dbg_session_linux = filp->private_data;
166 struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
167 struct gk20a *g = dbg_s->g;
168 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
169
170 gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name);
171
172 /* unbind channels */
173 dbg_unbind_all_channels_gk20a(dbg_s);
174
 175	/* Powergate/timeout enable is called here because a dbg_session that
 176	 * issued the powergate/timeout disable ioctl may be killed without ever
 177	 * calling the corresponding powergate/timeout enable ioctl.
 178	 */
179 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
180 g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
181 NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
182 nvgpu_dbg_timeout_enable(dbg_s, NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE);
183
184 /* If this session owned the perf buffer, release it */
185 if (g->perfbuf.owner == dbg_s)
186 gk20a_perfbuf_release_locked(g, g->perfbuf.offset);
187
188 /* Per-context profiler objects were released when we called
189 * dbg_unbind_all_channels. We could still have global ones.
190 */
191 nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
192 dbg_profiler_object_data, prof_obj_entry) {
193 if (prof_obj->session_id == dbg_s->id) {
194 if (prof_obj->has_reservation)
195 g->ops.dbg_session_ops.
196 release_profiler_reservation(dbg_s, prof_obj);
197 nvgpu_list_del(&prof_obj->prof_obj_entry);
198 nvgpu_kfree(g, prof_obj);
199 }
200 }
201 nvgpu_mutex_release(&g->dbg_sessions_lock);
202
203 nvgpu_mutex_destroy(&dbg_s->ch_list_lock);
204 nvgpu_mutex_destroy(&dbg_s->ioctl_lock);
205
206 nvgpu_kfree(g, dbg_session_linux);
207 gk20a_put(g);
208
209 return 0;
210}
211
212int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
213{
214 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
215 return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
216}
217
218static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
219 struct nvgpu_dbg_gpu_timeout_args *args)
220{
221 int err;
222 struct gk20a *g = dbg_s->g;
223
 224	gk20a_dbg_fn("timeout mode = %d", args->enable);
225
226 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
227 err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
228 nvgpu_mutex_release(&g->dbg_sessions_lock);
229
230 return err;
231}
232
233static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(
234 struct dbg_session_gk20a *dbg_s,
235 struct nvgpu_dbg_gpu_write_single_sm_error_state_args *args)
236{
237 struct gk20a *g = dbg_s->g;
238 struct gr_gk20a *gr = &g->gr;
239 u32 sm_id;
240 struct channel_gk20a *ch;
241 struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_state;
242 int err = 0;
243
244 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
245 if (!ch)
246 return -EINVAL;
247
248 sm_id = args->sm_id;
249 if (sm_id >= gr->no_of_sm)
250 return -EINVAL;
251
252 sm_error_state = nvgpu_kzalloc(g, sizeof(*sm_error_state));
253 if (!sm_error_state)
254 return -ENOMEM;
255
256 if (args->sm_error_state_record_size > 0) {
257 size_t read_size = sizeof(*sm_error_state);
258
259 if (read_size > args->sm_error_state_record_size)
260 read_size = args->sm_error_state_record_size;
261
262 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
263 err = copy_from_user(sm_error_state,
264 (void __user *)(uintptr_t)
265 args->sm_error_state_record_mem,
266 read_size);
267 nvgpu_mutex_release(&g->dbg_sessions_lock);
268 if (err) {
 269			err = -EFAULT;
270 goto err_free;
271 }
272 }
273
274 err = gk20a_busy(g);
275 if (err)
276 goto err_free;
277
278 err = gr_gk20a_elpg_protected_call(g,
279 g->ops.gr.update_sm_error_state(g, ch,
280 sm_id, sm_error_state));
281
282 gk20a_idle(g);
283
284err_free:
285 nvgpu_kfree(g, sm_error_state);
286
287 return err;
288}
289
290
291static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
292 struct dbg_session_gk20a *dbg_s,
293 struct nvgpu_dbg_gpu_read_single_sm_error_state_args *args)
294{
295 struct gk20a *g = dbg_s->g;
296 struct gr_gk20a *gr = &g->gr;
297 struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_state;
298 u32 sm_id;
299 int err = 0;
300
301 sm_id = args->sm_id;
302 if (sm_id >= gr->no_of_sm)
303 return -EINVAL;
304
305 sm_error_state = gr->sm_error_states + sm_id;
306
307 if (args->sm_error_state_record_size > 0) {
308 size_t write_size = sizeof(*sm_error_state);
309
310 if (write_size > args->sm_error_state_record_size)
311 write_size = args->sm_error_state_record_size;
312
313 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
314 err = copy_to_user((void __user *)(uintptr_t)
315 args->sm_error_state_record_mem,
316 sm_error_state,
317 write_size);
318 nvgpu_mutex_release(&g->dbg_sessions_lock);
319 if (err) {
320 nvgpu_err(g, "copy_to_user failed!");
 321			return -EFAULT;
322 }
323
324 args->sm_error_state_record_size = write_size;
325 }
326
327 return 0;
328}
329
330
331static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(
332 struct dbg_session_gk20a *dbg_s,
333 struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args)
334{
335 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
336
337 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
338
339 dbg_s->broadcast_stop_trigger = (args->broadcast != 0);
340
341 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
342
343 return 0;
344}
345
346static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
347 int timeout_mode)
348{
349 struct gk20a *g = dbg_s->g;
350 int err = 0;
351
352 gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
353 timeout_mode);
354
355 switch (timeout_mode) {
356 case NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE:
357 if (dbg_s->is_timeout_disabled &&
358 --g->dbg_timeout_disabled_refcount == 0) {
359 g->timeouts_enabled = true;
360 }
361 dbg_s->is_timeout_disabled = false;
362 break;
363
364 case NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE:
365 if ((dbg_s->is_timeout_disabled == false) &&
366 (g->dbg_timeout_disabled_refcount++ == 0)) {
367 g->timeouts_enabled = false;
368 }
369 dbg_s->is_timeout_disabled = true;
370 break;
371
372 default:
373 nvgpu_err(g,
374 "unrecognized dbg gpu timeout mode : 0x%x",
375 timeout_mode);
376 err = -EINVAL;
377 break;
378 }
379
380 gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
381 g->timeouts_enabled ? "Yes" : "No");
382
383 return err;
384}
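
/*
 * Editorial note, illustrative only (not driver code): with two debug
 * sessions A and B on the same GPU, the refcount above behaves as follows:
 *
 *   A: TIMEOUT_DISABLE -> dbg_timeout_disabled_refcount 0 -> 1, timeouts off
 *   B: TIMEOUT_DISABLE -> refcount 1 -> 2, timeouts stay off
 *   A: TIMEOUT_ENABLE  -> refcount 2 -> 1, timeouts stay off
 *   B: TIMEOUT_ENABLE  -> refcount 1 -> 0, timeouts back on
 *
 * The per-session is_timeout_disabled flag keeps a session from decrementing
 * the refcount more than once, e.g. when the enable is replayed on release.
 */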
385
386static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
387 struct file *filp, bool is_profiler)
388{
389 struct nvgpu_os_linux *l;
390 struct dbg_session_gk20a_linux *dbg_session_linux;
391 struct dbg_session_gk20a *dbg_s;
392 struct gk20a *g;
393
394 struct device *dev;
395
396 int err;
397
398 if (!is_profiler)
399 l = container_of(inode->i_cdev,
400 struct nvgpu_os_linux, dbg.cdev);
401 else
402 l = container_of(inode->i_cdev,
403 struct nvgpu_os_linux, prof.cdev);
404 g = gk20a_get(&l->g);
405 if (!g)
406 return -ENODEV;
407
408 dev = dev_from_gk20a(g);
409
410 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name);
411
412 err = alloc_session(g, &dbg_session_linux);
413 if (err)
414 goto free_ref;
415
416 dbg_s = &dbg_session_linux->dbg_s;
417
418 filp->private_data = dbg_session_linux;
419 dbg_session_linux->dev = dev;
420 dbg_s->g = g;
421 dbg_s->is_profiler = is_profiler;
422 dbg_s->is_pg_disabled = false;
423 dbg_s->is_timeout_disabled = false;
424
425 nvgpu_cond_init(&dbg_s->dbg_events.wait_queue);
426 nvgpu_init_list_node(&dbg_s->ch_list);
427 err = nvgpu_mutex_init(&dbg_s->ch_list_lock);
428 if (err)
429 goto err_free_session;
430 err = nvgpu_mutex_init(&dbg_s->ioctl_lock);
431 if (err)
432 goto err_destroy_lock;
433 dbg_s->dbg_events.events_enabled = false;
434 dbg_s->dbg_events.num_pending_events = 0;
435
436 return 0;
437
438err_destroy_lock:
439 nvgpu_mutex_destroy(&dbg_s->ch_list_lock);
440err_free_session:
441 nvgpu_kfree(g, dbg_session_linux);
442free_ref:
443 gk20a_put(g);
444 return err;
445}
446
447static int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
448 struct dbg_session_channel_data *ch_data)
449{
450 struct gk20a *g = dbg_s->g;
451 int chid;
452 struct dbg_session_data *session_data;
453 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
454 struct dbg_session_channel_data_linux *ch_data_linux;
455
456 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
457
458 chid = ch_data->chid;
459
460 /* If there's a profiler ctx reservation record associated with this
461 * session/channel pair, release it.
462 */
463 nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
464 dbg_profiler_object_data, prof_obj_entry) {
465 if ((prof_obj->session_id == dbg_s->id) &&
466 (prof_obj->ch->chid == chid)) {
467 if (prof_obj->has_reservation) {
468 g->ops.dbg_session_ops.
469 release_profiler_reservation(dbg_s, prof_obj);
470 }
471 nvgpu_list_del(&prof_obj->prof_obj_entry);
472 nvgpu_kfree(g, prof_obj);
473 }
474 }
475
476 nvgpu_list_del(&ch_data->ch_entry);
477
478 session_data = ch_data->session_data;
479 nvgpu_list_del(&session_data->dbg_s_entry);
480 nvgpu_kfree(dbg_s->g, session_data);
481
482 ch_data_linux = container_of(ch_data, struct dbg_session_channel_data_linux,
483 ch_data);
484
485 fput(ch_data_linux->ch_f);
486 nvgpu_kfree(dbg_s->g, ch_data_linux);
487
488 return 0;
489}
490
491static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
492 struct nvgpu_dbg_gpu_bind_channel_args *args)
493{
494 struct file *f;
495 struct gk20a *g = dbg_s->g;
496 struct channel_gk20a *ch;
497 struct dbg_session_channel_data_linux *ch_data_linux;
498 struct dbg_session_data *session_data;
499 int err = 0;
500
501 gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
502 g->name, args->channel_fd);
503
504 /*
 505	 * Although gk20a_get_channel_from_file gives us a channel ref, we also
 506	 * need to hold a ref to the file for the lifetime of the session. See
 507	 * the comment in struct dbg_session_channel_data.
508 */
509 f = fget(args->channel_fd);
510 if (!f)
511 return -ENODEV;
512
513 ch = gk20a_get_channel_from_file(args->channel_fd);
514 if (!ch) {
515 gk20a_dbg_fn("no channel found for fd");
516 err = -EINVAL;
517 goto out_fput;
518 }
519
520 gk20a_dbg_fn("%s hwchid=%d", g->name, ch->chid);
521
522 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
523 nvgpu_mutex_acquire(&ch->dbg_s_lock);
524
525 ch_data_linux = nvgpu_kzalloc(g, sizeof(*ch_data_linux));
526 if (!ch_data_linux) {
527 err = -ENOMEM;
528 goto out_chput;
529 }
530 ch_data_linux->ch_f = f;
531 ch_data_linux->ch_data.channel_fd = args->channel_fd;
532 ch_data_linux->ch_data.chid = ch->chid;
533 ch_data_linux->ch_data.unbind_single_channel = dbg_unbind_single_channel_gk20a;
534 nvgpu_init_list_node(&ch_data_linux->ch_data.ch_entry);
535
536 session_data = nvgpu_kzalloc(g, sizeof(*session_data));
537 if (!session_data) {
538 err = -ENOMEM;
539 goto out_kfree;
540 }
541 session_data->dbg_s = dbg_s;
542 nvgpu_init_list_node(&session_data->dbg_s_entry);
543 ch_data_linux->ch_data.session_data = session_data;
544
545 nvgpu_list_add(&session_data->dbg_s_entry, &ch->dbg_s_list);
546
547 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
548 nvgpu_list_add_tail(&ch_data_linux->ch_data.ch_entry, &dbg_s->ch_list);
549 nvgpu_mutex_release(&dbg_s->ch_list_lock);
550
551 nvgpu_mutex_release(&ch->dbg_s_lock);
552 nvgpu_mutex_release(&g->dbg_sessions_lock);
553
554 gk20a_channel_put(ch);
555
556 return 0;
557
558out_kfree:
559 nvgpu_kfree(g, ch_data_linux);
560out_chput:
561 gk20a_channel_put(ch);
562 nvgpu_mutex_release(&ch->dbg_s_lock);
563 nvgpu_mutex_release(&g->dbg_sessions_lock);
564out_fput:
565 fput(f);
566 return err;
567}
568
569static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s)
570{
571 struct dbg_session_channel_data *ch_data, *tmp;
572 struct gk20a *g = dbg_s->g;
573
574 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
575 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
576 nvgpu_list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list,
577 dbg_session_channel_data, ch_entry)
578 ch_data->unbind_single_channel(dbg_s, ch_data);
579 nvgpu_mutex_release(&dbg_s->ch_list_lock);
580 nvgpu_mutex_release(&g->dbg_sessions_lock);
581
582 return 0;
583}
584
585static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
586 struct nvgpu_dbg_gpu_exec_reg_ops_args *args)
587{
588 int err = 0, powergate_err = 0;
589 bool is_pg_disabled = false;
590
591 struct gk20a *g = dbg_s->g;
592 struct channel_gk20a *ch;
593
594 gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);
595
596 if (args->num_ops > g->gpu_characteristics.reg_ops_limit) {
597 nvgpu_err(g, "regops limit exceeded");
598 return -EINVAL;
599 }
600
601 if (args->num_ops == 0) {
602 /* Nothing to do */
603 return 0;
604 }
605
606 if (g->dbg_regops_tmp_buf_ops == 0 || !g->dbg_regops_tmp_buf) {
607 nvgpu_err(g, "reg ops work buffer not allocated");
608 return -ENODEV;
609 }
610
611 if (!dbg_s->id) {
612 nvgpu_err(g, "can't call reg_ops on an unbound debugger session");
613 return -EINVAL;
614 }
615
616 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
617 if (!dbg_s->is_profiler && !ch) {
618 nvgpu_err(g, "bind a channel before regops for a debugging session");
619 return -EINVAL;
620 }
621
622 /* be sure that ctx info is in place */
623 if (!g->is_virtual &&
624 !gr_context_info_available(dbg_s, &g->gr)) {
625 nvgpu_err(g, "gr context data not available");
626 return -ENODEV;
627 }
628
629 /* since exec_reg_ops sends methods to the ucode, it must take the
630 * global gpu lock to protect against mixing methods from debug sessions
631 * on other channels */
632 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
633
634 if (!dbg_s->is_pg_disabled && !g->is_virtual) {
635 /* In the virtual case, the server will handle
636 * disabling/enabling powergating when processing reg ops
637 */
638 powergate_err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
639 NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE);
640 is_pg_disabled = true;
641 }
642
643 if (!powergate_err) {
644 u64 ops_offset = 0; /* index offset */
645
646 while (ops_offset < args->num_ops && !err) {
647 const u64 num_ops =
648 min(args->num_ops - ops_offset,
649 (u64)(g->dbg_regops_tmp_buf_ops));
650 const u64 fragment_size =
651 num_ops * sizeof(g->dbg_regops_tmp_buf[0]);
652
653 void __user *const fragment =
654 (void __user *)(uintptr_t)
655 (args->ops +
656 ops_offset * sizeof(g->dbg_regops_tmp_buf[0]));
657
658 gk20a_dbg_fn("Regops fragment: start_op=%llu ops=%llu",
659 ops_offset, num_ops);
660
661 gk20a_dbg_fn("Copying regops from userspace");
662
663 if (copy_from_user(g->dbg_regops_tmp_buf,
664 fragment, fragment_size)) {
665 nvgpu_err(g, "copy_from_user failed!");
666 err = -EFAULT;
667 break;
668 }
669
670 err = g->ops.dbg_session_ops.exec_reg_ops(
671 dbg_s, g->dbg_regops_tmp_buf, num_ops);
672
673 gk20a_dbg_fn("Copying result to userspace");
674
675 if (copy_to_user(fragment, g->dbg_regops_tmp_buf,
676 fragment_size)) {
677 nvgpu_err(g, "copy_to_user failed!");
678 err = -EFAULT;
679 break;
680 }
681
682 ops_offset += num_ops;
683 }
684
685 /* enable powergate, if previously disabled */
686 if (is_pg_disabled) {
687 powergate_err =
688 g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
689 NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
690 }
691 }
692
693 nvgpu_mutex_release(&g->dbg_sessions_lock);
694
695 if (!err && powergate_err)
696 err = powergate_err;
697
698 if (err)
699 nvgpu_err(g, "dbg regops failed");
700
701 return err;
702}
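
/*
 * Editorial sketch of a userspace caller, illustrative only; the struct and
 * macro names for the individual reg ops are assumed to match
 * uapi/linux/nvgpu.h:
 *
 *	struct nvgpu_dbg_gpu_reg_op op = {
 *		.op     = NVGPU_DBG_GPU_REG_OP_READ_32,
 *		.type   = NVGPU_DBG_GPU_REG_OP_TYPE_GR_CTX,
 *		.offset = reg_offset,
 *	};
 *	struct nvgpu_dbg_gpu_exec_reg_ops_args args = {
 *		.ops     = (__u64)(uintptr_t)&op,
 *		.num_ops = 1,
 *	};
 *	ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_REG_OPS, &args);
 *
 * Requests larger than g->dbg_regops_tmp_buf_ops are handled by the fragment
 * loop above in multiple copy_from_user/exec_reg_ops/copy_to_user passes.
 */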
703
704static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
705 struct nvgpu_dbg_gpu_powergate_args *args)
706{
707 int err;
708 struct gk20a *g = dbg_s->g;
709 gk20a_dbg_fn("%s powergate mode = %d",
710 g->name, args->mode);
711
712 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
713 err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, args->mode);
714 nvgpu_mutex_release(&g->dbg_sessions_lock);
715 return err;
716}
717
718static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
719 struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args)
720{
721 int err;
722 struct gk20a *g = dbg_s->g;
723 struct channel_gk20a *ch_gk20a;
724
725 gk20a_dbg_fn("%s smpc ctxsw mode = %d",
726 g->name, args->mode);
727
728 err = gk20a_busy(g);
729 if (err) {
730 nvgpu_err(g, "failed to poweron");
731 return err;
732 }
733
734 /* Take the global lock, since we'll be doing global regops */
735 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
736
737 ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
738 if (!ch_gk20a) {
739 nvgpu_err(g,
740 "no bound channel for smpc ctxsw mode update");
741 err = -EINVAL;
742 goto clean_up;
743 }
744
745 err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a,
746 args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
747 if (err) {
748 nvgpu_err(g,
749 "error (%d) during smpc ctxsw mode update", err);
750 goto clean_up;
751 }
752
753 err = g->ops.regops.apply_smpc_war(dbg_s);
754 clean_up:
755 nvgpu_mutex_release(&g->dbg_sessions_lock);
756 gk20a_idle(g);
757 return err;
758}
759
760static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
761 struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args)
762{
763 int err;
764 struct gk20a *g = dbg_s->g;
765 struct channel_gk20a *ch_gk20a;
766
767 gk20a_dbg_fn("%s pm ctxsw mode = %d",
768 g->name, args->mode);
769
 770	/* Must have a valid reservation to enable/disable hwpm ctxsw.
771 * Just print an error message for now, but eventually this should
772 * return an error, at the point where all client sw has been
773 * cleaned up.
774 */
775 if (!dbg_s->has_profiler_reservation) {
776 nvgpu_err(g,
777 "session doesn't have a valid reservation");
778 }
779
780 err = gk20a_busy(g);
781 if (err) {
782 nvgpu_err(g, "failed to poweron");
783 return err;
784 }
785
786 /* Take the global lock, since we'll be doing global regops */
787 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
788
789 ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
790 if (!ch_gk20a) {
791 nvgpu_err(g,
792 "no bound channel for pm ctxsw mode update");
793 err = -EINVAL;
794 goto clean_up;
795 }
796
797 err = g->ops.gr.update_hwpm_ctxsw_mode(g, ch_gk20a,
798 args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW);
799 if (err)
800 nvgpu_err(g,
801 "error (%d) during pm ctxsw mode update", err);
802
 803	/* gk20a would require a WAR to set the core PM_ENABLE bit; it is not
 804	 * added here since gk20a is being deprecated
805 */
806 clean_up:
807 nvgpu_mutex_release(&g->dbg_sessions_lock);
808 gk20a_idle(g);
809 return err;
810}
811
812static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
813 struct dbg_session_gk20a *dbg_s,
814 struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
815{
816 struct gk20a *g = dbg_s->g;
817 struct channel_gk20a *ch;
818 int err = 0, action = args->mode;
819
820 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);
821
822 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
823 if (!ch)
824 return -EINVAL;
825
826 err = gk20a_busy(g);
827 if (err) {
828 nvgpu_err(g, "failed to poweron");
829 return err;
830 }
831
832 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
833
834 /* Suspend GPU context switching */
835 err = gr_gk20a_disable_ctxsw(g);
836 if (err) {
837 nvgpu_err(g, "unable to stop gr ctxsw");
838 /* this should probably be ctx-fatal... */
839 goto clean_up;
840 }
841
842 switch (action) {
843 case NVGPU_DBG_GPU_SUSPEND_ALL_SMS:
844 gr_gk20a_suspend_context(ch);
845 break;
846
847 case NVGPU_DBG_GPU_RESUME_ALL_SMS:
848 gr_gk20a_resume_context(ch);
849 break;
850 }
851
852 err = gr_gk20a_enable_ctxsw(g);
853 if (err)
854 nvgpu_err(g, "unable to restart ctxsw!");
855
856clean_up:
857 nvgpu_mutex_release(&g->dbg_sessions_lock);
858 gk20a_idle(g);
859
860 return err;
861}
862
863static int nvgpu_ioctl_allocate_profiler_object(
864 struct dbg_session_gk20a_linux *dbg_session_linux,
865 struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args)
866{
867 int err = 0;
868 struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
869 struct gk20a *g = get_gk20a(dbg_session_linux->dev);
870 struct dbg_profiler_object_data *prof_obj;
871
872 gk20a_dbg_fn("%s", g->name);
873
874 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
875
876 err = alloc_profiler(g, &prof_obj);
877 if (err)
878 goto clean_up;
879
880 prof_obj->session_id = dbg_s->id;
881
882 if (dbg_s->is_profiler)
883 prof_obj->ch = NULL;
884 else {
885 prof_obj->ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
886 if (prof_obj->ch == NULL) {
887 nvgpu_err(g,
888 "bind a channel for dbg session");
889 nvgpu_kfree(g, prof_obj);
890 err = -EINVAL;
891 goto clean_up;
892 }
893 }
894
895 /* Return handle to client */
896 args->profiler_handle = prof_obj->prof_handle;
897
898 nvgpu_init_list_node(&prof_obj->prof_obj_entry);
899
900 nvgpu_list_add(&prof_obj->prof_obj_entry, &g->profiler_objects);
901clean_up:
902 nvgpu_mutex_release(&g->dbg_sessions_lock);
903 return err;
904}
905
906static int nvgpu_ioctl_free_profiler_object(
907 struct dbg_session_gk20a_linux *dbg_s_linux,
908 struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args)
909{
910 int err = 0;
911 struct dbg_session_gk20a *dbg_s = &dbg_s_linux->dbg_s;
912 struct gk20a *g = get_gk20a(dbg_s_linux->dev);
913 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
914 bool obj_found = false;
915
916 gk20a_dbg_fn("%s session_id = %d profiler_handle = %x",
917 g->name, dbg_s->id, args->profiler_handle);
918
919 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
920
921 /* Remove profiler object from the list, if a match is found */
922 nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
923 dbg_profiler_object_data, prof_obj_entry) {
924 if (prof_obj->prof_handle == args->profiler_handle) {
925 if (prof_obj->session_id != dbg_s->id) {
926 nvgpu_err(g,
927 "invalid handle %x",
928 args->profiler_handle);
929 err = -EINVAL;
930 break;
931 }
932 if (prof_obj->has_reservation)
933 g->ops.dbg_session_ops.
934 release_profiler_reservation(dbg_s, prof_obj);
935 nvgpu_list_del(&prof_obj->prof_obj_entry);
936 nvgpu_kfree(g, prof_obj);
937 obj_found = true;
938 break;
939 }
940 }
941 if (!obj_found) {
942 nvgpu_err(g, "profiler %x not found",
943 args->profiler_handle);
944 err = -EINVAL;
945 }
946
947 nvgpu_mutex_release(&g->dbg_sessions_lock);
948 return err;
949}
950
951static struct dbg_profiler_object_data *find_matching_prof_obj(
952 struct dbg_session_gk20a *dbg_s,
953 u32 profiler_handle)
954{
955 struct gk20a *g = dbg_s->g;
956 struct dbg_profiler_object_data *prof_obj;
957
958 nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
959 dbg_profiler_object_data, prof_obj_entry) {
960 if (prof_obj->prof_handle == profiler_handle) {
961 if (prof_obj->session_id != dbg_s->id) {
962 nvgpu_err(g,
963 "invalid handle %x",
964 profiler_handle);
965 return NULL;
966 }
967 return prof_obj;
968 }
969 }
970 return NULL;
971}
972
 973/* Used in scenarios where a debugger session can take just the per-channel
 974 * lock for performance, but a profiler session must take the per-gpu lock
 975 * since it might not have an associated channel. */
976static void gk20a_dbg_session_nvgpu_mutex_acquire(struct dbg_session_gk20a *dbg_s)
977{
978 struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
979
980 if (dbg_s->is_profiler || !ch)
981 nvgpu_mutex_acquire(&dbg_s->g->dbg_sessions_lock);
982 else
983 nvgpu_mutex_acquire(&ch->dbg_s_lock);
984}
985
986static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_s)
987{
988 struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
989
990 if (dbg_s->is_profiler || !ch)
991 nvgpu_mutex_release(&dbg_s->g->dbg_sessions_lock);
992 else
993 nvgpu_mutex_release(&ch->dbg_s_lock);
994}
995
996static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
997{
998 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
999
1000 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
1001
1002 dbg_s->dbg_events.events_enabled = true;
1003 dbg_s->dbg_events.num_pending_events = 0;
1004
1005 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
1006}
1007
1008static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
1009{
1010 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
1011
1012 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
1013
1014 dbg_s->dbg_events.events_enabled = false;
1015 dbg_s->dbg_events.num_pending_events = 0;
1016
1017 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
1018}
1019
1020static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
1021{
1022 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
1023
1024 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
1025
1026 if (dbg_s->dbg_events.events_enabled &&
1027 dbg_s->dbg_events.num_pending_events > 0)
1028 dbg_s->dbg_events.num_pending_events--;
1029
1030 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
1031}
1032
1033
1034static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
1035 struct nvgpu_dbg_gpu_events_ctrl_args *args)
1036{
1037 int ret = 0;
1038 struct channel_gk20a *ch;
1039
1040 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
1041
1042 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1043 if (!ch) {
1044 nvgpu_err(dbg_s->g,
1045 "no channel bound to dbg session");
1046 return -EINVAL;
1047 }
1048
1049 switch (args->cmd) {
1050 case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE:
1051 gk20a_dbg_gpu_events_enable(dbg_s);
1052 break;
1053
1054 case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_DISABLE:
1055 gk20a_dbg_gpu_events_disable(dbg_s);
1056 break;
1057
1058 case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR:
1059 gk20a_dbg_gpu_events_clear(dbg_s);
1060 break;
1061
1062 default:
1063 nvgpu_err(dbg_s->g,
1064 "unrecognized dbg gpu events ctrl cmd: 0x%x",
1065 args->cmd);
1066 ret = -EINVAL;
1067 break;
1068 }
1069
1070 return ret;
1071}
1072
1073static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
1074 struct nvgpu_dbg_gpu_perfbuf_map_args *args)
1075{
1076 struct gk20a *g = dbg_s->g;
1077 struct mm_gk20a *mm = &g->mm;
1078 int err;
1079 u32 virt_size;
1080 u32 big_page_size = g->ops.mm.get_default_big_page_size();
1081
1082 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1083
1084 if (g->perfbuf.owner) {
1085 nvgpu_mutex_release(&g->dbg_sessions_lock);
1086 return -EBUSY;
1087 }
1088
1089 mm->perfbuf.vm = nvgpu_vm_init(g, big_page_size,
1090 big_page_size << 10,
1091 NV_MM_DEFAULT_KERNEL_SIZE,
1092 NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
1093 false, false, "perfbuf");
1094 if (!mm->perfbuf.vm) {
1095 nvgpu_mutex_release(&g->dbg_sessions_lock);
1096 return -ENOMEM;
1097 }
1098
1099 err = nvgpu_vm_map_buffer(mm->perfbuf.vm,
1100 args->dmabuf_fd,
1101 &args->offset,
1102 0,
1103 0,
1104 0,
1105 0,
1106 args->mapping_size,
1107 NULL);
1108 if (err)
1109 goto err_remove_vm;
1110
1111 /* perf output buffer may not cross a 4GB boundary */
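	/* Editorial note, illustrative only: a mapping at offset 0xffffe000
	 * with mapping_size 0x4000 would end at 0x100002000, so u64_hi32()
	 * of the start and end differ and the check below rejects it with
	 * -EINVAL.
	 */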
1112 virt_size = u64_lo32(args->mapping_size);
1113 if (u64_hi32(args->offset) != u64_hi32(args->offset + virt_size)) {
1114 err = -EINVAL;
1115 goto err_unmap;
1116 }
1117
1118 err = g->ops.dbg_session_ops.perfbuffer_enable(g,
1119 args->offset, virt_size);
1120 if (err)
1121 goto err_unmap;
1122
1123 g->perfbuf.owner = dbg_s;
1124 g->perfbuf.offset = args->offset;
1125 nvgpu_mutex_release(&g->dbg_sessions_lock);
1126
1127 return 0;
1128
1129err_unmap:
1130 nvgpu_vm_unmap_buffer(mm->perfbuf.vm, args->offset, NULL);
1131err_remove_vm:
1132 nvgpu_vm_put(mm->perfbuf.vm);
1133 nvgpu_mutex_release(&g->dbg_sessions_lock);
1134 return err;
1135}
1136
1137static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
1138 struct nvgpu_dbg_gpu_perfbuf_unmap_args *args)
1139{
1140 struct gk20a *g = dbg_s->g;
1141 int err;
1142
1143 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1144 if ((g->perfbuf.owner != dbg_s) ||
1145 (g->perfbuf.offset != args->offset)) {
1146 nvgpu_mutex_release(&g->dbg_sessions_lock);
1147 return -EINVAL;
1148 }
1149
1150 err = gk20a_perfbuf_release_locked(g, args->offset);
1151
1152 nvgpu_mutex_release(&g->dbg_sessions_lock);
1153
1154 return err;
1155}
1156
1157static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
1158 struct nvgpu_dbg_gpu_pc_sampling_args *args)
1159{
1160 struct channel_gk20a *ch;
1161 struct gk20a *g = dbg_s->g;
1162
1163 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1164 if (!ch)
1165 return -EINVAL;
1166
1167 gk20a_dbg_fn("");
1168
1169 return g->ops.gr.update_pc_sampling ?
1170 g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL;
1171}
1172
1173static int nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(
1174 struct dbg_session_gk20a *dbg_s,
1175 struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *args)
1176{
1177 struct gk20a *g = dbg_s->g;
1178 struct gr_gk20a *gr = &g->gr;
1179 u32 sm_id;
1180 struct channel_gk20a *ch;
1181 int err = 0;
1182
1183 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1184 if (!ch)
1185 return -EINVAL;
1186
1187 sm_id = args->sm_id;
1188
1189 if (sm_id >= gr->no_of_sm)
1190 return -EINVAL;
1191
1192 err = gk20a_busy(g);
1193 if (err)
1194 return err;
1195
1196 err = gr_gk20a_elpg_protected_call(g,
1197 g->ops.gr.clear_sm_error_state(g, ch, sm_id));
1198
1199 gk20a_idle(g);
1200
1201 return err;
1202}
1203
1204static int
1205nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(struct dbg_session_gk20a *dbg_s,
1206 struct nvgpu_dbg_gpu_suspend_resume_contexts_args *args)
1207{
1208 struct gk20a *g = dbg_s->g;
1209 int err = 0;
1210 int ctx_resident_ch_fd = -1;
1211
1212 err = gk20a_busy(g);
1213 if (err)
1214 return err;
1215
1216 switch (args->action) {
1217 case NVGPU_DBG_GPU_SUSPEND_ALL_CONTEXTS:
1218 err = g->ops.gr.suspend_contexts(g, dbg_s,
1219 &ctx_resident_ch_fd);
1220 break;
1221
1222 case NVGPU_DBG_GPU_RESUME_ALL_CONTEXTS:
1223 err = g->ops.gr.resume_contexts(g, dbg_s,
1224 &ctx_resident_ch_fd);
1225 break;
1226 }
1227
1228 if (ctx_resident_ch_fd < 0) {
1229 args->is_resident_context = 0;
1230 } else {
1231 args->is_resident_context = 1;
1232 args->resident_context_fd = ctx_resident_ch_fd;
1233 }
1234
1235 gk20a_idle(g);
1236
1237 return err;
1238}
1239
1240static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
1241 struct nvgpu_dbg_gpu_access_fb_memory_args *args)
1242{
1243 struct gk20a *g = dbg_s->g;
1244 struct dma_buf *dmabuf;
1245 void __user *user_buffer = (void __user *)(uintptr_t)args->buffer;
1246 void *buffer;
1247 u64 size, access_size, offset;
1248 u64 access_limit_size = SZ_4K;
1249 int err = 0;
1250
1251 if ((args->offset & 3) || (!args->size) || (args->size & 3))
1252 return -EINVAL;
1253
1254 dmabuf = dma_buf_get(args->dmabuf_fd);
1255 if (IS_ERR(dmabuf))
1256 return -EINVAL;
1257
1258 if ((args->offset > dmabuf->size) ||
1259 (args->size > dmabuf->size) ||
1260 (args->offset + args->size > dmabuf->size)) {
1261 err = -EINVAL;
1262 goto fail_dmabuf_put;
1263 }
1264
1265 buffer = nvgpu_big_zalloc(g, access_limit_size);
1266 if (!buffer) {
1267 err = -ENOMEM;
1268 goto fail_dmabuf_put;
1269 }
1270
1271 size = args->size;
1272 offset = 0;
1273
1274 err = gk20a_busy(g);
1275 if (err)
1276 goto fail_free_buffer;
1277
1278 while (size) {
1279 /* Max access size of access_limit_size in one loop */
1280 access_size = min(access_limit_size, size);
1281
1282 if (args->cmd ==
1283 NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE) {
1284 err = copy_from_user(buffer, user_buffer + offset,
1285 access_size);
1286 if (err)
1287 goto fail_idle;
1288 }
1289
1290 err = nvgpu_vidmem_buf_access_memory(g, dmabuf, buffer,
1291 args->offset + offset, access_size,
1292 args->cmd);
1293 if (err)
1294 goto fail_idle;
1295
1296 if (args->cmd ==
1297 NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ) {
1298 err = copy_to_user(user_buffer + offset,
1299 buffer, access_size);
1300 if (err)
1301 goto fail_idle;
1302 }
1303
1304 size -= access_size;
1305 offset += access_size;
1306 }
1307
1308fail_idle:
1309 gk20a_idle(g);
1310fail_free_buffer:
1311 nvgpu_big_free(g, buffer);
1312fail_dmabuf_put:
1313 dma_buf_put(dmabuf);
1314
1315 return err;
1316}
1317
1318static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s,
1319 struct nvgpu_dbg_gpu_profiler_reserve_args *args)
1320{
1321 if (args->acquire)
1322 return nvgpu_profiler_reserve_acquire(dbg_s, args->profiler_handle);
1323
1324 return nvgpu_profiler_reserve_release(dbg_s, args->profiler_handle);
1325}
1326
1327static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s,
1328 struct nvgpu_dbg_gpu_timeout_args *args)
1329{
1330 int status;
1331 struct gk20a *g = dbg_s->g;
1332
1333 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1334 status = g->timeouts_enabled;
1335 nvgpu_mutex_release(&g->dbg_sessions_lock);
1336
1337 if (status)
1338 args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE;
1339 else
1340 args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE;
1341}
1342
1343/* In order to perform a context relative op the context has
1344 * to be created already... which would imply that the
1345 * context switch mechanism has already been put in place.
 1346 * So by the time we perform such an operation it should always
1347 * be possible to query for the appropriate context offsets, etc.
1348 *
 1349 * But note: while the dbg_gpu bind requires a channel fd,
1350 * it doesn't require an allocated gr/compute obj at that point...
1351 */
1352static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
1353 struct gr_gk20a *gr)
1354{
1355 int err;
1356
1357 nvgpu_mutex_acquire(&gr->ctx_mutex);
1358 err = !gr->ctx_vars.golden_image_initialized;
1359 nvgpu_mutex_release(&gr->ctx_mutex);
1360 if (err)
1361 return false;
1362 return true;
1363
1364}
1365
1366static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset)
1367{
1368 struct mm_gk20a *mm = &g->mm;
1369 struct vm_gk20a *vm = mm->perfbuf.vm;
1370 int err;
1371
1372 err = g->ops.dbg_session_ops.perfbuffer_disable(g);
1373
1374 nvgpu_vm_unmap_buffer(vm, offset, NULL);
1375 gk20a_free_inst_block(g, &mm->perfbuf.inst_block);
1376 nvgpu_vm_put(vm);
1377
1378 g->perfbuf.owner = NULL;
1379 g->perfbuf.offset = 0;
1380 return err;
1381}
1382
1383static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
1384 u32 profiler_handle)
1385{
1386 struct gk20a *g = dbg_s->g;
1387 struct dbg_profiler_object_data *prof_obj;
1388 int err = 0;
1389
1390 gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle);
1391
1392 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1393
1394 /* Find matching object. */
1395 prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);
1396
1397 if (!prof_obj) {
1398 nvgpu_err(g, "object not found");
1399 err = -EINVAL;
1400 goto exit;
1401 }
1402
1403 if (prof_obj->has_reservation)
1404 g->ops.dbg_session_ops.release_profiler_reservation(dbg_s, prof_obj);
1405 else {
1406 nvgpu_err(g, "No reservation found");
1407 err = -EINVAL;
1408 goto exit;
1409 }
1410exit:
1411 nvgpu_mutex_release(&g->dbg_sessions_lock);
1412 return err;
1413}
1414
1415static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1416 u32 profiler_handle)
1417{
1418 struct gk20a *g = dbg_s->g;
1419 struct dbg_profiler_object_data *prof_obj, *my_prof_obj;
1420 int err = 0;
1421
1422 gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle);
1423
1424 if (g->profiler_reservation_count < 0) {
1425 nvgpu_err(g, "Negative reservation count!");
1426 return -EINVAL;
1427 }
1428
1429 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1430
1431 /* Find matching object. */
1432 my_prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);
1433
1434 if (!my_prof_obj) {
1435 nvgpu_err(g, "object not found");
1436 err = -EINVAL;
1437 goto exit;
1438 }
1439
1440 /* If we already have the reservation, we're done */
1441 if (my_prof_obj->has_reservation) {
1442 err = 0;
1443 goto exit;
1444 }
1445
1446 if (my_prof_obj->ch == NULL) {
1447 /* Global reservations are only allowed if there are no other
1448 * global or per-context reservations currently held
1449 */
1450 if (!g->ops.dbg_session_ops.check_and_set_global_reservation(
1451 dbg_s, my_prof_obj)) {
1452 nvgpu_err(g,
1453 "global reserve: have existing reservation");
1454 err = -EBUSY;
1455 }
1456 } else if (g->global_profiler_reservation_held) {
1457 /* If there's a global reservation,
1458 * we can't take a per-context one.
1459 */
1460 nvgpu_err(g,
1461 "per-ctxt reserve: global reservation in effect");
1462 err = -EBUSY;
1463 } else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) {
1464 /* TSG: check that another channel in the TSG
1465 * doesn't already have the reservation
1466 */
1467 int my_tsgid = my_prof_obj->ch->tsgid;
1468
1469 nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
1470 dbg_profiler_object_data, prof_obj_entry) {
1471 if (prof_obj->has_reservation &&
1472 (prof_obj->ch->tsgid == my_tsgid)) {
1473 nvgpu_err(g,
1474 "per-ctxt reserve (tsg): already reserved");
1475 err = -EBUSY;
1476 goto exit;
1477 }
1478 }
1479
1480 if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
1481 dbg_s, my_prof_obj)) {
1482 /* Another guest OS has the global reservation */
1483 nvgpu_err(g,
1484 "per-ctxt reserve: global reservation in effect");
1485 err = -EBUSY;
1486 }
1487 } else {
1488 /* channel: check that some other profiler object doesn't
1489 * already have the reservation.
1490 */
1491 struct channel_gk20a *my_ch = my_prof_obj->ch;
1492
1493 nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
1494 dbg_profiler_object_data, prof_obj_entry) {
1495 if (prof_obj->has_reservation &&
1496 (prof_obj->ch == my_ch)) {
1497 nvgpu_err(g,
1498 "per-ctxt reserve (ch): already reserved");
1499 err = -EBUSY;
1500 goto exit;
1501 }
1502 }
1503
1504 if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
1505 dbg_s, my_prof_obj)) {
1506 /* Another guest OS has the global reservation */
1507 nvgpu_err(g,
1508 "per-ctxt reserve: global reservation in effect");
1509 err = -EBUSY;
1510 }
1511 }
1512exit:
1513 nvgpu_mutex_release(&g->dbg_sessions_lock);
1514 return err;
1515}
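
/*
 * Editorial summary of the reservation rules enforced above:
 *  - a global reservation (profiler object with no channel) is granted only
 *    when no other global or per-context reservation is held;
 *  - a per-context reservation is refused while a global reservation is in
 *    effect, or while the same channel or another channel in the same TSG
 *    already holds one;
 *  - in the virtualized case the check_and_set_*_reservation() hooks may
 *    also fail because another guest OS holds the global reservation.
 */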
1516
1517static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
1518 struct nvgpu_dbg_gpu_unbind_channel_args *args)
1519{
1520 struct dbg_session_channel_data *ch_data;
1521 struct gk20a *g = dbg_s->g;
1522 bool channel_found = false;
1523 struct channel_gk20a *ch;
1524 int err;
1525
1526 gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
1527 g->name, args->channel_fd);
1528
1529 ch = gk20a_get_channel_from_file(args->channel_fd);
1530 if (!ch) {
1531 gk20a_dbg_fn("no channel found for fd");
1532 return -EINVAL;
1533 }
1534
1535 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
1536 nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
1537 dbg_session_channel_data, ch_entry) {
1538 if (ch->chid == ch_data->chid) {
1539 channel_found = true;
1540 break;
1541 }
1542 }
1543 nvgpu_mutex_release(&dbg_s->ch_list_lock);
1544
1545 if (!channel_found) {
 1546		gk20a_dbg_fn("channel not bound, fd=%d", args->channel_fd);
1547 err = -EINVAL;
1548 goto out;
1549 }
1550
1551 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1552 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
1553 err = dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
1554 nvgpu_mutex_release(&dbg_s->ch_list_lock);
1555 nvgpu_mutex_release(&g->dbg_sessions_lock);
1556
1557out:
1558 gk20a_channel_put(ch);
1559 return err;
1560}
1561
1562int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
1563{
1564 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
1565 return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
1566}
1567
1568long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
1569 unsigned long arg)
1570{
1571 struct dbg_session_gk20a_linux *dbg_s_linux = filp->private_data;
1572 struct dbg_session_gk20a *dbg_s = &dbg_s_linux->dbg_s;
1573 struct gk20a *g = dbg_s->g;
1574 u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
1575 int err = 0;
1576
1577 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
1578
1579 if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
1580 (_IOC_NR(cmd) == 0) ||
1581 (_IOC_NR(cmd) > NVGPU_DBG_GPU_IOCTL_LAST) ||
1582 (_IOC_SIZE(cmd) > NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE))
1583 return -EINVAL;
1584
1585 memset(buf, 0, sizeof(buf));
1586 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1587 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
1588 return -EFAULT;
1589 }
1590
1591 if (!g->gr.sw_ready) {
1592 err = gk20a_busy(g);
1593 if (err)
1594 return err;
1595
1596 gk20a_idle(g);
1597 }
1598
1599 /* protect from threaded user space calls */
1600 nvgpu_mutex_acquire(&dbg_s->ioctl_lock);
1601
1602 switch (cmd) {
1603 case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL:
1604 err = dbg_bind_channel_gk20a(dbg_s,
1605 (struct nvgpu_dbg_gpu_bind_channel_args *)buf);
1606 break;
1607
1608 case NVGPU_DBG_GPU_IOCTL_REG_OPS:
1609 err = nvgpu_ioctl_channel_reg_ops(dbg_s,
1610 (struct nvgpu_dbg_gpu_exec_reg_ops_args *)buf);
1611 break;
1612
1613 case NVGPU_DBG_GPU_IOCTL_POWERGATE:
1614 err = nvgpu_ioctl_powergate_gk20a(dbg_s,
1615 (struct nvgpu_dbg_gpu_powergate_args *)buf);
1616 break;
1617
1618 case NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL:
1619 err = gk20a_dbg_gpu_events_ctrl(dbg_s,
1620 (struct nvgpu_dbg_gpu_events_ctrl_args *)buf);
1621 break;
1622
1623 case NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE:
1624 err = nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(dbg_s,
1625 (struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *)buf);
1626 break;
1627
1628 case NVGPU_DBG_GPU_IOCTL_HWPM_CTXSW_MODE:
1629 err = nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(dbg_s,
1630 (struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *)buf);
1631 break;
1632
1633 case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS:
1634 err = nvgpu_dbg_gpu_ioctl_suspend_resume_sm(dbg_s,
1635 (struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *)buf);
1636 break;
1637
1638 case NVGPU_DBG_GPU_IOCTL_PERFBUF_MAP:
1639 err = gk20a_perfbuf_map(dbg_s,
1640 (struct nvgpu_dbg_gpu_perfbuf_map_args *)buf);
1641 break;
1642
1643 case NVGPU_DBG_GPU_IOCTL_PERFBUF_UNMAP:
1644 err = gk20a_perfbuf_unmap(dbg_s,
1645 (struct nvgpu_dbg_gpu_perfbuf_unmap_args *)buf);
1646 break;
1647
1648 case NVGPU_DBG_GPU_IOCTL_PC_SAMPLING:
1649 err = gk20a_dbg_pc_sampling(dbg_s,
1650 (struct nvgpu_dbg_gpu_pc_sampling_args *)buf);
1651 break;
1652
1653 case NVGPU_DBG_GPU_IOCTL_SET_NEXT_STOP_TRIGGER_TYPE:
1654 err = nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(dbg_s,
1655 (struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *)buf);
1656 break;
1657
1658 case NVGPU_DBG_GPU_IOCTL_TIMEOUT:
1659 err = nvgpu_dbg_gpu_ioctl_timeout(dbg_s,
1660 (struct nvgpu_dbg_gpu_timeout_args *)buf);
1661 break;
1662
1663 case NVGPU_DBG_GPU_IOCTL_GET_TIMEOUT:
1664 nvgpu_dbg_gpu_ioctl_get_timeout(dbg_s,
1665 (struct nvgpu_dbg_gpu_timeout_args *)buf);
1666 break;
1667
1668 case NVGPU_DBG_GPU_IOCTL_READ_SINGLE_SM_ERROR_STATE:
1669 err = nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(dbg_s,
1670 (struct nvgpu_dbg_gpu_read_single_sm_error_state_args *)buf);
1671 break;
1672
1673 case NVGPU_DBG_GPU_IOCTL_CLEAR_SINGLE_SM_ERROR_STATE:
1674 err = nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(dbg_s,
1675 (struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *)buf);
1676 break;
1677
1678 case NVGPU_DBG_GPU_IOCTL_WRITE_SINGLE_SM_ERROR_STATE:
1679 err = nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(dbg_s,
1680 (struct nvgpu_dbg_gpu_write_single_sm_error_state_args *)buf);
1681 break;
1682
1683 case NVGPU_DBG_GPU_IOCTL_UNBIND_CHANNEL:
1684 err = dbg_unbind_channel_gk20a(dbg_s,
1685 (struct nvgpu_dbg_gpu_unbind_channel_args *)buf);
1686 break;
1687
1688 case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_CONTEXTS:
1689 err = nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(dbg_s,
1690 (struct nvgpu_dbg_gpu_suspend_resume_contexts_args *)buf);
1691 break;
1692
1693 case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY:
1694 err = nvgpu_dbg_gpu_ioctl_access_fb_memory(dbg_s,
1695 (struct nvgpu_dbg_gpu_access_fb_memory_args *)buf);
1696 break;
1697
1698 case NVGPU_DBG_GPU_IOCTL_PROFILER_ALLOCATE:
1699 err = nvgpu_ioctl_allocate_profiler_object(dbg_s_linux,
1700 (struct nvgpu_dbg_gpu_profiler_obj_mgt_args *)buf);
1701 break;
1702
1703 case NVGPU_DBG_GPU_IOCTL_PROFILER_FREE:
1704 err = nvgpu_ioctl_free_profiler_object(dbg_s_linux,
1705 (struct nvgpu_dbg_gpu_profiler_obj_mgt_args *)buf);
1706 break;
1707
1708 case NVGPU_DBG_GPU_IOCTL_PROFILER_RESERVE:
1709 err = nvgpu_ioctl_profiler_reserve(dbg_s,
1710 (struct nvgpu_dbg_gpu_profiler_reserve_args *)buf);
1711 break;
1712
1713 default:
1714 nvgpu_err(g,
1715 "unrecognized dbg gpu ioctl cmd: 0x%x",
1716 cmd);
1717 err = -ENOTTY;
1718 break;
1719 }
1720
1721 nvgpu_mutex_release(&dbg_s->ioctl_lock);
1722
1723 gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
1724
1725 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1726 err = copy_to_user((void __user *)arg,
1727 buf, _IOC_SIZE(cmd));
1728
1729 return err;
1730}
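
/*
 * Editorial usage sketch, illustrative only: a debugger client opens the dbg
 * device node (commonly /dev/nvhost-dbg-gpu on Tegra; the node name is
 * platform dependent), binds a channel, then issues further ioctls:
 *
 *	int dbg_fd = open("/dev/nvhost-dbg-gpu", O_RDWR);
 *	struct nvgpu_dbg_gpu_bind_channel_args bind = {
 *		.channel_fd = channel_fd,
 *	};
 *	ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL, &bind);
 *	ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_REG_OPS, &regops_args);
 *	close(dbg_fd);
 *
 * Closing the fd runs gk20a_dbg_gpu_dev_release(), which unbinds all
 * channels and re-enables powergating/timeouts if this session had disabled
 * them.
 */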