author	Debarshi Dutta <ddutta@nvidia.com>	2017-10-10 07:03:17 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-10-17 15:55:22 -0400
commit	8f55976d4952020f1e7f257087bb79cfeb64f193 (patch)
tree	67a640295146de1949eed36a0f1eb5589720abe6
parent	b252653ac5f2b12a9a84476f9dde6a844a77a602 (diff)
gpu: nvgpu: cleanup of dbg_gpu_gk20a
This change contains a generic cleanup of the Linux-dependent parts of the
dbg_gpu_gk20a.* files. The following changes have been made:

1) Methods are moved into ioctl_dbg.* inside common/linux/.

2) The structures dbg_session_gk20a and dbg_session_channel_data have each
   been split into two parts. struct device *dev is removed from struct
   dbg_session_gk20a and instead packed into struct dbg_session_gk20a_linux
   along with dbg_session_gk20a; the wrapper is moved into ioctl_dbg.
   dbg_session_gk20a is now rid of any Linux dependencies and remains in
   dbg_gpu_gk20a. Similarly, struct file is removed from struct
   dbg_session_channel_data and is now packed into struct
   dbg_session_channel_data_linux along with dbg_session_channel_data; the
   wrapper is moved into ioctl_dbg. struct dbg_session_channel_data is now
   rid of Linux dependencies and remains in dbg_gpu_gk20a.

3) A callback function is added in order to release the
   dbg_session_channel_data.

JIRA NVGPU-205

Change-Id: I853da6dfbf9a96b7cd210beb77f2304445ff7ea6
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1575191
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
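
For reference, the core of the split is the embed-and-recover pattern sketched
below. This is a condensed, illustrative extract of the structures and the new
callback as they appear in the diff that follows; function bodies are abridged,
and the two trailing statements only indicate the registration and dispatch
call sites rather than standalone code.

/* common/linux/ioctl_dbg.h: Linux-only wrappers around the common structs */
struct dbg_session_gk20a_linux {
	struct device *dev;              /* Linux-only member */
	struct dbg_session_gk20a dbg_s;  /* OS-agnostic part, stays in gk20a/ */
};

struct dbg_session_channel_data_linux {
	struct file *ch_f;               /* Linux-only ref to the channel fd's file */
	struct dbg_session_channel_data ch_data;
};

/* common/linux/ioctl_dbg.c: Linux code recovers its wrapper via container_of() */
static int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
				struct dbg_session_channel_data *ch_data)
{
	struct dbg_session_channel_data_linux *ch_data_linux =
		container_of(ch_data, struct dbg_session_channel_data_linux,
				ch_data);
	/* ... list/session teardown elided ... */
	fput(ch_data_linux->ch_f);
	nvgpu_kfree(dbg_s->g, ch_data_linux);
	return 0;
}

/* Registration at bind time, and dispatch from OS-agnostic code
 * (gk20a/channel_gk20a.c), which never sees struct file: */
ch_data_linux->ch_data.unbind_single_channel = dbg_unbind_single_channel_gk20a;
ch_data->unbind_single_channel(dbg_s, ch_data);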
-rw-r--r--	drivers/gpu/nvgpu/Makefile.nvgpu	1
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl.c	1
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl_channel.c	1
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl_dbg.c	1730
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl_dbg.h	50
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	9
-rw-r--r--	drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c	1682
-rw-r--r--	drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h	29
-rw-r--r--	drivers/gpu/nvgpu/gk20a/regops_gk20a.c	3
9 files changed, 1797 insertions, 1709 deletions
diff --git a/drivers/gpu/nvgpu/Makefile.nvgpu b/drivers/gpu/nvgpu/Makefile.nvgpu
index 3f3978f9..42447e0c 100644
--- a/drivers/gpu/nvgpu/Makefile.nvgpu
+++ b/drivers/gpu/nvgpu/Makefile.nvgpu
@@ -33,6 +33,7 @@ nvgpu-y := \
 	common/linux/ioctl_as.o \
 	common/linux/ioctl_channel.o \
 	common/linux/ioctl_tsg.o \
+	common/linux/ioctl_dbg.o \
 	common/linux/log.o \
 	common/linux/cond.o \
 	common/linux/nvgpu_mem.o \
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl.c b/drivers/gpu/nvgpu/common/linux/ioctl.c
index 8262c326..7e6df9df 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl.c
@@ -28,6 +28,7 @@
28#include "ioctl_ctrl.h" 28#include "ioctl_ctrl.h"
29#include "ioctl_as.h" 29#include "ioctl_as.h"
30#include "ioctl_tsg.h" 30#include "ioctl_tsg.h"
31#include "ioctl_dbg.h"
31#include "module.h" 32#include "module.h"
32#include "os_linux.h" 33#include "os_linux.h"
33 34
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
index 3ea07eed..78352d6b 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_channel.c
@@ -20,6 +20,7 @@
 #include <linux/file.h>
 #include <linux/anon_inodes.h>
 #include <linux/dma-buf.h>
+#include <linux/poll.h>
 
 #include <nvgpu/semaphore.h>
 #include <nvgpu/timers.h>
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
new file mode 100644
index 00000000..56edc11b
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
@@ -0,0 +1,1730 @@
1/*
2 * Tegra GK20A GPU Debugger/Profiler Driver
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/fs.h>
20#include <linux/file.h>
21#include <linux/cdev.h>
22#include <linux/uaccess.h>
23#include <linux/dma-buf.h>
24#include <uapi/linux/nvgpu.h>
25
26#include <nvgpu/kmem.h>
27#include <nvgpu/log.h>
28#include <nvgpu/vm.h>
29#include <nvgpu/atomic.h>
30#include <nvgpu/cond.h>
31#include <nvgpu/linux/vidmem.h>
32
33#include "gk20a/gk20a.h"
34#include "gk20a/platform_gk20a.h"
35#include "gk20a/gr_gk20a.h"
36#include "gk20a/regops_gk20a.h"
37#include "gk20a/dbg_gpu_gk20a.h"
38#include "os_linux.h"
39#include "ioctl_dbg.h"
40
41#include "vm_priv.h"
42
43/* silly allocator - just increment id */
44static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0);
45static int generate_unique_id(void)
46{
47 return nvgpu_atomic_add_return(1, &unique_id);
48}
49
50static int alloc_profiler(struct gk20a *g,
51 struct dbg_profiler_object_data **_prof)
52{
53 struct dbg_profiler_object_data *prof;
54 *_prof = NULL;
55
56 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
57
58 prof = nvgpu_kzalloc(g, sizeof(*prof));
59 if (!prof)
60 return -ENOMEM;
61
62 prof->prof_handle = generate_unique_id();
63 *_prof = prof;
64 return 0;
65}
66
67static int alloc_session(struct gk20a *g, struct dbg_session_gk20a_linux **_dbg_s_linux)
68{
69 struct dbg_session_gk20a_linux *dbg_s_linux;
70 *_dbg_s_linux = NULL;
71
72 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
73
74 dbg_s_linux = nvgpu_kzalloc(g, sizeof(*dbg_s_linux));
75 if (!dbg_s_linux)
76 return -ENOMEM;
77
78 dbg_s_linux->dbg_s.id = generate_unique_id();
79 *_dbg_s_linux = dbg_s_linux;
80 return 0;
81}
82
83static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
84 struct gr_gk20a *gr);
85
86static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset);
87
88static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
89 struct nvgpu_dbg_gpu_exec_reg_ops_args *args);
90
91static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
92 struct nvgpu_dbg_gpu_powergate_args *args);
93
94static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
95 struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args);
96
97static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
98 struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args);
99
100static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
101 struct dbg_session_gk20a *dbg_s,
102 struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args);
103
104static int nvgpu_ioctl_allocate_profiler_object(struct dbg_session_gk20a_linux *dbg_s,
105 struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args);
106
107static int nvgpu_ioctl_free_profiler_object(struct dbg_session_gk20a_linux *dbg_s_linux,
108 struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args);
109
110static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s,
111 struct nvgpu_dbg_gpu_profiler_reserve_args *args);
112
113static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
114 struct nvgpu_dbg_gpu_perfbuf_map_args *args);
115
116static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
117 struct nvgpu_dbg_gpu_perfbuf_unmap_args *args);
118
119static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
120 int timeout_mode);
121
122static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
123 u32 profiler_handle);
124
125static void gk20a_dbg_session_nvgpu_mutex_acquire(struct dbg_session_gk20a *dbg_s);
126
127static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_s);
128
129static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
130 u32 profiler_handle);
131
132static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s);
133
134static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
135 struct file *filp, bool is_profiler);
136
137unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
138{
139 unsigned int mask = 0;
140 struct dbg_session_gk20a_linux *dbg_session_linux = filep->private_data;
141 struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
142
143 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
144
145 poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait);
146
147 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
148
149 if (dbg_s->dbg_events.events_enabled &&
150 dbg_s->dbg_events.num_pending_events > 0) {
151 gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d",
152 dbg_s->id);
153 gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
154 dbg_s->dbg_events.num_pending_events);
155 mask = (POLLPRI | POLLIN);
156 }
157
158 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
159
160 return mask;
161}
162
163int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
164{
165 struct dbg_session_gk20a_linux *dbg_session_linux = filp->private_data;
166 struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
167 struct gk20a *g = dbg_s->g;
168 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
169
170 gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name);
171
172 /* unbind channels */
173 dbg_unbind_all_channels_gk20a(dbg_s);
174
 175	/* Powergate/timeout enable is called here because a dbg_session that
 176	 * disabled powergating/timeouts via an ioctl may be killed without
 177	 * ever calling the corresponding enable ioctl
 178	 */
179 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
180 g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
181 NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
182 nvgpu_dbg_timeout_enable(dbg_s, NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE);
183
184 /* If this session owned the perf buffer, release it */
185 if (g->perfbuf.owner == dbg_s)
186 gk20a_perfbuf_release_locked(g, g->perfbuf.offset);
187
188 /* Per-context profiler objects were released when we called
189 * dbg_unbind_all_channels. We could still have global ones.
190 */
191 nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
192 dbg_profiler_object_data, prof_obj_entry) {
193 if (prof_obj->session_id == dbg_s->id) {
194 if (prof_obj->has_reservation)
195 g->ops.dbg_session_ops.
196 release_profiler_reservation(dbg_s, prof_obj);
197 nvgpu_list_del(&prof_obj->prof_obj_entry);
198 nvgpu_kfree(g, prof_obj);
199 }
200 }
201 nvgpu_mutex_release(&g->dbg_sessions_lock);
202
203 nvgpu_mutex_destroy(&dbg_s->ch_list_lock);
204 nvgpu_mutex_destroy(&dbg_s->ioctl_lock);
205
206 nvgpu_kfree(g, dbg_session_linux);
207 gk20a_put(g);
208
209 return 0;
210}
211
212int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
213{
214 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
215 return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
216}
217
218static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
219 struct nvgpu_dbg_gpu_timeout_args *args)
220{
221 int err;
222 struct gk20a *g = dbg_s->g;
223
224 gk20a_dbg_fn("powergate mode = %d", args->enable);
225
226 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
227 err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
228 nvgpu_mutex_release(&g->dbg_sessions_lock);
229
230 return err;
231}
232
233static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(
234 struct dbg_session_gk20a *dbg_s,
235 struct nvgpu_dbg_gpu_write_single_sm_error_state_args *args)
236{
237 struct gk20a *g = dbg_s->g;
238 struct gr_gk20a *gr = &g->gr;
239 u32 sm_id;
240 struct channel_gk20a *ch;
241 struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_state;
242 int err = 0;
243
244 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
245 if (!ch)
246 return -EINVAL;
247
248 sm_id = args->sm_id;
249 if (sm_id >= gr->no_of_sm)
250 return -EINVAL;
251
252 sm_error_state = nvgpu_kzalloc(g, sizeof(*sm_error_state));
253 if (!sm_error_state)
254 return -ENOMEM;
255
256 if (args->sm_error_state_record_size > 0) {
257 size_t read_size = sizeof(*sm_error_state);
258
259 if (read_size > args->sm_error_state_record_size)
260 read_size = args->sm_error_state_record_size;
261
262 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
263 err = copy_from_user(sm_error_state,
264 (void __user *)(uintptr_t)
265 args->sm_error_state_record_mem,
266 read_size);
267 nvgpu_mutex_release(&g->dbg_sessions_lock);
268 if (err) {
269 err = -ENOMEM;
270 goto err_free;
271 }
272 }
273
274 err = gk20a_busy(g);
275 if (err)
276 goto err_free;
277
278 err = gr_gk20a_elpg_protected_call(g,
279 g->ops.gr.update_sm_error_state(g, ch,
280 sm_id, sm_error_state));
281
282 gk20a_idle(g);
283
284err_free:
285 nvgpu_kfree(g, sm_error_state);
286
287 return err;
288}
289
290
291static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
292 struct dbg_session_gk20a *dbg_s,
293 struct nvgpu_dbg_gpu_read_single_sm_error_state_args *args)
294{
295 struct gk20a *g = dbg_s->g;
296 struct gr_gk20a *gr = &g->gr;
297 struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_state;
298 u32 sm_id;
299 int err = 0;
300
301 sm_id = args->sm_id;
302 if (sm_id >= gr->no_of_sm)
303 return -EINVAL;
304
305 sm_error_state = gr->sm_error_states + sm_id;
306
307 if (args->sm_error_state_record_size > 0) {
308 size_t write_size = sizeof(*sm_error_state);
309
310 if (write_size > args->sm_error_state_record_size)
311 write_size = args->sm_error_state_record_size;
312
313 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
314 err = copy_to_user((void __user *)(uintptr_t)
315 args->sm_error_state_record_mem,
316 sm_error_state,
317 write_size);
318 nvgpu_mutex_release(&g->dbg_sessions_lock);
319 if (err) {
320 nvgpu_err(g, "copy_to_user failed!");
321 return err;
322 }
323
324 args->sm_error_state_record_size = write_size;
325 }
326
327 return 0;
328}
329
330
331static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(
332 struct dbg_session_gk20a *dbg_s,
333 struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args)
334{
335 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
336
337 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
338
339 dbg_s->broadcast_stop_trigger = (args->broadcast != 0);
340
341 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
342
343 return 0;
344}
345
346static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
347 int timeout_mode)
348{
349 struct gk20a *g = dbg_s->g;
350 int err = 0;
351
352 gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
353 timeout_mode);
354
355 switch (timeout_mode) {
356 case NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE:
357 if (dbg_s->is_timeout_disabled &&
358 --g->dbg_timeout_disabled_refcount == 0) {
359 g->timeouts_enabled = true;
360 }
361 dbg_s->is_timeout_disabled = false;
362 break;
363
364 case NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE:
365 if ((dbg_s->is_timeout_disabled == false) &&
366 (g->dbg_timeout_disabled_refcount++ == 0)) {
367 g->timeouts_enabled = false;
368 }
369 dbg_s->is_timeout_disabled = true;
370 break;
371
372 default:
373 nvgpu_err(g,
374 "unrecognized dbg gpu timeout mode : 0x%x",
375 timeout_mode);
376 err = -EINVAL;
377 break;
378 }
379
380 gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
381 g->timeouts_enabled ? "Yes" : "No");
382
383 return err;
384}
385
386static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
387 struct file *filp, bool is_profiler)
388{
389 struct nvgpu_os_linux *l;
390 struct dbg_session_gk20a_linux *dbg_session_linux;
391 struct dbg_session_gk20a *dbg_s;
392 struct gk20a *g;
393
394 struct device *dev;
395
396 int err;
397
398 if (!is_profiler)
399 l = container_of(inode->i_cdev,
400 struct nvgpu_os_linux, dbg.cdev);
401 else
402 l = container_of(inode->i_cdev,
403 struct nvgpu_os_linux, prof.cdev);
404 g = gk20a_get(&l->g);
405 if (!g)
406 return -ENODEV;
407
408 dev = dev_from_gk20a(g);
409
410 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name);
411
412 err = alloc_session(g, &dbg_session_linux);
413 if (err)
414 goto free_ref;
415
416 dbg_s = &dbg_session_linux->dbg_s;
417
418 filp->private_data = dbg_session_linux;
419 dbg_session_linux->dev = dev;
420 dbg_s->g = g;
421 dbg_s->is_profiler = is_profiler;
422 dbg_s->is_pg_disabled = false;
423 dbg_s->is_timeout_disabled = false;
424
425 nvgpu_cond_init(&dbg_s->dbg_events.wait_queue);
426 nvgpu_init_list_node(&dbg_s->ch_list);
427 err = nvgpu_mutex_init(&dbg_s->ch_list_lock);
428 if (err)
429 goto err_free_session;
430 err = nvgpu_mutex_init(&dbg_s->ioctl_lock);
431 if (err)
432 goto err_destroy_lock;
433 dbg_s->dbg_events.events_enabled = false;
434 dbg_s->dbg_events.num_pending_events = 0;
435
436 return 0;
437
438err_destroy_lock:
439 nvgpu_mutex_destroy(&dbg_s->ch_list_lock);
440err_free_session:
441 nvgpu_kfree(g, dbg_session_linux);
442free_ref:
443 gk20a_put(g);
444 return err;
445}
446
447static int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
448 struct dbg_session_channel_data *ch_data)
449{
450 struct gk20a *g = dbg_s->g;
451 int chid;
452 struct dbg_session_data *session_data;
453 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
454 struct dbg_session_channel_data_linux *ch_data_linux;
455
456 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
457
458 chid = ch_data->chid;
459
460 /* If there's a profiler ctx reservation record associated with this
461 * session/channel pair, release it.
462 */
463 nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
464 dbg_profiler_object_data, prof_obj_entry) {
465 if ((prof_obj->session_id == dbg_s->id) &&
466 (prof_obj->ch->chid == chid)) {
467 if (prof_obj->has_reservation) {
468 g->ops.dbg_session_ops.
469 release_profiler_reservation(dbg_s, prof_obj);
470 }
471 nvgpu_list_del(&prof_obj->prof_obj_entry);
472 nvgpu_kfree(g, prof_obj);
473 }
474 }
475
476 nvgpu_list_del(&ch_data->ch_entry);
477
478 session_data = ch_data->session_data;
479 nvgpu_list_del(&session_data->dbg_s_entry);
480 nvgpu_kfree(dbg_s->g, session_data);
481
482 ch_data_linux = container_of(ch_data, struct dbg_session_channel_data_linux,
483 ch_data);
484
485 fput(ch_data_linux->ch_f);
486 nvgpu_kfree(dbg_s->g, ch_data_linux);
487
488 return 0;
489}
490
491static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
492 struct nvgpu_dbg_gpu_bind_channel_args *args)
493{
494 struct file *f;
495 struct gk20a *g = dbg_s->g;
496 struct channel_gk20a *ch;
497 struct dbg_session_channel_data_linux *ch_data_linux;
498 struct dbg_session_data *session_data;
499 int err = 0;
500
501 gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
502 g->name, args->channel_fd);
503
504 /*
505 * Although gk20a_get_channel_from_file gives us a channel ref, need to
506 * hold a ref to the file during the session lifetime. See comment in
507 * struct dbg_session_channel_data.
508 */
509 f = fget(args->channel_fd);
510 if (!f)
511 return -ENODEV;
512
513 ch = gk20a_get_channel_from_file(args->channel_fd);
514 if (!ch) {
515 gk20a_dbg_fn("no channel found for fd");
516 err = -EINVAL;
517 goto out_fput;
518 }
519
520 gk20a_dbg_fn("%s hwchid=%d", g->name, ch->chid);
521
522 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
523 nvgpu_mutex_acquire(&ch->dbg_s_lock);
524
525 ch_data_linux = nvgpu_kzalloc(g, sizeof(*ch_data_linux));
526 if (!ch_data_linux) {
527 err = -ENOMEM;
528 goto out_chput;
529 }
530 ch_data_linux->ch_f = f;
531 ch_data_linux->ch_data.channel_fd = args->channel_fd;
532 ch_data_linux->ch_data.chid = ch->chid;
533 ch_data_linux->ch_data.unbind_single_channel = dbg_unbind_single_channel_gk20a;
534 nvgpu_init_list_node(&ch_data_linux->ch_data.ch_entry);
535
536 session_data = nvgpu_kzalloc(g, sizeof(*session_data));
537 if (!session_data) {
538 err = -ENOMEM;
539 goto out_kfree;
540 }
541 session_data->dbg_s = dbg_s;
542 nvgpu_init_list_node(&session_data->dbg_s_entry);
543 ch_data_linux->ch_data.session_data = session_data;
544
545 nvgpu_list_add(&session_data->dbg_s_entry, &ch->dbg_s_list);
546
547 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
548 nvgpu_list_add_tail(&ch_data_linux->ch_data.ch_entry, &dbg_s->ch_list);
549 nvgpu_mutex_release(&dbg_s->ch_list_lock);
550
551 nvgpu_mutex_release(&ch->dbg_s_lock);
552 nvgpu_mutex_release(&g->dbg_sessions_lock);
553
554 gk20a_channel_put(ch);
555
556 return 0;
557
558out_kfree:
559 nvgpu_kfree(g, ch_data_linux);
560out_chput:
561 gk20a_channel_put(ch);
562 nvgpu_mutex_release(&ch->dbg_s_lock);
563 nvgpu_mutex_release(&g->dbg_sessions_lock);
564out_fput:
565 fput(f);
566 return err;
567}
568
569static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s)
570{
571 struct dbg_session_channel_data *ch_data, *tmp;
572 struct gk20a *g = dbg_s->g;
573
574 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
575 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
576 nvgpu_list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list,
577 dbg_session_channel_data, ch_entry)
578 ch_data->unbind_single_channel(dbg_s, ch_data);
579 nvgpu_mutex_release(&dbg_s->ch_list_lock);
580 nvgpu_mutex_release(&g->dbg_sessions_lock);
581
582 return 0;
583}
584
585static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
586 struct nvgpu_dbg_gpu_exec_reg_ops_args *args)
587{
588 int err = 0, powergate_err = 0;
589 bool is_pg_disabled = false;
590
591 struct gk20a *g = dbg_s->g;
592 struct channel_gk20a *ch;
593
594 gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);
595
596 if (args->num_ops > g->gpu_characteristics.reg_ops_limit) {
597 nvgpu_err(g, "regops limit exceeded");
598 return -EINVAL;
599 }
600
601 if (args->num_ops == 0) {
602 /* Nothing to do */
603 return 0;
604 }
605
606 if (g->dbg_regops_tmp_buf_ops == 0 || !g->dbg_regops_tmp_buf) {
607 nvgpu_err(g, "reg ops work buffer not allocated");
608 return -ENODEV;
609 }
610
611 if (!dbg_s->id) {
612 nvgpu_err(g, "can't call reg_ops on an unbound debugger session");
613 return -EINVAL;
614 }
615
616 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
617 if (!dbg_s->is_profiler && !ch) {
618 nvgpu_err(g, "bind a channel before regops for a debugging session");
619 return -EINVAL;
620 }
621
622 /* be sure that ctx info is in place */
623 if (!g->is_virtual &&
624 !gr_context_info_available(dbg_s, &g->gr)) {
625 nvgpu_err(g, "gr context data not available");
626 return -ENODEV;
627 }
628
629 /* since exec_reg_ops sends methods to the ucode, it must take the
630 * global gpu lock to protect against mixing methods from debug sessions
631 * on other channels */
632 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
633
634 if (!dbg_s->is_pg_disabled && !g->is_virtual) {
635 /* In the virtual case, the server will handle
636 * disabling/enabling powergating when processing reg ops
637 */
638 powergate_err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
639 NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE);
640 is_pg_disabled = true;
641 }
642
643 if (!powergate_err) {
644 u64 ops_offset = 0; /* index offset */
645
646 while (ops_offset < args->num_ops && !err) {
647 const u64 num_ops =
648 min(args->num_ops - ops_offset,
649 (u64)(g->dbg_regops_tmp_buf_ops));
650 const u64 fragment_size =
651 num_ops * sizeof(g->dbg_regops_tmp_buf[0]);
652
653 void __user *const fragment =
654 (void __user *)(uintptr_t)
655 (args->ops +
656 ops_offset * sizeof(g->dbg_regops_tmp_buf[0]));
657
658 gk20a_dbg_fn("Regops fragment: start_op=%llu ops=%llu",
659 ops_offset, num_ops);
660
661 gk20a_dbg_fn("Copying regops from userspace");
662
663 if (copy_from_user(g->dbg_regops_tmp_buf,
664 fragment, fragment_size)) {
665 nvgpu_err(g, "copy_from_user failed!");
666 err = -EFAULT;
667 break;
668 }
669
670 err = g->ops.dbg_session_ops.exec_reg_ops(
671 dbg_s, g->dbg_regops_tmp_buf, num_ops);
672
673 gk20a_dbg_fn("Copying result to userspace");
674
675 if (copy_to_user(fragment, g->dbg_regops_tmp_buf,
676 fragment_size)) {
677 nvgpu_err(g, "copy_to_user failed!");
678 err = -EFAULT;
679 break;
680 }
681
682 ops_offset += num_ops;
683 }
684
685 /* enable powergate, if previously disabled */
686 if (is_pg_disabled) {
687 powergate_err =
688 g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
689 NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
690 }
691 }
692
693 nvgpu_mutex_release(&g->dbg_sessions_lock);
694
695 if (!err && powergate_err)
696 err = powergate_err;
697
698 if (err)
699 nvgpu_err(g, "dbg regops failed");
700
701 return err;
702}
703
704static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
705 struct nvgpu_dbg_gpu_powergate_args *args)
706{
707 int err;
708 struct gk20a *g = dbg_s->g;
709 gk20a_dbg_fn("%s powergate mode = %d",
710 g->name, args->mode);
711
712 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
713 err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, args->mode);
714 nvgpu_mutex_release(&g->dbg_sessions_lock);
715 return err;
716}
717
718static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
719 struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args)
720{
721 int err;
722 struct gk20a *g = dbg_s->g;
723 struct channel_gk20a *ch_gk20a;
724
725 gk20a_dbg_fn("%s smpc ctxsw mode = %d",
726 g->name, args->mode);
727
728 err = gk20a_busy(g);
729 if (err) {
730 nvgpu_err(g, "failed to poweron");
731 return err;
732 }
733
734 /* Take the global lock, since we'll be doing global regops */
735 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
736
737 ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
738 if (!ch_gk20a) {
739 nvgpu_err(g,
740 "no bound channel for smpc ctxsw mode update");
741 err = -EINVAL;
742 goto clean_up;
743 }
744
745 err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a,
746 args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
747 if (err) {
748 nvgpu_err(g,
749 "error (%d) during smpc ctxsw mode update", err);
750 goto clean_up;
751 }
752
753 err = g->ops.regops.apply_smpc_war(dbg_s);
754 clean_up:
755 nvgpu_mutex_release(&g->dbg_sessions_lock);
756 gk20a_idle(g);
757 return err;
758}
759
760static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
761 struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args)
762{
763 int err;
764 struct gk20a *g = dbg_s->g;
765 struct channel_gk20a *ch_gk20a;
766
767 gk20a_dbg_fn("%s pm ctxsw mode = %d",
768 g->name, args->mode);
769
 770	/* Must have a valid reservation to enable/disable hwpm ctxsw.
771 * Just print an error message for now, but eventually this should
772 * return an error, at the point where all client sw has been
773 * cleaned up.
774 */
775 if (!dbg_s->has_profiler_reservation) {
776 nvgpu_err(g,
777 "session doesn't have a valid reservation");
778 }
779
780 err = gk20a_busy(g);
781 if (err) {
782 nvgpu_err(g, "failed to poweron");
783 return err;
784 }
785
786 /* Take the global lock, since we'll be doing global regops */
787 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
788
789 ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
790 if (!ch_gk20a) {
791 nvgpu_err(g,
792 "no bound channel for pm ctxsw mode update");
793 err = -EINVAL;
794 goto clean_up;
795 }
796
797 err = g->ops.gr.update_hwpm_ctxsw_mode(g, ch_gk20a,
798 args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW);
799 if (err)
800 nvgpu_err(g,
801 "error (%d) during pm ctxsw mode update", err);
802
 803	/* gk20a would require a WAR to set the core PM_ENABLE bit; this is
 804	 * not added here since gk20a is being deprecated
 805	 */
806 clean_up:
807 nvgpu_mutex_release(&g->dbg_sessions_lock);
808 gk20a_idle(g);
809 return err;
810}
811
812static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
813 struct dbg_session_gk20a *dbg_s,
814 struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
815{
816 struct gk20a *g = dbg_s->g;
817 struct channel_gk20a *ch;
818 int err = 0, action = args->mode;
819
820 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);
821
822 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
823 if (!ch)
824 return -EINVAL;
825
826 err = gk20a_busy(g);
827 if (err) {
828 nvgpu_err(g, "failed to poweron");
829 return err;
830 }
831
832 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
833
834 /* Suspend GPU context switching */
835 err = gr_gk20a_disable_ctxsw(g);
836 if (err) {
837 nvgpu_err(g, "unable to stop gr ctxsw");
838 /* this should probably be ctx-fatal... */
839 goto clean_up;
840 }
841
842 switch (action) {
843 case NVGPU_DBG_GPU_SUSPEND_ALL_SMS:
844 gr_gk20a_suspend_context(ch);
845 break;
846
847 case NVGPU_DBG_GPU_RESUME_ALL_SMS:
848 gr_gk20a_resume_context(ch);
849 break;
850 }
851
852 err = gr_gk20a_enable_ctxsw(g);
853 if (err)
854 nvgpu_err(g, "unable to restart ctxsw!");
855
856clean_up:
857 nvgpu_mutex_release(&g->dbg_sessions_lock);
858 gk20a_idle(g);
859
860 return err;
861}
862
863static int nvgpu_ioctl_allocate_profiler_object(
864 struct dbg_session_gk20a_linux *dbg_session_linux,
865 struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args)
866{
867 int err = 0;
868 struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
869 struct gk20a *g = get_gk20a(dbg_session_linux->dev);
870 struct dbg_profiler_object_data *prof_obj;
871
872 gk20a_dbg_fn("%s", g->name);
873
874 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
875
876 err = alloc_profiler(g, &prof_obj);
877 if (err)
878 goto clean_up;
879
880 prof_obj->session_id = dbg_s->id;
881
882 if (dbg_s->is_profiler)
883 prof_obj->ch = NULL;
884 else {
885 prof_obj->ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
886 if (prof_obj->ch == NULL) {
887 nvgpu_err(g,
888 "bind a channel for dbg session");
889 nvgpu_kfree(g, prof_obj);
890 err = -EINVAL;
891 goto clean_up;
892 }
893 }
894
895 /* Return handle to client */
896 args->profiler_handle = prof_obj->prof_handle;
897
898 nvgpu_init_list_node(&prof_obj->prof_obj_entry);
899
900 nvgpu_list_add(&prof_obj->prof_obj_entry, &g->profiler_objects);
901clean_up:
902 nvgpu_mutex_release(&g->dbg_sessions_lock);
903 return err;
904}
905
906static int nvgpu_ioctl_free_profiler_object(
907 struct dbg_session_gk20a_linux *dbg_s_linux,
908 struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args)
909{
910 int err = 0;
911 struct dbg_session_gk20a *dbg_s = &dbg_s_linux->dbg_s;
912 struct gk20a *g = get_gk20a(dbg_s_linux->dev);
913 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
914 bool obj_found = false;
915
916 gk20a_dbg_fn("%s session_id = %d profiler_handle = %x",
917 g->name, dbg_s->id, args->profiler_handle);
918
919 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
920
921 /* Remove profiler object from the list, if a match is found */
922 nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
923 dbg_profiler_object_data, prof_obj_entry) {
924 if (prof_obj->prof_handle == args->profiler_handle) {
925 if (prof_obj->session_id != dbg_s->id) {
926 nvgpu_err(g,
927 "invalid handle %x",
928 args->profiler_handle);
929 err = -EINVAL;
930 break;
931 }
932 if (prof_obj->has_reservation)
933 g->ops.dbg_session_ops.
934 release_profiler_reservation(dbg_s, prof_obj);
935 nvgpu_list_del(&prof_obj->prof_obj_entry);
936 nvgpu_kfree(g, prof_obj);
937 obj_found = true;
938 break;
939 }
940 }
941 if (!obj_found) {
942 nvgpu_err(g, "profiler %x not found",
943 args->profiler_handle);
944 err = -EINVAL;
945 }
946
947 nvgpu_mutex_release(&g->dbg_sessions_lock);
948 return err;
949}
950
951static struct dbg_profiler_object_data *find_matching_prof_obj(
952 struct dbg_session_gk20a *dbg_s,
953 u32 profiler_handle)
954{
955 struct gk20a *g = dbg_s->g;
956 struct dbg_profiler_object_data *prof_obj;
957
958 nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
959 dbg_profiler_object_data, prof_obj_entry) {
960 if (prof_obj->prof_handle == profiler_handle) {
961 if (prof_obj->session_id != dbg_s->id) {
962 nvgpu_err(g,
963 "invalid handle %x",
964 profiler_handle);
965 return NULL;
966 }
967 return prof_obj;
968 }
969 }
970 return NULL;
971}
972
973/* used in scenarios where the debugger session can take just the inter-session
974 * lock for performance, but the profiler session must take the per-gpu lock
975 * since it might not have an associated channel. */
976static void gk20a_dbg_session_nvgpu_mutex_acquire(struct dbg_session_gk20a *dbg_s)
977{
978 struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
979
980 if (dbg_s->is_profiler || !ch)
981 nvgpu_mutex_acquire(&dbg_s->g->dbg_sessions_lock);
982 else
983 nvgpu_mutex_acquire(&ch->dbg_s_lock);
984}
985
986static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_s)
987{
988 struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
989
990 if (dbg_s->is_profiler || !ch)
991 nvgpu_mutex_release(&dbg_s->g->dbg_sessions_lock);
992 else
993 nvgpu_mutex_release(&ch->dbg_s_lock);
994}
995
996static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
997{
998 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
999
1000 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
1001
1002 dbg_s->dbg_events.events_enabled = true;
1003 dbg_s->dbg_events.num_pending_events = 0;
1004
1005 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
1006}
1007
1008static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
1009{
1010 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
1011
1012 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
1013
1014 dbg_s->dbg_events.events_enabled = false;
1015 dbg_s->dbg_events.num_pending_events = 0;
1016
1017 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
1018}
1019
1020static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
1021{
1022 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
1023
1024 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
1025
1026 if (dbg_s->dbg_events.events_enabled &&
1027 dbg_s->dbg_events.num_pending_events > 0)
1028 dbg_s->dbg_events.num_pending_events--;
1029
1030 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
1031}
1032
1033
1034static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
1035 struct nvgpu_dbg_gpu_events_ctrl_args *args)
1036{
1037 int ret = 0;
1038 struct channel_gk20a *ch;
1039
1040 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
1041
1042 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1043 if (!ch) {
1044 nvgpu_err(dbg_s->g,
1045 "no channel bound to dbg session");
1046 return -EINVAL;
1047 }
1048
1049 switch (args->cmd) {
1050 case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE:
1051 gk20a_dbg_gpu_events_enable(dbg_s);
1052 break;
1053
1054 case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_DISABLE:
1055 gk20a_dbg_gpu_events_disable(dbg_s);
1056 break;
1057
1058 case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR:
1059 gk20a_dbg_gpu_events_clear(dbg_s);
1060 break;
1061
1062 default:
1063 nvgpu_err(dbg_s->g,
1064 "unrecognized dbg gpu events ctrl cmd: 0x%x",
1065 args->cmd);
1066 ret = -EINVAL;
1067 break;
1068 }
1069
1070 return ret;
1071}
1072
1073static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
1074 struct nvgpu_dbg_gpu_perfbuf_map_args *args)
1075{
1076 struct gk20a *g = dbg_s->g;
1077 struct mm_gk20a *mm = &g->mm;
1078 int err;
1079 u32 virt_size;
1080 u32 big_page_size = g->ops.mm.get_default_big_page_size();
1081
1082 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1083
1084 if (g->perfbuf.owner) {
1085 nvgpu_mutex_release(&g->dbg_sessions_lock);
1086 return -EBUSY;
1087 }
1088
1089 mm->perfbuf.vm = nvgpu_vm_init(g, big_page_size,
1090 big_page_size << 10,
1091 NV_MM_DEFAULT_KERNEL_SIZE,
1092 NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
1093 false, false, "perfbuf");
1094 if (!mm->perfbuf.vm) {
1095 nvgpu_mutex_release(&g->dbg_sessions_lock);
1096 return -ENOMEM;
1097 }
1098
1099 err = nvgpu_vm_map_buffer(mm->perfbuf.vm,
1100 args->dmabuf_fd,
1101 &args->offset,
1102 0,
1103 0,
1104 0,
1105 0,
1106 args->mapping_size,
1107 NULL);
1108 if (err)
1109 goto err_remove_vm;
1110
1111 /* perf output buffer may not cross a 4GB boundary */
1112 virt_size = u64_lo32(args->mapping_size);
1113 if (u64_hi32(args->offset) != u64_hi32(args->offset + virt_size)) {
1114 err = -EINVAL;
1115 goto err_unmap;
1116 }
1117
1118 err = g->ops.dbg_session_ops.perfbuffer_enable(g,
1119 args->offset, virt_size);
1120 if (err)
1121 goto err_unmap;
1122
1123 g->perfbuf.owner = dbg_s;
1124 g->perfbuf.offset = args->offset;
1125 nvgpu_mutex_release(&g->dbg_sessions_lock);
1126
1127 return 0;
1128
1129err_unmap:
1130 nvgpu_vm_unmap_buffer(mm->perfbuf.vm, args->offset, NULL);
1131err_remove_vm:
1132 nvgpu_vm_put(mm->perfbuf.vm);
1133 nvgpu_mutex_release(&g->dbg_sessions_lock);
1134 return err;
1135}
1136
1137static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
1138 struct nvgpu_dbg_gpu_perfbuf_unmap_args *args)
1139{
1140 struct gk20a *g = dbg_s->g;
1141 int err;
1142
1143 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1144 if ((g->perfbuf.owner != dbg_s) ||
1145 (g->perfbuf.offset != args->offset)) {
1146 nvgpu_mutex_release(&g->dbg_sessions_lock);
1147 return -EINVAL;
1148 }
1149
1150 err = gk20a_perfbuf_release_locked(g, args->offset);
1151
1152 nvgpu_mutex_release(&g->dbg_sessions_lock);
1153
1154 return err;
1155}
1156
1157static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
1158 struct nvgpu_dbg_gpu_pc_sampling_args *args)
1159{
1160 struct channel_gk20a *ch;
1161 struct gk20a *g = dbg_s->g;
1162
1163 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1164 if (!ch)
1165 return -EINVAL;
1166
1167 gk20a_dbg_fn("");
1168
1169 return g->ops.gr.update_pc_sampling ?
1170 g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL;
1171}
1172
1173static int nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(
1174 struct dbg_session_gk20a *dbg_s,
1175 struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *args)
1176{
1177 struct gk20a *g = dbg_s->g;
1178 struct gr_gk20a *gr = &g->gr;
1179 u32 sm_id;
1180 struct channel_gk20a *ch;
1181 int err = 0;
1182
1183 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1184 if (!ch)
1185 return -EINVAL;
1186
1187 sm_id = args->sm_id;
1188
1189 if (sm_id >= gr->no_of_sm)
1190 return -EINVAL;
1191
1192 err = gk20a_busy(g);
1193 if (err)
1194 return err;
1195
1196 err = gr_gk20a_elpg_protected_call(g,
1197 g->ops.gr.clear_sm_error_state(g, ch, sm_id));
1198
1199 gk20a_idle(g);
1200
1201 return err;
1202}
1203
1204static int
1205nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(struct dbg_session_gk20a *dbg_s,
1206 struct nvgpu_dbg_gpu_suspend_resume_contexts_args *args)
1207{
1208 struct gk20a *g = dbg_s->g;
1209 int err = 0;
1210 int ctx_resident_ch_fd = -1;
1211
1212 err = gk20a_busy(g);
1213 if (err)
1214 return err;
1215
1216 switch (args->action) {
1217 case NVGPU_DBG_GPU_SUSPEND_ALL_CONTEXTS:
1218 err = g->ops.gr.suspend_contexts(g, dbg_s,
1219 &ctx_resident_ch_fd);
1220 break;
1221
1222 case NVGPU_DBG_GPU_RESUME_ALL_CONTEXTS:
1223 err = g->ops.gr.resume_contexts(g, dbg_s,
1224 &ctx_resident_ch_fd);
1225 break;
1226 }
1227
1228 if (ctx_resident_ch_fd < 0) {
1229 args->is_resident_context = 0;
1230 } else {
1231 args->is_resident_context = 1;
1232 args->resident_context_fd = ctx_resident_ch_fd;
1233 }
1234
1235 gk20a_idle(g);
1236
1237 return err;
1238}
1239
1240static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
1241 struct nvgpu_dbg_gpu_access_fb_memory_args *args)
1242{
1243 struct gk20a *g = dbg_s->g;
1244 struct dma_buf *dmabuf;
1245 void __user *user_buffer = (void __user *)(uintptr_t)args->buffer;
1246 void *buffer;
1247 u64 size, access_size, offset;
1248 u64 access_limit_size = SZ_4K;
1249 int err = 0;
1250
1251 if ((args->offset & 3) || (!args->size) || (args->size & 3))
1252 return -EINVAL;
1253
1254 dmabuf = dma_buf_get(args->dmabuf_fd);
1255 if (IS_ERR(dmabuf))
1256 return -EINVAL;
1257
1258 if ((args->offset > dmabuf->size) ||
1259 (args->size > dmabuf->size) ||
1260 (args->offset + args->size > dmabuf->size)) {
1261 err = -EINVAL;
1262 goto fail_dmabuf_put;
1263 }
1264
1265 buffer = nvgpu_big_zalloc(g, access_limit_size);
1266 if (!buffer) {
1267 err = -ENOMEM;
1268 goto fail_dmabuf_put;
1269 }
1270
1271 size = args->size;
1272 offset = 0;
1273
1274 err = gk20a_busy(g);
1275 if (err)
1276 goto fail_free_buffer;
1277
1278 while (size) {
1279 /* Max access size of access_limit_size in one loop */
1280 access_size = min(access_limit_size, size);
1281
1282 if (args->cmd ==
1283 NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE) {
1284 err = copy_from_user(buffer, user_buffer + offset,
1285 access_size);
1286 if (err)
1287 goto fail_idle;
1288 }
1289
1290 err = nvgpu_vidmem_buf_access_memory(g, dmabuf, buffer,
1291 args->offset + offset, access_size,
1292 args->cmd);
1293 if (err)
1294 goto fail_idle;
1295
1296 if (args->cmd ==
1297 NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ) {
1298 err = copy_to_user(user_buffer + offset,
1299 buffer, access_size);
1300 if (err)
1301 goto fail_idle;
1302 }
1303
1304 size -= access_size;
1305 offset += access_size;
1306 }
1307
1308fail_idle:
1309 gk20a_idle(g);
1310fail_free_buffer:
1311 nvgpu_big_free(g, buffer);
1312fail_dmabuf_put:
1313 dma_buf_put(dmabuf);
1314
1315 return err;
1316}
1317
1318static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s,
1319 struct nvgpu_dbg_gpu_profiler_reserve_args *args)
1320{
1321 if (args->acquire)
1322 return nvgpu_profiler_reserve_acquire(dbg_s, args->profiler_handle);
1323
1324 return nvgpu_profiler_reserve_release(dbg_s, args->profiler_handle);
1325}
1326
1327static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s,
1328 struct nvgpu_dbg_gpu_timeout_args *args)
1329{
1330 int status;
1331 struct gk20a *g = dbg_s->g;
1332
1333 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1334 status = g->timeouts_enabled;
1335 nvgpu_mutex_release(&g->dbg_sessions_lock);
1336
1337 if (status)
1338 args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE;
1339 else
1340 args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE;
1341}
1342
1343/* In order to perform a context relative op the context has
1344 * to be created already... which would imply that the
1345 * context switch mechanism has already been put in place.
 1346 * So by the time we perform such an operation it should always
1347 * be possible to query for the appropriate context offsets, etc.
1348 *
 1349 * But note: while the dbg_gpu bind requires a channel fd,
1350 * it doesn't require an allocated gr/compute obj at that point...
1351 */
1352static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
1353 struct gr_gk20a *gr)
1354{
1355 int err;
1356
1357 nvgpu_mutex_acquire(&gr->ctx_mutex);
1358 err = !gr->ctx_vars.golden_image_initialized;
1359 nvgpu_mutex_release(&gr->ctx_mutex);
1360 if (err)
1361 return false;
1362 return true;
1363
1364}
1365
1366static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset)
1367{
1368 struct mm_gk20a *mm = &g->mm;
1369 struct vm_gk20a *vm = mm->perfbuf.vm;
1370 int err;
1371
1372 err = g->ops.dbg_session_ops.perfbuffer_disable(g);
1373
1374 nvgpu_vm_unmap_buffer(vm, offset, NULL);
1375 gk20a_free_inst_block(g, &mm->perfbuf.inst_block);
1376 nvgpu_vm_put(vm);
1377
1378 g->perfbuf.owner = NULL;
1379 g->perfbuf.offset = 0;
1380 return err;
1381}
1382
1383static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
1384 u32 profiler_handle)
1385{
1386 struct gk20a *g = dbg_s->g;
1387 struct dbg_profiler_object_data *prof_obj;
1388 int err = 0;
1389
1390 gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle);
1391
1392 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1393
1394 /* Find matching object. */
1395 prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);
1396
1397 if (!prof_obj) {
1398 nvgpu_err(g, "object not found");
1399 err = -EINVAL;
1400 goto exit;
1401 }
1402
1403 if (prof_obj->has_reservation)
1404 g->ops.dbg_session_ops.release_profiler_reservation(dbg_s, prof_obj);
1405 else {
1406 nvgpu_err(g, "No reservation found");
1407 err = -EINVAL;
1408 goto exit;
1409 }
1410exit:
1411 nvgpu_mutex_release(&g->dbg_sessions_lock);
1412 return err;
1413}
1414
1415static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1416 u32 profiler_handle)
1417{
1418 struct gk20a *g = dbg_s->g;
1419 struct dbg_profiler_object_data *prof_obj, *my_prof_obj;
1420 int err = 0;
1421
1422 gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle);
1423
1424 if (g->profiler_reservation_count < 0) {
1425 nvgpu_err(g, "Negative reservation count!");
1426 return -EINVAL;
1427 }
1428
1429 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1430
1431 /* Find matching object. */
1432 my_prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);
1433
1434 if (!my_prof_obj) {
1435 nvgpu_err(g, "object not found");
1436 err = -EINVAL;
1437 goto exit;
1438 }
1439
1440 /* If we already have the reservation, we're done */
1441 if (my_prof_obj->has_reservation) {
1442 err = 0;
1443 goto exit;
1444 }
1445
1446 if (my_prof_obj->ch == NULL) {
1447 /* Global reservations are only allowed if there are no other
1448 * global or per-context reservations currently held
1449 */
1450 if (!g->ops.dbg_session_ops.check_and_set_global_reservation(
1451 dbg_s, my_prof_obj)) {
1452 nvgpu_err(g,
1453 "global reserve: have existing reservation");
1454 err = -EBUSY;
1455 }
1456 } else if (g->global_profiler_reservation_held) {
1457 /* If there's a global reservation,
1458 * we can't take a per-context one.
1459 */
1460 nvgpu_err(g,
1461 "per-ctxt reserve: global reservation in effect");
1462 err = -EBUSY;
1463 } else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) {
1464 /* TSG: check that another channel in the TSG
1465 * doesn't already have the reservation
1466 */
1467 int my_tsgid = my_prof_obj->ch->tsgid;
1468
1469 nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
1470 dbg_profiler_object_data, prof_obj_entry) {
1471 if (prof_obj->has_reservation &&
1472 (prof_obj->ch->tsgid == my_tsgid)) {
1473 nvgpu_err(g,
1474 "per-ctxt reserve (tsg): already reserved");
1475 err = -EBUSY;
1476 goto exit;
1477 }
1478 }
1479
1480 if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
1481 dbg_s, my_prof_obj)) {
1482 /* Another guest OS has the global reservation */
1483 nvgpu_err(g,
1484 "per-ctxt reserve: global reservation in effect");
1485 err = -EBUSY;
1486 }
1487 } else {
1488 /* channel: check that some other profiler object doesn't
1489 * already have the reservation.
1490 */
1491 struct channel_gk20a *my_ch = my_prof_obj->ch;
1492
1493 nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
1494 dbg_profiler_object_data, prof_obj_entry) {
1495 if (prof_obj->has_reservation &&
1496 (prof_obj->ch == my_ch)) {
1497 nvgpu_err(g,
1498 "per-ctxt reserve (ch): already reserved");
1499 err = -EBUSY;
1500 goto exit;
1501 }
1502 }
1503
1504 if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
1505 dbg_s, my_prof_obj)) {
1506 /* Another guest OS has the global reservation */
1507 nvgpu_err(g,
1508 "per-ctxt reserve: global reservation in effect");
1509 err = -EBUSY;
1510 }
1511 }
1512exit:
1513 nvgpu_mutex_release(&g->dbg_sessions_lock);
1514 return err;
1515}
1516
1517static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
1518 struct nvgpu_dbg_gpu_unbind_channel_args *args)
1519{
1520 struct dbg_session_channel_data *ch_data;
1521 struct gk20a *g = dbg_s->g;
1522 bool channel_found = false;
1523 struct channel_gk20a *ch;
1524 int err;
1525
1526 gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
1527 g->name, args->channel_fd);
1528
1529 ch = gk20a_get_channel_from_file(args->channel_fd);
1530 if (!ch) {
1531 gk20a_dbg_fn("no channel found for fd");
1532 return -EINVAL;
1533 }
1534
1535 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
1536 nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
1537 dbg_session_channel_data, ch_entry) {
1538 if (ch->chid == ch_data->chid) {
1539 channel_found = true;
1540 break;
1541 }
1542 }
1543 nvgpu_mutex_release(&dbg_s->ch_list_lock);
1544
1545 if (!channel_found) {
1546 gk20a_dbg_fn("channel not bounded, fd=%d\n", args->channel_fd);
1547 err = -EINVAL;
1548 goto out;
1549 }
1550
1551 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1552 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
1553 err = dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
1554 nvgpu_mutex_release(&dbg_s->ch_list_lock);
1555 nvgpu_mutex_release(&g->dbg_sessions_lock);
1556
1557out:
1558 gk20a_channel_put(ch);
1559 return err;
1560}
1561
1562int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
1563{
1564 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
1565 return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
1566}
1567
1568long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
1569 unsigned long arg)
1570{
1571 struct dbg_session_gk20a_linux *dbg_s_linux = filp->private_data;
1572 struct dbg_session_gk20a *dbg_s = &dbg_s_linux->dbg_s;
1573 struct gk20a *g = dbg_s->g;
1574 u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
1575 int err = 0;
1576
1577 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
1578
1579 if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
1580 (_IOC_NR(cmd) == 0) ||
1581 (_IOC_NR(cmd) > NVGPU_DBG_GPU_IOCTL_LAST) ||
1582 (_IOC_SIZE(cmd) > NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE))
1583 return -EINVAL;
1584
1585 memset(buf, 0, sizeof(buf));
1586 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1587 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
1588 return -EFAULT;
1589 }
1590
1591 if (!g->gr.sw_ready) {
1592 err = gk20a_busy(g);
1593 if (err)
1594 return err;
1595
1596 gk20a_idle(g);
1597 }
1598
1599 /* protect from threaded user space calls */
1600 nvgpu_mutex_acquire(&dbg_s->ioctl_lock);
1601
1602 switch (cmd) {
1603 case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL:
1604 err = dbg_bind_channel_gk20a(dbg_s,
1605 (struct nvgpu_dbg_gpu_bind_channel_args *)buf);
1606 break;
1607
1608 case NVGPU_DBG_GPU_IOCTL_REG_OPS:
1609 err = nvgpu_ioctl_channel_reg_ops(dbg_s,
1610 (struct nvgpu_dbg_gpu_exec_reg_ops_args *)buf);
1611 break;
1612
1613 case NVGPU_DBG_GPU_IOCTL_POWERGATE:
1614 err = nvgpu_ioctl_powergate_gk20a(dbg_s,
1615 (struct nvgpu_dbg_gpu_powergate_args *)buf);
1616 break;
1617
1618 case NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL:
1619 err = gk20a_dbg_gpu_events_ctrl(dbg_s,
1620 (struct nvgpu_dbg_gpu_events_ctrl_args *)buf);
1621 break;
1622
1623 case NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE:
1624 err = nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(dbg_s,
1625 (struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *)buf);
1626 break;
1627
1628 case NVGPU_DBG_GPU_IOCTL_HWPM_CTXSW_MODE:
1629 err = nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(dbg_s,
1630 (struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *)buf);
1631 break;
1632
1633 case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS:
1634 err = nvgpu_dbg_gpu_ioctl_suspend_resume_sm(dbg_s,
1635 (struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *)buf);
1636 break;
1637
1638 case NVGPU_DBG_GPU_IOCTL_PERFBUF_MAP:
1639 err = gk20a_perfbuf_map(dbg_s,
1640 (struct nvgpu_dbg_gpu_perfbuf_map_args *)buf);
1641 break;
1642
1643 case NVGPU_DBG_GPU_IOCTL_PERFBUF_UNMAP:
1644 err = gk20a_perfbuf_unmap(dbg_s,
1645 (struct nvgpu_dbg_gpu_perfbuf_unmap_args *)buf);
1646 break;
1647
1648 case NVGPU_DBG_GPU_IOCTL_PC_SAMPLING:
1649 err = gk20a_dbg_pc_sampling(dbg_s,
1650 (struct nvgpu_dbg_gpu_pc_sampling_args *)buf);
1651 break;
1652
1653 case NVGPU_DBG_GPU_IOCTL_SET_NEXT_STOP_TRIGGER_TYPE:
1654 err = nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(dbg_s,
1655 (struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *)buf);
1656 break;
1657
1658 case NVGPU_DBG_GPU_IOCTL_TIMEOUT:
1659 err = nvgpu_dbg_gpu_ioctl_timeout(dbg_s,
1660 (struct nvgpu_dbg_gpu_timeout_args *)buf);
1661 break;
1662
1663 case NVGPU_DBG_GPU_IOCTL_GET_TIMEOUT:
1664 nvgpu_dbg_gpu_ioctl_get_timeout(dbg_s,
1665 (struct nvgpu_dbg_gpu_timeout_args *)buf);
1666 break;
1667
1668 case NVGPU_DBG_GPU_IOCTL_READ_SINGLE_SM_ERROR_STATE:
1669 err = nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(dbg_s,
1670 (struct nvgpu_dbg_gpu_read_single_sm_error_state_args *)buf);
1671 break;
1672
1673 case NVGPU_DBG_GPU_IOCTL_CLEAR_SINGLE_SM_ERROR_STATE:
1674 err = nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(dbg_s,
1675 (struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *)buf);
1676 break;
1677
1678 case NVGPU_DBG_GPU_IOCTL_WRITE_SINGLE_SM_ERROR_STATE:
1679 err = nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(dbg_s,
1680 (struct nvgpu_dbg_gpu_write_single_sm_error_state_args *)buf);
1681 break;
1682
1683 case NVGPU_DBG_GPU_IOCTL_UNBIND_CHANNEL:
1684 err = dbg_unbind_channel_gk20a(dbg_s,
1685 (struct nvgpu_dbg_gpu_unbind_channel_args *)buf);
1686 break;
1687
1688 case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_CONTEXTS:
1689 err = nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(dbg_s,
1690 (struct nvgpu_dbg_gpu_suspend_resume_contexts_args *)buf);
1691 break;
1692
1693 case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY:
1694 err = nvgpu_dbg_gpu_ioctl_access_fb_memory(dbg_s,
1695 (struct nvgpu_dbg_gpu_access_fb_memory_args *)buf);
1696 break;
1697
1698 case NVGPU_DBG_GPU_IOCTL_PROFILER_ALLOCATE:
1699 err = nvgpu_ioctl_allocate_profiler_object(dbg_s_linux,
1700 (struct nvgpu_dbg_gpu_profiler_obj_mgt_args *)buf);
1701 break;
1702
1703 case NVGPU_DBG_GPU_IOCTL_PROFILER_FREE:
1704 err = nvgpu_ioctl_free_profiler_object(dbg_s_linux,
1705 (struct nvgpu_dbg_gpu_profiler_obj_mgt_args *)buf);
1706 break;
1707
1708 case NVGPU_DBG_GPU_IOCTL_PROFILER_RESERVE:
1709 err = nvgpu_ioctl_profiler_reserve(dbg_s,
1710 (struct nvgpu_dbg_gpu_profiler_reserve_args *)buf);
1711 break;
1712
1713 default:
1714 nvgpu_err(g,
1715 "unrecognized dbg gpu ioctl cmd: 0x%x",
1716 cmd);
1717 err = -ENOTTY;
1718 break;
1719 }
1720
1721 nvgpu_mutex_release(&dbg_s->ioctl_lock);
1722
1723 gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
1724
1725 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1726 err = copy_to_user((void __user *)arg,
1727 buf, _IOC_SIZE(cmd));
1728
1729 return err;
1730}
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.h b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.h
new file mode 100644
index 00000000..810555a8
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.h
@@ -0,0 +1,50 @@
1/*
2 * Tegra GK20A GPU Debugger Driver
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18#ifndef DBG_GPU_IOCTL_GK20A_H
19#define DBG_GPU_IOCTL_GK20A_H
20#include <linux/poll.h>
21
22#include "gk20a/dbg_gpu_gk20a.h"
23
24struct dbg_session_gk20a_linux {
25 struct device *dev;
26 struct dbg_session_gk20a dbg_s;
27};
28
29struct dbg_session_channel_data_linux {
30 /*
31 * We have to keep a ref to the _file_, not the channel, because
32 * close(channel_fd) is synchronous and would deadlock if we had an
33 * open debug session fd holding a channel ref at that time. Holding a
34 * ref to the file makes close(channel_fd) just drop a kernel ref to
35 * the file; the channel will close when the last file ref is dropped.
36 */
37 struct file *ch_f;
38 struct dbg_session_channel_data ch_data;
39};
40
41/* module debug driver interface */
42int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp);
43int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp);
44long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
45unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait);
46
47/* used by profiler driver interface */
48int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp);
49
50#endif \ No newline at end of file
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 713c4215..e3fc61c0 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -51,6 +51,13 @@
 #include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
 
 /*
+ * Note
+ * This is added for all the copy_from_user methods in this file, which need
+ * to be moved later to reduce the dependency on Linux
+ */
+#include <linux/uaccess.h>
+
+/*
  * This is required for nvgpu_vm_find_buffer() which is used in the tracing
  * code. Once we can get and access userspace buffers without requiring
  * direct dma_buf usage this can be removed.
@@ -623,7 +630,7 @@ unbind:
623 list_for_each_entry_safe(ch_data, tmp, 630 list_for_each_entry_safe(ch_data, tmp,
624 &dbg_s->ch_list, ch_entry) { 631 &dbg_s->ch_list, ch_entry) {
625 if (ch_data->chid == ch->chid) 632 if (ch_data->chid == ch->chid)
626 dbg_unbind_single_channel_gk20a(dbg_s, ch_data); 633 ch_data->unbind_single_channel(dbg_s, ch_data);
627 } 634 }
628 nvgpu_mutex_release(&dbg_s->ch_list_lock); 635 nvgpu_mutex_release(&dbg_s->ch_list_lock);
629 } 636 }
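With this hunk, channel teardown in core code no longer names the Linux unbind helper directly; it calls through the unbind_single_channel pointer carried by each dbg_session_channel_data, so the fput() and free details can live in common/linux/ioctl_dbg.c. The sketch below models that indirection with simplified types; the installer and callback names are hypothetical, and in the real driver the callback would recover its Linux wrapper (as in the previous sketch) before dropping the file reference.

	#include <stdio.h>

	/* Simplified stand-ins for dbg_session_gk20a and dbg_session_channel_data. */
	struct dbg_session;                      /* opaque in this sketch */
	struct channel_data {
		int chid;
		/* Installed by the OS layer; core code only calls through it. */
		int (*unbind_single_channel)(struct dbg_session *dbg_s,
					     struct channel_data *ch_data);
	};

	/* Hypothetical Linux-side implementation: real code would fput() the
	 * file reference held by its wrapper and free the per-channel data. */
	static int unbind_single_channel_linux(struct dbg_session *dbg_s,
					       struct channel_data *ch_data)
	{
		(void)dbg_s;
		printf("unbind channel %d (Linux-specific cleanup here)\n",
		       ch_data->chid);
		return 0;
	}

	int main(void)
	{
		struct channel_data ch_data = { .chid = 3 };

		/* Presumably done when the channel is bound to the debug session. */
		ch_data.unbind_single_channel = unbind_single_channel_linux;

		/* What core code such as channel_gk20a.c now does. */
		return ch_data.unbind_single_channel(NULL, &ch_data);
	}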
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index 135cb1e9..8c39ecb7 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -22,40 +22,22 @@
22 * DEALINGS IN THE SOFTWARE. 22 * DEALINGS IN THE SOFTWARE.
23 */ 23 */
24 24
25#include <linux/fs.h>
26#include <linux/file.h>
27#include <linux/cdev.h>
28#include <linux/uaccess.h>
29#include <linux/dma-buf.h>
30#include <uapi/linux/nvgpu.h>
31
32#include <nvgpu/kmem.h> 25#include <nvgpu/kmem.h>
33#include <nvgpu/log.h> 26#include <nvgpu/log.h>
34#include <nvgpu/vm.h> 27#include <nvgpu/vm.h>
35#include <nvgpu/atomic.h> 28#include <nvgpu/atomic.h>
36 29
37#include <nvgpu/linux/vidmem.h>
38
39#include "gk20a.h" 30#include "gk20a.h"
40#include "gk20a/platform_gk20a.h" 31#include "gk20a/platform_gk20a.h"
41#include "gr_gk20a.h" 32#include "gr_gk20a.h"
42#include "dbg_gpu_gk20a.h" 33#include "dbg_gpu_gk20a.h"
43#include "regops_gk20a.h" 34#include "regops_gk20a.h"
44#include "common/linux/os_linux.h"
45 35
46#include <nvgpu/hw/gk20a/hw_therm_gk20a.h> 36#include <nvgpu/hw/gk20a/hw_therm_gk20a.h>
47#include <nvgpu/hw/gk20a/hw_gr_gk20a.h> 37#include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
48#include <nvgpu/hw/gk20a/hw_perf_gk20a.h> 38#include <nvgpu/hw/gk20a/hw_perf_gk20a.h>
49 39
50/* 40/*
51 * Currently this code uses nvgpu_vm_map_buffer() since it takes dmabuf FDs from
52 * the dbg ioctls. That has to change; this needs to hide the usage of dmabufs
53 * in Linux specific code. All core driver usage of mapping must be done through
54 * nvgpu_gmmu_map().
55 */
56#include "common/linux/vm_priv.h"
57
58/*
59 * API to get first channel from the list of all channels 41 * API to get first channel from the list of all channels
60 * bound to the debug session 42 * bound to the debug session
61 */ 43 */
@@ -82,240 +64,6 @@ nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s)
82 return ch; 64 return ch;
83} 65}
84 66
85/* silly allocator - just increment id */
86static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0);
87static int generate_unique_id(void)
88{
89 return nvgpu_atomic_add_return(1, &unique_id);
90}
91
92static int alloc_session(struct gk20a *g, struct dbg_session_gk20a **_dbg_s)
93{
94 struct dbg_session_gk20a *dbg_s;
95 *_dbg_s = NULL;
96
97 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
98
99 dbg_s = nvgpu_kzalloc(g, sizeof(*dbg_s));
100 if (!dbg_s)
101 return -ENOMEM;
102
103 dbg_s->id = generate_unique_id();
104 *_dbg_s = dbg_s;
105 return 0;
106}
107
108static int alloc_profiler(struct gk20a *g,
109 struct dbg_profiler_object_data **_prof)
110{
111 struct dbg_profiler_object_data *prof;
112 *_prof = NULL;
113
114 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
115
116 prof = nvgpu_kzalloc(g, sizeof(*prof));
117 if (!prof)
118 return -ENOMEM;
119
120 prof->prof_handle = generate_unique_id();
121 *_prof = prof;
122 return 0;
123}
124
125static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
126 struct file *filp, bool is_profiler)
127{
128 struct nvgpu_os_linux *l;
129 struct dbg_session_gk20a *dbg_session;
130 struct gk20a *g;
131
132 struct device *dev;
133
134 int err;
135
136 if (!is_profiler)
137 l = container_of(inode->i_cdev,
138 struct nvgpu_os_linux, dbg.cdev);
139 else
140 l = container_of(inode->i_cdev,
141 struct nvgpu_os_linux, prof.cdev);
142 g = gk20a_get(&l->g);
143 if (!g)
144 return -ENODEV;
145
146 dev = dev_from_gk20a(g);
147
148 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name);
149
150 err = alloc_session(g, &dbg_session);
151 if (err)
152 goto free_ref;
153
154 filp->private_data = dbg_session;
155 dbg_session->dev = dev;
156 dbg_session->g = g;
157 dbg_session->is_profiler = is_profiler;
158 dbg_session->is_pg_disabled = false;
159 dbg_session->is_timeout_disabled = false;
160
161 nvgpu_cond_init(&dbg_session->dbg_events.wait_queue);
162 nvgpu_init_list_node(&dbg_session->ch_list);
163 err = nvgpu_mutex_init(&dbg_session->ch_list_lock);
164 if (err)
165 goto err_free_session;
166 err = nvgpu_mutex_init(&dbg_session->ioctl_lock);
167 if (err)
168 goto err_destroy_lock;
169 dbg_session->dbg_events.events_enabled = false;
170 dbg_session->dbg_events.num_pending_events = 0;
171
172 return 0;
173
174err_destroy_lock:
175 nvgpu_mutex_destroy(&dbg_session->ch_list_lock);
176err_free_session:
177 nvgpu_kfree(g, dbg_session);
178free_ref:
179 gk20a_put(g);
180 return err;
181}
182
183/* used in scenarios where the debugger session can take just the inter-session
184 * lock for performance, but the profiler session must take the per-gpu lock
185 * since it might not have an associated channel. */
186static void gk20a_dbg_session_nvgpu_mutex_acquire(struct dbg_session_gk20a *dbg_s)
187{
188 struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
189
190 if (dbg_s->is_profiler || !ch)
191 nvgpu_mutex_acquire(&dbg_s->g->dbg_sessions_lock);
192 else
193 nvgpu_mutex_acquire(&ch->dbg_s_lock);
194}
195
196static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_s)
197{
198 struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
199
200 if (dbg_s->is_profiler || !ch)
201 nvgpu_mutex_release(&dbg_s->g->dbg_sessions_lock);
202 else
203 nvgpu_mutex_release(&ch->dbg_s_lock);
204}
205
206static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
207{
208 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
209
210 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
211
212 dbg_s->dbg_events.events_enabled = true;
213 dbg_s->dbg_events.num_pending_events = 0;
214
215 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
216}
217
218static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
219{
220 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
221
222 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
223
224 dbg_s->dbg_events.events_enabled = false;
225 dbg_s->dbg_events.num_pending_events = 0;
226
227 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
228}
229
230static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
231{
232 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
233
234 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
235
236 if (dbg_s->dbg_events.events_enabled &&
237 dbg_s->dbg_events.num_pending_events > 0)
238 dbg_s->dbg_events.num_pending_events--;
239
240 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
241}
242
243static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
244 struct nvgpu_dbg_gpu_events_ctrl_args *args)
245{
246 int ret = 0;
247 struct channel_gk20a *ch;
248
249 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
250
251 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
252 if (!ch) {
253 nvgpu_err(dbg_s->g,
254 "no channel bound to dbg session");
255 return -EINVAL;
256 }
257
258 switch (args->cmd) {
259 case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE:
260 gk20a_dbg_gpu_events_enable(dbg_s);
261 break;
262
263 case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_DISABLE:
264 gk20a_dbg_gpu_events_disable(dbg_s);
265 break;
266
267 case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR:
268 gk20a_dbg_gpu_events_clear(dbg_s);
269 break;
270
271 default:
272 nvgpu_err(dbg_s->g,
273 "unrecognized dbg gpu events ctrl cmd: 0x%x",
274 args->cmd);
275 ret = -EINVAL;
276 break;
277 }
278
279 return ret;
280}
281
282unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
283{
284 unsigned int mask = 0;
285 struct dbg_session_gk20a *dbg_s = filep->private_data;
286
287 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
288
289 poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait);
290
291 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
292
293 if (dbg_s->dbg_events.events_enabled &&
294 dbg_s->dbg_events.num_pending_events > 0) {
295 gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d",
296 dbg_s->id);
297 gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
298 dbg_s->dbg_events.num_pending_events);
299 mask = (POLLPRI | POLLIN);
300 }
301
302 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
303
304 return mask;
305}
306
307int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
308{
309 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
310 return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
311}
312
313int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
314{
315 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
316 return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
317}
318
319void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch) 67void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
320{ 68{
321 struct dbg_session_data *session_data; 69 struct dbg_session_data *session_data;
@@ -396,917 +144,6 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
396 return 0; 144 return 0;
397} 145}
398 146
399static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
400 int timeout_mode)
401{
402 struct gk20a *g = dbg_s->g;
403 int err = 0;
404
405 gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
406 timeout_mode);
407
408 switch (timeout_mode) {
409 case NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE:
410 if (dbg_s->is_timeout_disabled &&
411 --g->dbg_timeout_disabled_refcount == 0) {
412 g->timeouts_enabled = true;
413 }
414 dbg_s->is_timeout_disabled = false;
415 break;
416
417 case NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE:
418 if ((dbg_s->is_timeout_disabled == false) &&
419 (g->dbg_timeout_disabled_refcount++ == 0)) {
420 g->timeouts_enabled = false;
421 }
422 dbg_s->is_timeout_disabled = true;
423 break;
424
425 default:
426 nvgpu_err(g,
427 "unrecognized dbg gpu timeout mode : 0x%x",
428 timeout_mode);
429 err = -EINVAL;
430 break;
431 }
432
433 gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
434 g->timeouts_enabled ? "Yes" : "No");
435
436 return err;
437}
438
439int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
440 struct dbg_session_channel_data *ch_data)
441{
442 struct gk20a *g = dbg_s->g;
443 int chid;
444 struct dbg_session_data *session_data;
445 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
446
447 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
448
449 chid = ch_data->chid;
450
451 /* If there's a profiler ctx reservation record associated with this
452 * session/channel pair, release it.
453 */
454 nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
455 dbg_profiler_object_data, prof_obj_entry) {
456 if ((prof_obj->session_id == dbg_s->id) &&
457 (prof_obj->ch->chid == chid)) {
458 if (prof_obj->has_reservation) {
459 g->ops.dbg_session_ops.
460 release_profiler_reservation(dbg_s, prof_obj);
461 }
462 nvgpu_list_del(&prof_obj->prof_obj_entry);
463 nvgpu_kfree(g, prof_obj);
464 }
465 }
466
467 nvgpu_list_del(&ch_data->ch_entry);
468
469 session_data = ch_data->session_data;
470 nvgpu_list_del(&session_data->dbg_s_entry);
471 nvgpu_kfree(dbg_s->g, session_data);
472
473 fput(ch_data->ch_f);
474 nvgpu_kfree(dbg_s->g, ch_data);
475
476 return 0;
477}
478
479static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s)
480{
481 struct dbg_session_channel_data *ch_data, *tmp;
482 struct gk20a *g = dbg_s->g;
483
484 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
485 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
486 nvgpu_list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list,
487 dbg_session_channel_data, ch_entry)
488 dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
489 nvgpu_mutex_release(&dbg_s->ch_list_lock);
490 nvgpu_mutex_release(&g->dbg_sessions_lock);
491
492 return 0;
493}
494
495static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
496 struct nvgpu_dbg_gpu_unbind_channel_args *args)
497{
498 struct dbg_session_channel_data *ch_data;
499 struct gk20a *g = dbg_s->g;
500 bool channel_found = false;
501 struct channel_gk20a *ch;
502 int err;
503
504 gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
505 g->name, args->channel_fd);
506
507 ch = gk20a_get_channel_from_file(args->channel_fd);
508 if (!ch) {
509 gk20a_dbg_fn("no channel found for fd");
510 return -EINVAL;
511 }
512
513 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
514 nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
515 dbg_session_channel_data, ch_entry) {
516 if (ch->chid == ch_data->chid) {
517 channel_found = true;
518 break;
519 }
520 }
521 nvgpu_mutex_release(&dbg_s->ch_list_lock);
522
523 if (!channel_found) {
524 gk20a_dbg_fn("channel not bounded, fd=%d\n", args->channel_fd);
525 err = -EINVAL;
526 goto out;
527 }
528
529 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
530 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
531 err = dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
532 nvgpu_mutex_release(&dbg_s->ch_list_lock);
533 nvgpu_mutex_release(&g->dbg_sessions_lock);
534
535out:
536 gk20a_channel_put(ch);
537 return err;
538}
539
540static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset);
541
542int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
543{
544 struct dbg_session_gk20a *dbg_s = filp->private_data;
545 struct gk20a *g = dbg_s->g;
546 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
547
548 gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name);
549
550 /* unbind channels */
551 dbg_unbind_all_channels_gk20a(dbg_s);
552
553 /* Powergate/Timeout enable is called here as possibility of dbg_session
554 * which called powergate/timeout disable ioctl, to be killed without
555 * calling powergate/timeout enable ioctl
556 */
557 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
558 g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
559 NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
560 nvgpu_dbg_timeout_enable(dbg_s, NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE);
561
562 /* If this session owned the perf buffer, release it */
563 if (g->perfbuf.owner == dbg_s)
564 gk20a_perfbuf_release_locked(g, g->perfbuf.offset);
565
566 /* Per-context profiler objects were released when we called
567 * dbg_unbind_all_channels. We could still have global ones.
568 */
569 nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
570 dbg_profiler_object_data, prof_obj_entry) {
571 if (prof_obj->session_id == dbg_s->id) {
572 if (prof_obj->has_reservation)
573 g->ops.dbg_session_ops.
574 release_profiler_reservation(dbg_s, prof_obj);
575 nvgpu_list_del(&prof_obj->prof_obj_entry);
576 nvgpu_kfree(g, prof_obj);
577 }
578 }
579 nvgpu_mutex_release(&g->dbg_sessions_lock);
580
581 nvgpu_mutex_destroy(&dbg_s->ch_list_lock);
582 nvgpu_mutex_destroy(&dbg_s->ioctl_lock);
583
584 nvgpu_kfree(g, dbg_s);
585 gk20a_put(g);
586
587 return 0;
588}
589
590static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
591 struct nvgpu_dbg_gpu_bind_channel_args *args)
592{
593 struct file *f;
594 struct gk20a *g = dbg_s->g;
595 struct channel_gk20a *ch;
596 struct dbg_session_channel_data *ch_data;
597 struct dbg_session_data *session_data;
598 int err = 0;
599
600 gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
601 g->name, args->channel_fd);
602
603 /*
604 * Although gk20a_get_channel_from_file gives us a channel ref, need to
605 * hold a ref to the file during the session lifetime. See comment in
606 * struct dbg_session_channel_data.
607 */
608 f = fget(args->channel_fd);
609 if (!f)
610 return -ENODEV;
611
612 ch = gk20a_get_channel_from_file(args->channel_fd);
613 if (!ch) {
614 gk20a_dbg_fn("no channel found for fd");
615 err = -EINVAL;
616 goto out_fput;
617 }
618
619 gk20a_dbg_fn("%s hwchid=%d", g->name, ch->chid);
620
621 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
622 nvgpu_mutex_acquire(&ch->dbg_s_lock);
623
624 ch_data = nvgpu_kzalloc(g, sizeof(*ch_data));
625 if (!ch_data) {
626 err = -ENOMEM;
627 goto out_chput;
628 }
629 ch_data->ch_f = f;
630 ch_data->channel_fd = args->channel_fd;
631 ch_data->chid = ch->chid;
632 nvgpu_init_list_node(&ch_data->ch_entry);
633
634 session_data = nvgpu_kzalloc(g, sizeof(*session_data));
635 if (!session_data) {
636 err = -ENOMEM;
637 goto out_kfree;
638 }
639 session_data->dbg_s = dbg_s;
640 nvgpu_init_list_node(&session_data->dbg_s_entry);
641 ch_data->session_data = session_data;
642
643 nvgpu_list_add(&session_data->dbg_s_entry, &ch->dbg_s_list);
644
645 nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
646 nvgpu_list_add_tail(&ch_data->ch_entry, &dbg_s->ch_list);
647 nvgpu_mutex_release(&dbg_s->ch_list_lock);
648
649 nvgpu_mutex_release(&ch->dbg_s_lock);
650 nvgpu_mutex_release(&g->dbg_sessions_lock);
651
652 gk20a_channel_put(ch);
653
654 return 0;
655
656out_kfree:
657 nvgpu_kfree(g, ch_data);
658out_chput:
659 gk20a_channel_put(ch);
660 nvgpu_mutex_release(&ch->dbg_s_lock);
661 nvgpu_mutex_release(&g->dbg_sessions_lock);
662out_fput:
663 fput(f);
664 return err;
665}
666
667static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
668 struct nvgpu_dbg_gpu_exec_reg_ops_args *args);
669
670static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
671 struct nvgpu_dbg_gpu_powergate_args *args);
672
673static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
674 struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args);
675
676static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
677 struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args);
678
679static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
680 struct dbg_session_gk20a *dbg_s,
681 struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args);
682
683static int nvgpu_ioctl_allocate_profiler_object(struct dbg_session_gk20a *dbg_s,
684 struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args);
685
686static int nvgpu_ioctl_free_profiler_object(struct dbg_session_gk20a *dbg_s,
687 struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args);
688
689static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s,
690 struct nvgpu_dbg_gpu_profiler_reserve_args *args);
691
692static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
693 struct nvgpu_dbg_gpu_perfbuf_map_args *args);
694
695static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
696 struct nvgpu_dbg_gpu_perfbuf_unmap_args *args);
697
698static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
699 struct nvgpu_dbg_gpu_pc_sampling_args *args)
700{
701 struct channel_gk20a *ch;
702 struct gk20a *g = dbg_s->g;
703
704 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
705 if (!ch)
706 return -EINVAL;
707
708 gk20a_dbg_fn("");
709
710 return g->ops.gr.update_pc_sampling ?
711 g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL;
712}
713
714static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
715 struct nvgpu_dbg_gpu_timeout_args *args)
716{
717 int err;
718 struct gk20a *g = dbg_s->g;
719
720 gk20a_dbg_fn("powergate mode = %d", args->enable);
721
722 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
723 err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
724 nvgpu_mutex_release(&g->dbg_sessions_lock);
725
726 return err;
727}
728
729static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s,
730 struct nvgpu_dbg_gpu_timeout_args *args)
731{
732 int status;
733 struct gk20a *g = dbg_s->g;
734
735 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
736 status = g->timeouts_enabled;
737 nvgpu_mutex_release(&g->dbg_sessions_lock);
738
739 if (status)
740 args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE;
741 else
742 args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE;
743}
744
745static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(
746 struct dbg_session_gk20a *dbg_s,
747 struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args)
748{
749 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
750
751 gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
752
753 dbg_s->broadcast_stop_trigger = (args->broadcast != 0);
754
755 gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
756
757 return 0;
758}
759
760static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
761 struct dbg_session_gk20a *dbg_s,
762 struct nvgpu_dbg_gpu_read_single_sm_error_state_args *args)
763{
764 struct gk20a *g = dbg_s->g;
765 struct gr_gk20a *gr = &g->gr;
766 struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_state;
767 u32 sm_id;
768 int err = 0;
769
770 sm_id = args->sm_id;
771 if (sm_id >= gr->no_of_sm)
772 return -EINVAL;
773
774 sm_error_state = gr->sm_error_states + sm_id;
775
776 if (args->sm_error_state_record_size > 0) {
777 size_t write_size = sizeof(*sm_error_state);
778
779 if (write_size > args->sm_error_state_record_size)
780 write_size = args->sm_error_state_record_size;
781
782 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
783 err = copy_to_user((void __user *)(uintptr_t)
784 args->sm_error_state_record_mem,
785 sm_error_state,
786 write_size);
787 nvgpu_mutex_release(&g->dbg_sessions_lock);
788 if (err) {
789 nvgpu_err(g, "copy_to_user failed!");
790 return err;
791 }
792
793 args->sm_error_state_record_size = write_size;
794 }
795
796 return 0;
797}
798
799static int nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(
800 struct dbg_session_gk20a *dbg_s,
801 struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *args)
802{
803 struct gk20a *g = dbg_s->g;
804 struct gr_gk20a *gr = &g->gr;
805 u32 sm_id;
806 struct channel_gk20a *ch;
807 int err = 0;
808
809 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
810 if (!ch)
811 return -EINVAL;
812
813 sm_id = args->sm_id;
814
815 if (sm_id >= gr->no_of_sm)
816 return -EINVAL;
817
818 err = gk20a_busy(g);
819 if (err)
820 return err;
821
822 err = gr_gk20a_elpg_protected_call(g,
823 g->ops.gr.clear_sm_error_state(g, ch, sm_id));
824
825 gk20a_idle(g);
826
827 return err;
828}
829
830static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(
831 struct dbg_session_gk20a *dbg_s,
832 struct nvgpu_dbg_gpu_write_single_sm_error_state_args *args)
833{
834 struct gk20a *g = dbg_s->g;
835 struct gr_gk20a *gr = &g->gr;
836 u32 sm_id;
837 struct channel_gk20a *ch;
838 struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_state;
839 int err = 0;
840
841 /* Not currently supported in the virtual case */
842 if (g->is_virtual)
843 return -ENOSYS;
844
845 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
846 if (!ch)
847 return -EINVAL;
848
849 sm_id = args->sm_id;
850 if (sm_id >= gr->no_of_sm)
851 return -EINVAL;
852
853 sm_error_state = nvgpu_kzalloc(g, sizeof(*sm_error_state));
854 if (!sm_error_state)
855 return -ENOMEM;
856
857 if (args->sm_error_state_record_size > 0) {
858 size_t read_size = sizeof(*sm_error_state);
859
860 if (read_size > args->sm_error_state_record_size)
861 read_size = args->sm_error_state_record_size;
862
863 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
864 err = copy_from_user(sm_error_state,
865 (void __user *)(uintptr_t)
866 args->sm_error_state_record_mem,
867 read_size);
868 nvgpu_mutex_release(&g->dbg_sessions_lock);
869 if (err) {
870 err = -ENOMEM;
871 goto err_free;
872 }
873 }
874
875 err = gk20a_busy(g);
876 if (err)
877 goto err_free;
878
879 err = gr_gk20a_elpg_protected_call(g,
880 g->ops.gr.update_sm_error_state(g, ch,
881 sm_id, sm_error_state));
882
883 gk20a_idle(g);
884
885err_free:
886 nvgpu_kfree(g, sm_error_state);
887
888 return err;
889}
890
891static int
892nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(struct dbg_session_gk20a *dbg_s,
893 struct nvgpu_dbg_gpu_suspend_resume_contexts_args *args)
894{
895 struct gk20a *g = dbg_s->g;
896 int err = 0;
897 int ctx_resident_ch_fd = -1;
898
899 err = gk20a_busy(g);
900 if (err)
901 return err;
902
903 switch (args->action) {
904 case NVGPU_DBG_GPU_SUSPEND_ALL_CONTEXTS:
905 err = g->ops.gr.suspend_contexts(g, dbg_s,
906 &ctx_resident_ch_fd);
907 break;
908
909 case NVGPU_DBG_GPU_RESUME_ALL_CONTEXTS:
910 err = g->ops.gr.resume_contexts(g, dbg_s,
911 &ctx_resident_ch_fd);
912 break;
913 }
914
915 if (ctx_resident_ch_fd < 0) {
916 args->is_resident_context = 0;
917 } else {
918 args->is_resident_context = 1;
919 args->resident_context_fd = ctx_resident_ch_fd;
920 }
921
922 gk20a_idle(g);
923
924 return err;
925}
926
927static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
928 struct nvgpu_dbg_gpu_access_fb_memory_args *args)
929{
930 struct gk20a *g = dbg_s->g;
931 struct dma_buf *dmabuf;
932 void __user *user_buffer = (void __user *)(uintptr_t)args->buffer;
933 void *buffer;
934 u64 size, access_size, offset;
935 u64 access_limit_size = SZ_4K;
936 int err = 0;
937
938 if ((args->offset & 3) || (!args->size) || (args->size & 3))
939 return -EINVAL;
940
941 dmabuf = dma_buf_get(args->dmabuf_fd);
942 if (IS_ERR(dmabuf))
943 return -EINVAL;
944
945 if ((args->offset > dmabuf->size) ||
946 (args->size > dmabuf->size) ||
947 (args->offset + args->size > dmabuf->size)) {
948 err = -EINVAL;
949 goto fail_dmabuf_put;
950 }
951
952 buffer = nvgpu_big_zalloc(g, access_limit_size);
953 if (!buffer) {
954 err = -ENOMEM;
955 goto fail_dmabuf_put;
956 }
957
958 size = args->size;
959 offset = 0;
960
961 err = gk20a_busy(g);
962 if (err)
963 goto fail_free_buffer;
964
965 while (size) {
966 /* Max access size of access_limit_size in one loop */
967 access_size = min(access_limit_size, size);
968
969 if (args->cmd ==
970 NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE) {
971 err = copy_from_user(buffer, user_buffer + offset,
972 access_size);
973 if (err)
974 goto fail_idle;
975 }
976
977 err = nvgpu_vidmem_buf_access_memory(g, dmabuf, buffer,
978 args->offset + offset, access_size,
979 args->cmd);
980 if (err)
981 goto fail_idle;
982
983 if (args->cmd ==
984 NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ) {
985 err = copy_to_user(user_buffer + offset,
986 buffer, access_size);
987 if (err)
988 goto fail_idle;
989 }
990
991 size -= access_size;
992 offset += access_size;
993 }
994
995fail_idle:
996 gk20a_idle(g);
997fail_free_buffer:
998 nvgpu_big_free(g, buffer);
999fail_dmabuf_put:
1000 dma_buf_put(dmabuf);
1001
1002 return err;
1003}
1004
1005long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
1006 unsigned long arg)
1007{
1008 struct dbg_session_gk20a *dbg_s = filp->private_data;
1009 struct gk20a *g = dbg_s->g;
1010 u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
1011 int err = 0;
1012
1013 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
1014
1015 if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
1016 (_IOC_NR(cmd) == 0) ||
1017 (_IOC_NR(cmd) > NVGPU_DBG_GPU_IOCTL_LAST) ||
1018 (_IOC_SIZE(cmd) > NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE))
1019 return -EINVAL;
1020
1021 memset(buf, 0, sizeof(buf));
1022 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1023 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
1024 return -EFAULT;
1025 }
1026
1027 if (!g->gr.sw_ready) {
1028 err = gk20a_busy(g);
1029 if (err)
1030 return err;
1031
1032 gk20a_idle(g);
1033 }
1034
1035 /* protect from threaded user space calls */
1036 nvgpu_mutex_acquire(&dbg_s->ioctl_lock);
1037
1038 switch (cmd) {
1039 case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL:
1040 err = dbg_bind_channel_gk20a(dbg_s,
1041 (struct nvgpu_dbg_gpu_bind_channel_args *)buf);
1042 break;
1043
1044 case NVGPU_DBG_GPU_IOCTL_REG_OPS:
1045 err = nvgpu_ioctl_channel_reg_ops(dbg_s,
1046 (struct nvgpu_dbg_gpu_exec_reg_ops_args *)buf);
1047 break;
1048
1049 case NVGPU_DBG_GPU_IOCTL_POWERGATE:
1050 err = nvgpu_ioctl_powergate_gk20a(dbg_s,
1051 (struct nvgpu_dbg_gpu_powergate_args *)buf);
1052 break;
1053
1054 case NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL:
1055 err = gk20a_dbg_gpu_events_ctrl(dbg_s,
1056 (struct nvgpu_dbg_gpu_events_ctrl_args *)buf);
1057 break;
1058
1059 case NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE:
1060 err = nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(dbg_s,
1061 (struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *)buf);
1062 break;
1063
1064 case NVGPU_DBG_GPU_IOCTL_HWPM_CTXSW_MODE:
1065 err = nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(dbg_s,
1066 (struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *)buf);
1067 break;
1068
1069 case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS:
1070 err = nvgpu_dbg_gpu_ioctl_suspend_resume_sm(dbg_s,
1071 (struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *)buf);
1072 break;
1073
1074 case NVGPU_DBG_GPU_IOCTL_PERFBUF_MAP:
1075 err = gk20a_perfbuf_map(dbg_s,
1076 (struct nvgpu_dbg_gpu_perfbuf_map_args *)buf);
1077 break;
1078
1079 case NVGPU_DBG_GPU_IOCTL_PERFBUF_UNMAP:
1080 err = gk20a_perfbuf_unmap(dbg_s,
1081 (struct nvgpu_dbg_gpu_perfbuf_unmap_args *)buf);
1082 break;
1083
1084 case NVGPU_DBG_GPU_IOCTL_PC_SAMPLING:
1085 err = gk20a_dbg_pc_sampling(dbg_s,
1086 (struct nvgpu_dbg_gpu_pc_sampling_args *)buf);
1087 break;
1088
1089 case NVGPU_DBG_GPU_IOCTL_SET_NEXT_STOP_TRIGGER_TYPE:
1090 err = nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(dbg_s,
1091 (struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *)buf);
1092 break;
1093
1094 case NVGPU_DBG_GPU_IOCTL_TIMEOUT:
1095 err = nvgpu_dbg_gpu_ioctl_timeout(dbg_s,
1096 (struct nvgpu_dbg_gpu_timeout_args *)buf);
1097 break;
1098
1099 case NVGPU_DBG_GPU_IOCTL_GET_TIMEOUT:
1100 nvgpu_dbg_gpu_ioctl_get_timeout(dbg_s,
1101 (struct nvgpu_dbg_gpu_timeout_args *)buf);
1102 break;
1103
1104 case NVGPU_DBG_GPU_IOCTL_READ_SINGLE_SM_ERROR_STATE:
1105 err = nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(dbg_s,
1106 (struct nvgpu_dbg_gpu_read_single_sm_error_state_args *)buf);
1107 break;
1108
1109 case NVGPU_DBG_GPU_IOCTL_CLEAR_SINGLE_SM_ERROR_STATE:
1110 err = nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(dbg_s,
1111 (struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *)buf);
1112 break;
1113
1114 case NVGPU_DBG_GPU_IOCTL_WRITE_SINGLE_SM_ERROR_STATE:
1115 err = nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(dbg_s,
1116 (struct nvgpu_dbg_gpu_write_single_sm_error_state_args *)buf);
1117 break;
1118
1119 case NVGPU_DBG_GPU_IOCTL_UNBIND_CHANNEL:
1120 err = dbg_unbind_channel_gk20a(dbg_s,
1121 (struct nvgpu_dbg_gpu_unbind_channel_args *)buf);
1122 break;
1123
1124 case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_CONTEXTS:
1125 err = nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(dbg_s,
1126 (struct nvgpu_dbg_gpu_suspend_resume_contexts_args *)buf);
1127 break;
1128
1129 case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY:
1130 err = nvgpu_dbg_gpu_ioctl_access_fb_memory(dbg_s,
1131 (struct nvgpu_dbg_gpu_access_fb_memory_args *)buf);
1132 break;
1133
1134 case NVGPU_DBG_GPU_IOCTL_PROFILER_ALLOCATE:
1135 err = nvgpu_ioctl_allocate_profiler_object(dbg_s,
1136 (struct nvgpu_dbg_gpu_profiler_obj_mgt_args *)buf);
1137 break;
1138
1139 case NVGPU_DBG_GPU_IOCTL_PROFILER_FREE:
1140 err = nvgpu_ioctl_free_profiler_object(dbg_s,
1141 (struct nvgpu_dbg_gpu_profiler_obj_mgt_args *)buf);
1142 break;
1143
1144 case NVGPU_DBG_GPU_IOCTL_PROFILER_RESERVE:
1145 err = nvgpu_ioctl_profiler_reserve(dbg_s,
1146 (struct nvgpu_dbg_gpu_profiler_reserve_args *)buf);
1147 break;
1148
1149 default:
1150 nvgpu_err(g,
1151 "unrecognized dbg gpu ioctl cmd: 0x%x",
1152 cmd);
1153 err = -ENOTTY;
1154 break;
1155 }
1156
1157 nvgpu_mutex_release(&dbg_s->ioctl_lock);
1158
1159 gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
1160
1161 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1162 err = copy_to_user((void __user *)arg,
1163 buf, _IOC_SIZE(cmd));
1164
1165 return err;
1166}
1167
1168/* In order to perform a context relative op the context has
1169 * to be created already... which would imply that the
1170 * context switch mechanism has already been put in place.
1171 * So by the time we perform such an operation it should always
1172 * be possible to query for the appropriate context offsets, etc.
1173 *
1174 * But note: while the dbg_gpu bind requires a channel fd,
1175 * it doesn't require an allocated gr/compute obj at that point...
1176 */
1177static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
1178 struct gr_gk20a *gr)
1179{
1180 int err;
1181
1182 nvgpu_mutex_acquire(&gr->ctx_mutex);
1183 err = !gr->ctx_vars.golden_image_initialized;
1184 nvgpu_mutex_release(&gr->ctx_mutex);
1185 if (err)
1186 return false;
1187 return true;
1188
1189}
1190
1191static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
1192 struct nvgpu_dbg_gpu_exec_reg_ops_args *args)
1193{
1194 int err = 0, powergate_err = 0;
1195 bool is_pg_disabled = false;
1196
1197 struct gk20a *g = dbg_s->g;
1198 struct channel_gk20a *ch;
1199
1200 gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);
1201
1202 if (args->num_ops > g->gpu_characteristics.reg_ops_limit) {
1203 nvgpu_err(g, "regops limit exceeded");
1204 return -EINVAL;
1205 }
1206
1207 if (args->num_ops == 0) {
1208 /* Nothing to do */
1209 return 0;
1210 }
1211
1212 if (g->dbg_regops_tmp_buf_ops == 0 || !g->dbg_regops_tmp_buf) {
1213 nvgpu_err(g, "reg ops work buffer not allocated");
1214 return -ENODEV;
1215 }
1216
1217 if (!dbg_s->id) {
1218 nvgpu_err(g, "can't call reg_ops on an unbound debugger session");
1219 return -EINVAL;
1220 }
1221
1222 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1223 if (!dbg_s->is_profiler && !ch) {
1224 nvgpu_err(g, "bind a channel before regops for a debugging session");
1225 return -EINVAL;
1226 }
1227
1228 /* be sure that ctx info is in place */
1229 if (!g->is_virtual &&
1230 !gr_context_info_available(dbg_s, &g->gr)) {
1231 nvgpu_err(g, "gr context data not available");
1232 return -ENODEV;
1233 }
1234
1235 /* since exec_reg_ops sends methods to the ucode, it must take the
1236 * global gpu lock to protect against mixing methods from debug sessions
1237 * on other channels */
1238 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1239
1240 if (!dbg_s->is_pg_disabled && !g->is_virtual) {
1241 /* In the virtual case, the server will handle
1242 * disabling/enabling powergating when processing reg ops
1243 */
1244 powergate_err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
1245 NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE);
1246 is_pg_disabled = true;
1247 }
1248
1249 if (!powergate_err) {
1250 u64 ops_offset = 0; /* index offset */
1251
1252 while (ops_offset < args->num_ops && !err) {
1253 const u64 num_ops =
1254 min(args->num_ops - ops_offset,
1255 (u64)(g->dbg_regops_tmp_buf_ops));
1256 const u64 fragment_size =
1257 num_ops * sizeof(g->dbg_regops_tmp_buf[0]);
1258
1259 void __user *const fragment =
1260 (void __user *)(uintptr_t)
1261 (args->ops +
1262 ops_offset * sizeof(g->dbg_regops_tmp_buf[0]));
1263
1264 gk20a_dbg_fn("Regops fragment: start_op=%llu ops=%llu",
1265 ops_offset, num_ops);
1266
1267 gk20a_dbg_fn("Copying regops from userspace");
1268
1269 if (copy_from_user(g->dbg_regops_tmp_buf,
1270 fragment, fragment_size)) {
1271 nvgpu_err(g, "copy_from_user failed!");
1272 err = -EFAULT;
1273 break;
1274 }
1275
1276 err = g->ops.dbg_session_ops.exec_reg_ops(
1277 dbg_s, g->dbg_regops_tmp_buf, num_ops);
1278
1279 gk20a_dbg_fn("Copying result to userspace");
1280
1281 if (copy_to_user(fragment, g->dbg_regops_tmp_buf,
1282 fragment_size)) {
1283 nvgpu_err(g, "copy_to_user failed!");
1284 err = -EFAULT;
1285 break;
1286 }
1287
1288 ops_offset += num_ops;
1289 }
1290
1291 /* enable powergate, if previously disabled */
1292 if (is_pg_disabled) {
1293 powergate_err =
1294 g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
1295 NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
1296 }
1297 }
1298
1299 nvgpu_mutex_release(&g->dbg_sessions_lock);
1300
1301 if (!err && powergate_err)
1302 err = powergate_err;
1303
1304 if (err)
1305 nvgpu_err(g, "dbg regops failed");
1306
1307 return err;
1308}
1309
1310int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode) 147int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
1311{ 148{
1312 int err = 0; 149 int err = 0;
@@ -1409,273 +246,6 @@ int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
1409 return err; 246 return err;
1410} 247}
1411 248
1412static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
1413 struct nvgpu_dbg_gpu_powergate_args *args)
1414{
1415 int err;
1416 struct gk20a *g = dbg_s->g;
1417 gk20a_dbg_fn("%s powergate mode = %d",
1418 g->name, args->mode);
1419
1420 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1421 err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, args->mode);
1422 nvgpu_mutex_release(&g->dbg_sessions_lock);
1423 return err;
1424}
1425
1426static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
1427 struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args)
1428{
1429 int err;
1430 struct gk20a *g = dbg_s->g;
1431 struct channel_gk20a *ch_gk20a;
1432
1433 gk20a_dbg_fn("%s smpc ctxsw mode = %d",
1434 g->name, args->mode);
1435
1436 err = gk20a_busy(g);
1437 if (err) {
1438 nvgpu_err(g, "failed to poweron");
1439 return err;
1440 }
1441
1442 /* Take the global lock, since we'll be doing global regops */
1443 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1444
1445 ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1446 if (!ch_gk20a) {
1447 nvgpu_err(g,
1448 "no bound channel for smpc ctxsw mode update");
1449 err = -EINVAL;
1450 goto clean_up;
1451 }
1452
1453 err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a,
1454 args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
1455 if (err) {
1456 nvgpu_err(g,
1457 "error (%d) during smpc ctxsw mode update", err);
1458 goto clean_up;
1459 }
1460
1461 err = g->ops.regops.apply_smpc_war(dbg_s);
1462 clean_up:
1463 nvgpu_mutex_release(&g->dbg_sessions_lock);
1464 gk20a_idle(g);
1465 return err;
1466}
1467
1468static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
1469 struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args)
1470{
1471 int err;
1472 struct gk20a *g = dbg_s->g;
1473 struct channel_gk20a *ch_gk20a;
1474
1475 gk20a_dbg_fn("%s pm ctxsw mode = %d",
1476 g->name, args->mode);
1477
1478 /* Must have a valid reservation to enable/disable hwpm cxtsw.
1479 * Just print an error message for now, but eventually this should
1480 * return an error, at the point where all client sw has been
1481 * cleaned up.
1482 */
1483 if (!dbg_s->has_profiler_reservation) {
1484 nvgpu_err(g,
1485 "session doesn't have a valid reservation");
1486 }
1487
1488 err = gk20a_busy(g);
1489 if (err) {
1490 nvgpu_err(g, "failed to poweron");
1491 return err;
1492 }
1493
1494 /* Take the global lock, since we'll be doing global regops */
1495 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1496
1497 ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1498 if (!ch_gk20a) {
1499 nvgpu_err(g,
1500 "no bound channel for pm ctxsw mode update");
1501 err = -EINVAL;
1502 goto clean_up;
1503 }
1504
1505 err = g->ops.gr.update_hwpm_ctxsw_mode(g, ch_gk20a,
1506 args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW);
1507 if (err)
1508 nvgpu_err(g,
1509 "error (%d) during pm ctxsw mode update", err);
1510
1511 /* gk20a would require a WAR to set the core PM_ENABLE bit, not
1512 * added here with gk20a being deprecated
1513 */
1514 clean_up:
1515 nvgpu_mutex_release(&g->dbg_sessions_lock);
1516 gk20a_idle(g);
1517 return err;
1518}
1519
1520static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
1521 struct dbg_session_gk20a *dbg_s,
1522 struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
1523{
1524 struct gk20a *g = dbg_s->g;
1525 struct channel_gk20a *ch;
1526 int err = 0, action = args->mode;
1527
1528 gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);
1529
1530 ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1531 if (!ch)
1532 return -EINVAL;
1533
1534 err = gk20a_busy(g);
1535 if (err) {
1536 nvgpu_err(g, "failed to poweron");
1537 return err;
1538 }
1539
1540 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1541
1542 /* Suspend GPU context switching */
1543 err = gr_gk20a_disable_ctxsw(g);
1544 if (err) {
1545 nvgpu_err(g, "unable to stop gr ctxsw");
1546 /* this should probably be ctx-fatal... */
1547 goto clean_up;
1548 }
1549
1550 switch (action) {
1551 case NVGPU_DBG_GPU_SUSPEND_ALL_SMS:
1552 gr_gk20a_suspend_context(ch);
1553 break;
1554
1555 case NVGPU_DBG_GPU_RESUME_ALL_SMS:
1556 gr_gk20a_resume_context(ch);
1557 break;
1558 }
1559
1560 err = gr_gk20a_enable_ctxsw(g);
1561 if (err)
1562 nvgpu_err(g, "unable to restart ctxsw!");
1563
1564clean_up:
1565 nvgpu_mutex_release(&g->dbg_sessions_lock);
1566 gk20a_idle(g);
1567
1568 return err;
1569}
1570
1571static int nvgpu_ioctl_allocate_profiler_object(
1572 struct dbg_session_gk20a *dbg_s,
1573 struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args)
1574{
1575 int err = 0;
1576 struct gk20a *g = get_gk20a(dbg_s->dev);
1577 struct dbg_profiler_object_data *prof_obj;
1578
1579 gk20a_dbg_fn("%s", g->name);
1580
1581 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1582
1583 err = alloc_profiler(g, &prof_obj);
1584 if (err)
1585 goto clean_up;
1586
1587 prof_obj->session_id = dbg_s->id;
1588
1589 if (dbg_s->is_profiler)
1590 prof_obj->ch = NULL;
1591 else {
1592 prof_obj->ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
1593 if (prof_obj->ch == NULL) {
1594 nvgpu_err(g,
1595 "bind a channel for dbg session");
1596 nvgpu_kfree(g, prof_obj);
1597 err = -EINVAL;
1598 goto clean_up;
1599 }
1600 }
1601
1602 /* Return handle to client */
1603 args->profiler_handle = prof_obj->prof_handle;
1604
1605 nvgpu_init_list_node(&prof_obj->prof_obj_entry);
1606
1607 nvgpu_list_add(&prof_obj->prof_obj_entry, &g->profiler_objects);
1608clean_up:
1609 nvgpu_mutex_release(&g->dbg_sessions_lock);
1610 return err;
1611}
1612
1613static int nvgpu_ioctl_free_profiler_object(
1614 struct dbg_session_gk20a *dbg_s,
1615 struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args)
1616{
1617 int err = 0;
1618 struct gk20a *g = get_gk20a(dbg_s->dev);
1619 struct dbg_profiler_object_data *prof_obj, *tmp_obj;
1620 bool obj_found = false;
1621
1622 gk20a_dbg_fn("%s session_id = %d profiler_handle = %x",
1623 g->name, dbg_s->id, args->profiler_handle);
1624
1625 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1626
1627 /* Remove profiler object from the list, if a match is found */
1628 nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
1629 dbg_profiler_object_data, prof_obj_entry) {
1630 if (prof_obj->prof_handle == args->profiler_handle) {
1631 if (prof_obj->session_id != dbg_s->id) {
1632 nvgpu_err(g,
1633 "invalid handle %x",
1634 args->profiler_handle);
1635 err = -EINVAL;
1636 break;
1637 }
1638 if (prof_obj->has_reservation)
1639 g->ops.dbg_session_ops.
1640 release_profiler_reservation(dbg_s, prof_obj);
1641 nvgpu_list_del(&prof_obj->prof_obj_entry);
1642 nvgpu_kfree(g, prof_obj);
1643 obj_found = true;
1644 break;
1645 }
1646 }
1647 if (!obj_found) {
1648 nvgpu_err(g, "profiler %x not found",
1649 args->profiler_handle);
1650 err = -EINVAL;
1651 }
1652
1653 nvgpu_mutex_release(&g->dbg_sessions_lock);
1654 return err;
1655}
1656
1657static struct dbg_profiler_object_data *find_matching_prof_obj(
1658 struct dbg_session_gk20a *dbg_s,
1659 u32 profiler_handle)
1660{
1661 struct gk20a *g = dbg_s->g;
1662 struct dbg_profiler_object_data *prof_obj;
1663
1664 nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
1665 dbg_profiler_object_data, prof_obj_entry) {
1666 if (prof_obj->prof_handle == profiler_handle) {
1667 if (prof_obj->session_id != dbg_s->id) {
1668 nvgpu_err(g,
1669 "invalid handle %x",
1670 profiler_handle);
1671 return NULL;
1672 }
1673 return prof_obj;
1674 }
1675 }
1676 return NULL;
1677}
1678
1679bool nvgpu_check_and_set_global_reservation( 249bool nvgpu_check_and_set_global_reservation(
1680 struct dbg_session_gk20a *dbg_s, 250 struct dbg_session_gk20a *dbg_s,
1681 struct dbg_profiler_object_data *prof_obj) 251 struct dbg_profiler_object_data *prof_obj)
@@ -1721,149 +291,6 @@ void nvgpu_release_profiler_reservation(struct dbg_session_gk20a *dbg_s,
1721 g->global_profiler_reservation_held = false; 291 g->global_profiler_reservation_held = false;
1722} 292}
1723 293
1724static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
1725 u32 profiler_handle)
1726{
1727 struct gk20a *g = dbg_s->g;
1728 struct dbg_profiler_object_data *prof_obj, *my_prof_obj;
1729 int err = 0;
1730
1731 gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle);
1732
1733 if (g->profiler_reservation_count < 0) {
1734 nvgpu_err(g, "Negative reservation count!");
1735 return -EINVAL;
1736 }
1737
1738 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1739
1740 /* Find matching object. */
1741 my_prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);
1742
1743 if (!my_prof_obj) {
1744 nvgpu_err(g, "object not found");
1745 err = -EINVAL;
1746 goto exit;
1747 }
1748
1749 /* If we already have the reservation, we're done */
1750 if (my_prof_obj->has_reservation) {
1751 err = 0;
1752 goto exit;
1753 }
1754
1755 if (my_prof_obj->ch == NULL) {
1756 /* Global reservations are only allowed if there are no other
1757 * global or per-context reservations currently held
1758 */
1759 if (!g->ops.dbg_session_ops.check_and_set_global_reservation(
1760 dbg_s, my_prof_obj)) {
1761 nvgpu_err(g,
1762 "global reserve: have existing reservation");
1763 err = -EBUSY;
1764 }
1765 } else if (g->global_profiler_reservation_held) {
1766 /* If there's a global reservation,
1767 * we can't take a per-context one.
1768 */
1769 nvgpu_err(g,
1770 "per-ctxt reserve: global reservation in effect");
1771 err = -EBUSY;
1772 } else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) {
1773 /* TSG: check that another channel in the TSG
1774 * doesn't already have the reservation
1775 */
1776 int my_tsgid = my_prof_obj->ch->tsgid;
1777
1778 nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
1779 dbg_profiler_object_data, prof_obj_entry) {
1780 if (prof_obj->has_reservation &&
1781 (prof_obj->ch->tsgid == my_tsgid)) {
1782 nvgpu_err(g,
1783 "per-ctxt reserve (tsg): already reserved");
1784 err = -EBUSY;
1785 goto exit;
1786 }
1787 }
1788
1789 if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
1790 dbg_s, my_prof_obj)) {
1791 /* Another guest OS has the global reservation */
1792 nvgpu_err(g,
1793 "per-ctxt reserve: global reservation in effect");
1794 err = -EBUSY;
1795 }
1796 } else {
1797 /* channel: check that some other profiler object doesn't
1798 * already have the reservation.
1799 */
1800 struct channel_gk20a *my_ch = my_prof_obj->ch;
1801
1802 nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
1803 dbg_profiler_object_data, prof_obj_entry) {
1804 if (prof_obj->has_reservation &&
1805 (prof_obj->ch == my_ch)) {
1806 nvgpu_err(g,
1807 "per-ctxt reserve (ch): already reserved");
1808 err = -EBUSY;
1809 goto exit;
1810 }
1811 }
1812
1813 if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
1814 dbg_s, my_prof_obj)) {
1815 /* Another guest OS has the global reservation */
1816 nvgpu_err(g,
1817 "per-ctxt reserve: global reservation in effect");
1818 err = -EBUSY;
1819 }
1820 }
1821exit:
1822 nvgpu_mutex_release(&g->dbg_sessions_lock);
1823 return err;
1824}
1825
1826static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
1827 u32 profiler_handle)
1828{
1829 struct gk20a *g = dbg_s->g;
1830 struct dbg_profiler_object_data *prof_obj;
1831 int err = 0;
1832
1833 gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle);
1834
1835 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1836
1837 /* Find matching object. */
1838 prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);
1839
1840 if (!prof_obj) {
1841 nvgpu_err(g, "object not found");
1842 err = -EINVAL;
1843 goto exit;
1844 }
1845
1846 if (prof_obj->has_reservation)
1847 g->ops.dbg_session_ops.release_profiler_reservation(dbg_s, prof_obj);
1848 else {
1849 nvgpu_err(g, "No reservation found");
1850 err = -EINVAL;
1851 goto exit;
1852 }
1853exit:
1854 nvgpu_mutex_release(&g->dbg_sessions_lock);
1855 return err;
1856}
1857
1858static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s,
1859 struct nvgpu_dbg_gpu_profiler_reserve_args *args)
1860{
1861 if (args->acquire)
1862 return nvgpu_profiler_reserve_acquire(dbg_s, args->profiler_handle);
1863
1864 return nvgpu_profiler_reserve_release(dbg_s, args->profiler_handle);
1865}
1866
1867int gk20a_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size) 294int gk20a_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size)
1868{ 295{
1869 struct mm_gk20a *mm = &g->mm; 296 struct mm_gk20a *mm = &g->mm;
@@ -1909,75 +336,6 @@ int gk20a_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size)
1909 return 0; 336 return 0;
1910} 337}
1911 338
1912static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
1913 struct nvgpu_dbg_gpu_perfbuf_map_args *args)
1914{
1915 struct gk20a *g = dbg_s->g;
1916 struct mm_gk20a *mm = &g->mm;
1917 int err;
1918 u32 virt_size;
1919 u32 big_page_size;
1920
1921 if (!g->ops.dbg_session_ops.perfbuffer_enable)
1922 return -ENOSYS;
1923
1924 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
1925
1926 big_page_size = g->ops.mm.get_default_big_page_size();
1927
1928 if (g->perfbuf.owner) {
1929 nvgpu_mutex_release(&g->dbg_sessions_lock);
1930 return -EBUSY;
1931 }
1932
1933 mm->perfbuf.vm = nvgpu_vm_init(g, big_page_size,
1934 big_page_size << 10,
1935 NV_MM_DEFAULT_KERNEL_SIZE,
1936 NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
1937 false, false, "perfbuf");
1938 if (!mm->perfbuf.vm) {
1939 nvgpu_mutex_release(&g->dbg_sessions_lock);
1940 return -ENOMEM;
1941 }
1942
1943 err = nvgpu_vm_map_buffer(mm->perfbuf.vm,
1944 args->dmabuf_fd,
1945 &args->offset,
1946 0,
1947 0,
1948 0,
1949 0,
1950 args->mapping_size,
1951 NULL);
1952 if (err)
1953 goto err_remove_vm;
1954
1955 /* perf output buffer may not cross a 4GB boundary */
1956 virt_size = u64_lo32(args->mapping_size);
1957 if (u64_hi32(args->offset) != u64_hi32(args->offset + virt_size)) {
1958 err = -EINVAL;
1959 goto err_unmap;
1960 }
1961
1962 err = g->ops.dbg_session_ops.perfbuffer_enable(g,
1963 args->offset, virt_size);
1964 if (err)
1965 goto err_unmap;
1966
1967 g->perfbuf.owner = dbg_s;
1968 g->perfbuf.offset = args->offset;
1969 nvgpu_mutex_release(&g->dbg_sessions_lock);
1970
1971 return 0;
1972
1973err_unmap:
1974 nvgpu_vm_unmap_buffer(mm->perfbuf.vm, args->offset, NULL);
1975err_remove_vm:
1976 nvgpu_vm_put(mm->perfbuf.vm);
1977 nvgpu_mutex_release(&g->dbg_sessions_lock);
1978 return err;
1979}
1980
1981/* must be called with dbg_sessions_lock held */ 339/* must be called with dbg_sessions_lock held */
1982int gk20a_perfbuf_disable_locked(struct gk20a *g) 340int gk20a_perfbuf_disable_locked(struct gk20a *g)
1983{ 341{
@@ -2001,43 +359,3 @@ int gk20a_perfbuf_disable_locked(struct gk20a *g)
2001 359
2002 return 0; 360 return 0;
2003} 361}
2004
2005static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset)
2006{
2007 struct mm_gk20a *mm = &g->mm;
2008 struct vm_gk20a *vm = mm->perfbuf.vm;
2009 int err;
2010
2011 err = g->ops.dbg_session_ops.perfbuffer_disable(g);
2012
2013 nvgpu_vm_unmap_buffer(vm, offset, NULL);
2014 gk20a_free_inst_block(g, &mm->perfbuf.inst_block);
2015 nvgpu_vm_put(vm);
2016
2017 g->perfbuf.owner = NULL;
2018 g->perfbuf.offset = 0;
2019 return err;
2020}
2021
2022static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
2023 struct nvgpu_dbg_gpu_perfbuf_unmap_args *args)
2024{
2025 struct gk20a *g = dbg_s->g;
2026 int err;
2027
2028 if (!g->ops.dbg_session_ops.perfbuffer_disable)
2029 return -ENOSYS;
2030
2031 nvgpu_mutex_acquire(&g->dbg_sessions_lock);
2032 if ((g->perfbuf.owner != dbg_s) ||
2033 (g->perfbuf.offset != args->offset)) {
2034 nvgpu_mutex_release(&g->dbg_sessions_lock);
2035 return -EINVAL;
2036 }
2037
2038 err = gk20a_perfbuf_release_locked(g, args->offset);
2039
2040 nvgpu_mutex_release(&g->dbg_sessions_lock);
2041
2042 return err;
2043}
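Among the handlers removed from this file, nvgpu_dbg_timeout_enable() implements a small refcount scheme: each session records whether it has timeouts disabled, and the GPU-wide dbg_timeout_disabled_refcount ensures timeouts are switched off by the first disabling session and switched back on only when the last such session re-enables them. A standalone model of that scheme, using simplified stand-in types rather than the real nvgpu structures:

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-ins for struct gk20a and struct dbg_session_gk20a. */
	struct gpu {
		int  dbg_timeout_disabled_refcount;
		bool timeouts_enabled;
	};
	struct session {
		struct gpu *g;
		bool is_timeout_disabled;
	};

	static void timeout_disable(struct session *s)
	{
		/* First disabler flips the global switch; later ones just count. */
		if (!s->is_timeout_disabled &&
		    s->g->dbg_timeout_disabled_refcount++ == 0)
			s->g->timeouts_enabled = false;
		s->is_timeout_disabled = true;
	}

	static void timeout_enable(struct session *s)
	{
		/* Only the last disabling session turns timeouts back on. */
		if (s->is_timeout_disabled &&
		    --s->g->dbg_timeout_disabled_refcount == 0)
			s->g->timeouts_enabled = true;
		s->is_timeout_disabled = false;
	}

	int main(void)
	{
		struct gpu g = { 0, true };
		struct session a = { &g, false }, b = { &g, false };

		timeout_disable(&a);
		timeout_disable(&b);
		timeout_enable(&a);
		printf("after one enable: %s\n", g.timeouts_enabled ? "on" : "off");
		timeout_enable(&b);
		printf("after both enable: %s\n", g.timeouts_enabled ? "on" : "off");
		return 0;
	}

This is also why gk20a_dbg_gpu_dev_release() calls nvgpu_dbg_timeout_enable() with the enable mode unconditionally: a session killed without issuing the enable ioctl must still drop its share of the refcount.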
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h
index 1a6de3a8..d50ce844 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h
@@ -21,18 +21,10 @@
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE. 22 * DEALINGS IN THE SOFTWARE.
23 */ 23 */
24#ifndef DBG_GPU_GK20A_H 24#ifndef DBG_GPU_H
25#define DBG_GPU_GK20A_H 25#define DBG_GPU_H
26#include <linux/poll.h>
27 26
28/* module debug driver interface */ 27#include <nvgpu/cond.h>
29int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp);
30int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp);
31long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
32unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait);
33
34/* used by profiler driver interface */
35int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp);
36 28
37/* used by the interrupt handler to post events */ 29/* used by the interrupt handler to post events */
38void gk20a_dbg_gpu_post_events(struct channel_gk20a *fault_ch); 30void gk20a_dbg_gpu_post_events(struct channel_gk20a *fault_ch);
@@ -70,8 +62,6 @@ struct dbg_session_gk20a {
70 struct regops_whitelist *global; 62 struct regops_whitelist *global;
71 struct regops_whitelist *per_context; 63 struct regops_whitelist *per_context;
72 64
73 /* gpu module vagaries */
74 struct device *dev;
75 struct gk20a *g; 65 struct gk20a *g;
76 66
77 /* list of bound channels, if any */ 67 /* list of bound channels, if any */
@@ -99,18 +89,12 @@ dbg_session_data_from_dbg_s_entry(struct nvgpu_list_node *node)
99}; 89};
100 90
101struct dbg_session_channel_data { 91struct dbg_session_channel_data {
102 /*
103 * We have to keep a ref to the _file_, not the channel, because
104 * close(channel_fd) is synchronous and would deadlock if we had an
105 * open debug session fd holding a channel ref at that time. Holding a
106 * ref to the file makes close(channel_fd) just drop a kernel ref to
107 * the file; the channel will close when the last file ref is dropped.
108 */
109 struct file *ch_f;
110 int channel_fd; 92 int channel_fd;
111 int chid; 93 int chid;
112 struct nvgpu_list_node ch_entry; 94 struct nvgpu_list_node ch_entry;
113 struct dbg_session_data *session_data; 95 struct dbg_session_data *session_data;
96 int (*unbind_single_channel)(struct dbg_session_gk20a *dbg_s,
97 struct dbg_session_channel_data *ch_data);
114}; 98};
115 99
116static inline struct dbg_session_channel_data * 100static inline struct dbg_session_channel_data *
@@ -135,9 +119,6 @@ dbg_profiler_object_data_from_prof_obj_entry(struct nvgpu_list_node *node)
135 ((uintptr_t)node - offsetof(struct dbg_profiler_object_data, prof_obj_entry)); 119 ((uintptr_t)node - offsetof(struct dbg_profiler_object_data, prof_obj_entry));
136}; 120};
137 121
138int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
139 struct dbg_session_channel_data *ch_data);
140
141bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch); 122bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch);
142int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch); 123int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch);
143 124
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index be8e0f0a..e7aeaa54 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -506,8 +506,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
506 err = gr_gk20a_exec_ctx_ops(ch, ops, num_ops, 506 err = gr_gk20a_exec_ctx_ops(ch, ops, num_ops,
507 ctx_wr_count, ctx_rd_count); 507 ctx_wr_count, ctx_rd_count);
508 if (err) { 508 if (err) {
509 dev_warn(dbg_s->dev, 509 nvgpu_warn(g, "failed to perform ctx ops\n");
510 "failed to perform ctx ops\n");
511 goto clean_up; 510 goto clean_up;
512 } 511 }
513 } 512 }