Diffstat (limited to 'include/os/linux/ioctl_dbg.c')

-rw-r--r--	include/os/linux/ioctl_dbg.c	2210
1 file changed, 2210 insertions, 0 deletions
diff --git a/include/os/linux/ioctl_dbg.c b/include/os/linux/ioctl_dbg.c
new file mode 100644
index 0000000..b5a1071
--- /dev/null
+++ b/include/os/linux/ioctl_dbg.c
@@ -0,0 +1,2210 @@
/*
 * Tegra GK20A GPU Debugger/Profiler Driver
 *
 * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/dma-buf.h>
#include <linux/poll.h>
#include <uapi/linux/nvgpu.h>

#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/vm.h>
#include <nvgpu/atomic.h>
#include <nvgpu/cond.h>
#include <nvgpu/utils.h>
#include <nvgpu/gk20a.h>
#include <nvgpu/channel.h>
#include <nvgpu/tsg.h>

#include <nvgpu/linux/vm.h>

#include "gk20a/gr_gk20a.h"
#include "gk20a/regops_gk20a.h"
#include "gk20a/dbg_gpu_gk20a.h"
#include "os_linux.h"
#include "platform_gk20a.h"
#include "ioctl_dbg.h"
#include "ioctl_channel.h"
#include "dmabuf_vidmem.h"

struct dbg_session_gk20a_linux {
	struct device *dev;
	struct dbg_session_gk20a dbg_s;
};

struct dbg_session_channel_data_linux {
	/*
	 * We have to keep a ref to the _file_, not the channel, because
	 * close(channel_fd) is synchronous and would deadlock if we had an
	 * open debug session fd holding a channel ref at that time. Holding a
	 * ref to the file makes close(channel_fd) just drop a kernel ref to
	 * the file; the channel will close when the last file ref is dropped.
	 */
	struct file *ch_f;
	struct dbg_session_channel_data ch_data;
};
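
/*
 * Illustrative sketch of the lifetime rule above (hypothetical userspace
 * sequence, not part of this driver; device node names may vary by
 * platform). With the session holding only a file ref taken via fget(),
 * close(channel_fd) just drops one kernel ref and returns immediately:
 *
 *   int ch_fd  = open("/dev/nvhost-gpu", O_RDWR);      // channel fd
 *   int dbg_fd = open("/dev/nvhost-dbg-gpu", O_RDWR);  // dbg session fd
 *   // ... bind ch_fd to dbg_fd (see dbg_bind_channel_gk20a below) ...
 *   close(ch_fd);   // safe: only drops one file ref, cannot deadlock
 *   close(dbg_fd);  // unbind path fput()s the file; channel closes here
 */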
/* turn seriously unwieldy names -> something shorter */
#define REGOP_LINUX(x) NVGPU_DBG_GPU_REG_OP_##x
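
/*
 * For reference, REGOP_LINUX(READ_32) expands to
 * NVGPU_DBG_GPU_REG_OP_READ_32 (the UAPI name), while the REGOP() macro
 * used below comes from the common regops header included above and names
 * the NVGPU_DBG_REG_OP_* counterparts.
 */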

/* silly allocator - just increment id */
static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0);
static int generate_unique_id(void)
{
	return nvgpu_atomic_add_return(1, &unique_id);
}

static int alloc_profiler(struct gk20a *g,
			  struct dbg_profiler_object_data **_prof)
{
	struct dbg_profiler_object_data *prof;
	*_prof = NULL;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");

	prof = nvgpu_kzalloc(g, sizeof(*prof));
	if (!prof)
		return -ENOMEM;

	prof->prof_handle = generate_unique_id();
	*_prof = prof;
	return 0;
}

static int alloc_session(struct gk20a *g, struct dbg_session_gk20a_linux **_dbg_s_linux)
{
	struct dbg_session_gk20a_linux *dbg_s_linux;
	*_dbg_s_linux = NULL;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");

	dbg_s_linux = nvgpu_kzalloc(g, sizeof(*dbg_s_linux));
	if (!dbg_s_linux)
		return -ENOMEM;

	dbg_s_linux->dbg_s.id = generate_unique_id();
	*_dbg_s_linux = dbg_s_linux;
	return 0;
}

static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset);

static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_exec_reg_ops_args *args);

static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_powergate_args *args);

static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args);

static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args);

static int nvgpu_dbg_gpu_ioctl_set_mmu_debug_mode(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_set_ctx_mmu_debug_mode_args *args);

static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args);

static int nvgpu_ioctl_allocate_profiler_object(struct dbg_session_gk20a_linux *dbg_s,
		struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args);

static int nvgpu_ioctl_free_profiler_object(struct dbg_session_gk20a_linux *dbg_s_linux,
		struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args);

static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_profiler_reserve_args *args);

static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_perfbuf_map_args *args);

static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_perfbuf_unmap_args *args);

static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
		int timeout_mode);

static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
		u32 profiler_handle);

static void gk20a_dbg_session_nvgpu_mutex_acquire(struct dbg_session_gk20a *dbg_s);

static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_s);

static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
		u32 profiler_handle);

static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s);

static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
		struct file *filp, bool is_profiler);

static int nvgpu_set_sm_exception_type_mask_locked(
		struct dbg_session_gk20a *dbg_s,
		u32 exception_mask);

unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
{
	unsigned int mask = 0;
	struct dbg_session_gk20a_linux *dbg_session_linux = filep->private_data;
	struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
	struct gk20a *g = dbg_s->g;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");

	poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait);

	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);

	if (dbg_s->dbg_events.events_enabled &&
			dbg_s->dbg_events.num_pending_events > 0) {
		nvgpu_log(g, gpu_dbg_gpu_dbg, "found pending event on session id %d",
				dbg_s->id);
		nvgpu_log(g, gpu_dbg_gpu_dbg, "%d events pending",
				dbg_s->dbg_events.num_pending_events);
		mask = (POLLPRI | POLLIN);
	}

	gk20a_dbg_session_nvgpu_mutex_release(dbg_s);

	return mask;
}
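
/*
 * Illustrative userspace sketch (ioctl/macro names as defined in
 * <uapi/linux/nvgpu.h>; error handling elided). Events must first be
 * enabled with NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE, after which poll()
 * wakes with POLLPRI | POLLIN while events are pending:
 *
 *   struct nvgpu_dbg_gpu_events_ctrl_args ev = {
 *           .cmd = NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE,
 *   };
 *   ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL, &ev);
 *
 *   struct pollfd pfd = { .fd = dbg_fd, .events = POLLPRI | POLLIN };
 *   poll(&pfd, 1, -1);                     // blocks until an event posts
 *   ev.cmd = NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR;
 *   ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL, &ev);  // consume one
 */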

int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
{
	struct dbg_session_gk20a_linux *dbg_session_linux = filp->private_data;
	struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
	struct gk20a *g = dbg_s->g;
	struct dbg_profiler_object_data *prof_obj, *tmp_obj;

	nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name);

	/* unbind channels */
	dbg_unbind_all_channels_gk20a(dbg_s);

	/* Re-enable powergate and timeouts here because a session that
	 * disabled them via ioctl may be killed before it gets a chance
	 * to issue the corresponding enable ioctl.
	 */
	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	if (dbg_s->is_pg_disabled) {
		nvgpu_set_powergate_locked(dbg_s, false);
	}
	nvgpu_dbg_timeout_enable(dbg_s, NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE);

	/* If this session owned the perf buffer, release it */
	if (g->perfbuf.owner == dbg_s)
		gk20a_perfbuf_release_locked(g, g->perfbuf.offset);

	/* Per-context profiler objects were released when we called
	 * dbg_unbind_all_channels. We could still have global ones.
	 */
	nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
		if (prof_obj->session_id == dbg_s->id) {
			if (prof_obj->has_reservation)
				g->ops.dbg_session_ops.
					release_profiler_reservation(dbg_s, prof_obj);
			nvgpu_list_del(&prof_obj->prof_obj_entry);
			nvgpu_kfree(g, prof_obj);
		}
	}
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	nvgpu_mutex_destroy(&dbg_s->ch_list_lock);
	nvgpu_mutex_destroy(&dbg_s->ioctl_lock);

	nvgpu_kfree(g, dbg_session_linux);
	gk20a_put(g);

	return 0;
}

int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
{
	struct nvgpu_os_linux *l = container_of(inode->i_cdev,
				struct nvgpu_os_linux, prof.cdev);
	struct gk20a *g = &l->g;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
	return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
}

static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
			struct nvgpu_dbg_gpu_timeout_args *args)
{
	int err;
	struct gk20a *g = dbg_s->g;

	nvgpu_log(g, gpu_dbg_fn, "timeout enable/disable = %d", args->enable);

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return err;
}
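
/*
 * Illustrative userspace sketch (names per <uapi/linux/nvgpu.h>; assumes
 * dbg_fd is already open on the dbg node). Disabling timeouts keeps the
 * scheduler from killing a channel halted at a breakpoint; the release
 * path above restores them if the debugger dies:
 *
 *   struct nvgpu_dbg_gpu_timeout_args targs = {
 *           .enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE,
 *   };
 *   ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_TIMEOUT, &targs);
 */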

static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_read_single_sm_error_state_args *args)
{
	struct gk20a *g = dbg_s->g;
	struct gr_gk20a *gr = &g->gr;
	struct nvgpu_tsg_sm_error_state *sm_error_state;
	struct nvgpu_dbg_gpu_sm_error_state_record sm_error_state_record;
	struct channel_gk20a *ch;
	struct tsg_gk20a *tsg;
	u32 sm_id;
	int err = 0;

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (ch == NULL) {
		return -EINVAL;
	}

	tsg = tsg_gk20a_from_ch(ch);
	if (tsg == NULL) {
		nvgpu_err(g, "no valid tsg from ch");
		return -EINVAL;
	}

	sm_id = args->sm_id;
	if (sm_id >= gr->no_of_sm) {
		return -EINVAL;
	}

	if (tsg->sm_error_states == NULL) {
		return -EINVAL;
	}

	nvgpu_speculation_barrier();

	sm_error_state = tsg->sm_error_states + sm_id;
	sm_error_state_record.hww_global_esr =
		sm_error_state->hww_global_esr;
	sm_error_state_record.hww_warp_esr =
		sm_error_state->hww_warp_esr;
	sm_error_state_record.hww_warp_esr_pc =
		sm_error_state->hww_warp_esr_pc;
	sm_error_state_record.hww_global_esr_report_mask =
		sm_error_state->hww_global_esr_report_mask;
	sm_error_state_record.hww_warp_esr_report_mask =
		sm_error_state->hww_warp_esr_report_mask;

	if (args->sm_error_state_record_size > 0) {
		/* size the copy by the record we actually copy out */
		size_t write_size = sizeof(sm_error_state_record);

		nvgpu_speculation_barrier();
		if (write_size > args->sm_error_state_record_size)
			write_size = args->sm_error_state_record_size;

		nvgpu_mutex_acquire(&g->dbg_sessions_lock);
		err = copy_to_user((void __user *)(uintptr_t)
						args->sm_error_state_record_mem,
				   &sm_error_state_record,
				   write_size);
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		if (err != 0) {
			nvgpu_err(g, "copy_to_user failed!");
			/* copy_to_user returns bytes left, not an errno */
			return -EFAULT;
		}

		args->sm_error_state_record_size = write_size;
	}

	return 0;
}
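
/*
 * Illustrative userspace sketch (names per <uapi/linux/nvgpu.h>). The
 * caller supplies a buffer and its size; the driver clamps the copy to
 * the smaller of the two and writes back the byte count it copied:
 *
 *   struct nvgpu_dbg_gpu_sm_error_state_record rec;
 *   struct nvgpu_dbg_gpu_read_single_sm_error_state_args sm_args = {
 *           .sm_id = 0,
 *           .sm_error_state_record_mem  = (uintptr_t)&rec,
 *           .sm_error_state_record_size = sizeof(rec),
 *   };
 *   ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_READ_SINGLE_SM_ERROR_STATE, &sm_args);
 *   // sm_args.sm_error_state_record_size now holds the bytes written
 */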

static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args)
{
	struct gk20a *g = dbg_s->g;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");

	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);

	dbg_s->broadcast_stop_trigger = (args->broadcast != 0);

	gk20a_dbg_session_nvgpu_mutex_release(dbg_s);

	return 0;
}

static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
			int timeout_mode)
{
	struct gk20a *g = dbg_s->g;
	int err = 0;

	nvgpu_log(g, gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
			timeout_mode);

	nvgpu_speculation_barrier();
	switch (timeout_mode) {
	case NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE:
		if (dbg_s->is_timeout_disabled == true)
			nvgpu_atomic_dec(&g->timeouts_disabled_refcount);
		dbg_s->is_timeout_disabled = false;
		break;

	case NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE:
		if (dbg_s->is_timeout_disabled == false)
			nvgpu_atomic_inc(&g->timeouts_disabled_refcount);
		dbg_s->is_timeout_disabled = true;
		break;

	default:
		nvgpu_err(g,
			"unrecognized dbg gpu timeout mode : 0x%x",
			timeout_mode);
		err = -EINVAL;
		break;
	}

	if (!err)
		nvgpu_log(g, gpu_dbg_gpu_dbg, "dbg is timeout disabled %s, "
				"timeouts disabled refcount %d",
			dbg_s->is_timeout_disabled ? "true" : "false",
			nvgpu_atomic_read(&g->timeouts_disabled_refcount));
	return err;
}

static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
		struct file *filp, bool is_profiler)
{
	struct nvgpu_os_linux *l;
	struct dbg_session_gk20a_linux *dbg_session_linux;
	struct dbg_session_gk20a *dbg_s;
	struct gk20a *g;

	struct device *dev;

	int err;

	if (!is_profiler)
		l = container_of(inode->i_cdev,
				struct nvgpu_os_linux, dbg.cdev);
	else
		l = container_of(inode->i_cdev,
				struct nvgpu_os_linux, prof.cdev);
	g = gk20a_get(&l->g);
	if (!g)
		return -ENODEV;

	dev = dev_from_gk20a(g);

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name);

	err = alloc_session(g, &dbg_session_linux);
	if (err)
		goto free_ref;

	dbg_s = &dbg_session_linux->dbg_s;

	filp->private_data = dbg_session_linux;
	dbg_session_linux->dev = dev;
	dbg_s->g = g;
	dbg_s->is_profiler = is_profiler;
	dbg_s->is_pg_disabled = false;
	dbg_s->is_timeout_disabled = false;

	nvgpu_cond_init(&dbg_s->dbg_events.wait_queue);
	nvgpu_init_list_node(&dbg_s->ch_list);
	err = nvgpu_mutex_init(&dbg_s->ch_list_lock);
	if (err)
		goto err_free_session;
	err = nvgpu_mutex_init(&dbg_s->ioctl_lock);
	if (err)
		goto err_destroy_lock;
	dbg_s->dbg_events.events_enabled = false;
	dbg_s->dbg_events.num_pending_events = 0;

	return 0;

err_destroy_lock:
	nvgpu_mutex_destroy(&dbg_s->ch_list_lock);
err_free_session:
	nvgpu_kfree(g, dbg_session_linux);
free_ref:
	gk20a_put(g);
	return err;
}

void nvgpu_dbg_session_post_event(struct dbg_session_gk20a *dbg_s)
{
	nvgpu_cond_broadcast_interruptible(&dbg_s->dbg_events.wait_queue);
}

static int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
			struct dbg_session_channel_data *ch_data)
{
	struct gk20a *g = dbg_s->g;
	u32 chid;
	struct dbg_session_data *session_data;
	struct dbg_profiler_object_data *prof_obj, *tmp_obj;
	struct dbg_session_channel_data_linux *ch_data_linux;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");

	chid = ch_data->chid;

	/* If there's a profiler ctx reservation record associated with this
	 * session/channel pair, release it.
	 */
	nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
		if ((prof_obj->session_id == dbg_s->id) &&
			(prof_obj->ch->chid == chid)) {
			if (prof_obj->has_reservation) {
				g->ops.dbg_session_ops.
					release_profiler_reservation(dbg_s, prof_obj);
			}
			nvgpu_list_del(&prof_obj->prof_obj_entry);
			nvgpu_kfree(g, prof_obj);
		}
	}

	nvgpu_list_del(&ch_data->ch_entry);

	session_data = ch_data->session_data;
	nvgpu_list_del(&session_data->dbg_s_entry);
	nvgpu_kfree(dbg_s->g, session_data);

	ch_data_linux = container_of(ch_data, struct dbg_session_channel_data_linux,
					ch_data);

	fput(ch_data_linux->ch_f);
	nvgpu_kfree(dbg_s->g, ch_data_linux);

	return 0;
}

static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
			struct nvgpu_dbg_gpu_bind_channel_args *args)
{
	struct file *f;
	struct gk20a *g = dbg_s->g;
	struct channel_gk20a *ch;
	struct dbg_session_channel_data_linux *ch_data_linux;
	struct dbg_session_data *session_data;
	int err = 0;

	nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
			g->name, args->channel_fd);

	/*
	 * Although gk20a_get_channel_from_file gives us a channel ref, we
	 * also need to hold a ref to the file during the session lifetime.
	 * See the comment in struct dbg_session_channel_data.
	 */
	f = fget(args->channel_fd);
	if (!f)
		return -ENODEV;

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch) {
		nvgpu_log_fn(g, "no channel found for fd");
		err = -EINVAL;
		goto out_fput;
	}

	nvgpu_log_fn(g, "%s hwchid=%d", g->name, ch->chid);

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	nvgpu_mutex_acquire(&ch->dbg_s_lock);

	ch_data_linux = nvgpu_kzalloc(g, sizeof(*ch_data_linux));
	if (!ch_data_linux) {
		err = -ENOMEM;
		goto out_chput;
	}
	ch_data_linux->ch_f = f;
	ch_data_linux->ch_data.channel_fd = args->channel_fd;
	ch_data_linux->ch_data.chid = ch->chid;
	ch_data_linux->ch_data.unbind_single_channel = dbg_unbind_single_channel_gk20a;
	nvgpu_init_list_node(&ch_data_linux->ch_data.ch_entry);

	session_data = nvgpu_kzalloc(g, sizeof(*session_data));
	if (!session_data) {
		err = -ENOMEM;
		goto out_kfree;
	}
	session_data->dbg_s = dbg_s;
	nvgpu_init_list_node(&session_data->dbg_s_entry);
	ch_data_linux->ch_data.session_data = session_data;

	nvgpu_list_add(&session_data->dbg_s_entry, &ch->dbg_s_list);

	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
	nvgpu_list_add_tail(&ch_data_linux->ch_data.ch_entry, &dbg_s->ch_list);
	nvgpu_mutex_release(&dbg_s->ch_list_lock);

	nvgpu_mutex_release(&ch->dbg_s_lock);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	gk20a_channel_put(ch);

	return 0;

out_kfree:
	nvgpu_kfree(g, ch_data_linux);
out_chput:
	gk20a_channel_put(ch);
	nvgpu_mutex_release(&ch->dbg_s_lock);
	nvgpu_mutex_release(&g->dbg_sessions_lock);
out_fput:
	fput(f);
	return err;
}
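
/*
 * Illustrative userspace sketch (names per <uapi/linux/nvgpu.h>): binding
 * a previously opened channel fd to the debug session. After this, regops,
 * SM suspend/resume, etc. target that channel's context:
 *
 *   struct nvgpu_dbg_gpu_bind_channel_args bind = {
 *           .channel_fd = ch_fd,
 *   };
 *   ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL, &bind);
 */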

static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s)
{
	struct dbg_session_channel_data *ch_data, *tmp;
	struct gk20a *g = dbg_s->g;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
	nvgpu_list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list,
				dbg_session_channel_data, ch_entry)
		ch_data->unbind_single_channel(dbg_s, ch_data);
	nvgpu_mutex_release(&dbg_s->ch_list_lock);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return 0;
}

/*
 * Convert common regops op values of the form of NVGPU_DBG_REG_OP_*
 * into linux regops op values of the form of NVGPU_DBG_GPU_REG_OP_*
 */
static u32 nvgpu_get_regops_op_values_linux(u32 regops_op)
{
	switch (regops_op) {
	case REGOP(READ_32):
		return REGOP_LINUX(READ_32);
	case REGOP(WRITE_32):
		return REGOP_LINUX(WRITE_32);
	case REGOP(READ_64):
		return REGOP_LINUX(READ_64);
	case REGOP(WRITE_64):
		return REGOP_LINUX(WRITE_64);
	case REGOP(READ_08):
		return REGOP_LINUX(READ_08);
	case REGOP(WRITE_08):
		return REGOP_LINUX(WRITE_08);
	}

	return regops_op;
}

/*
 * Convert linux regops op values of the form of NVGPU_DBG_GPU_REG_OP_*
 * into common regops op values of the form of NVGPU_DBG_REG_OP_*
 */
static u32 nvgpu_get_regops_op_values_common(u32 regops_op)
{
	switch (regops_op) {
	case REGOP_LINUX(READ_32):
		return REGOP(READ_32);
	case REGOP_LINUX(WRITE_32):
		return REGOP(WRITE_32);
	case REGOP_LINUX(READ_64):
		return REGOP(READ_64);
	case REGOP_LINUX(WRITE_64):
		return REGOP(WRITE_64);
	case REGOP_LINUX(READ_08):
		return REGOP(READ_08);
	case REGOP_LINUX(WRITE_08):
		return REGOP(WRITE_08);
	}

	return regops_op;
}

/*
 * Convert common regops type values of the form of NVGPU_DBG_REG_OP_TYPE_*
 * into linux regops type values of the form of NVGPU_DBG_GPU_REG_OP_TYPE_*
 */
static u32 nvgpu_get_regops_type_values_linux(u32 regops_type)
{
	switch (regops_type) {
	case REGOP(TYPE_GLOBAL):
		return REGOP_LINUX(TYPE_GLOBAL);
	case REGOP(TYPE_GR_CTX):
		return REGOP_LINUX(TYPE_GR_CTX);
	case REGOP(TYPE_GR_CTX_TPC):
		return REGOP_LINUX(TYPE_GR_CTX_TPC);
	case REGOP(TYPE_GR_CTX_SM):
		return REGOP_LINUX(TYPE_GR_CTX_SM);
	case REGOP(TYPE_GR_CTX_CROP):
		return REGOP_LINUX(TYPE_GR_CTX_CROP);
	case REGOP(TYPE_GR_CTX_ZROP):
		return REGOP_LINUX(TYPE_GR_CTX_ZROP);
	case REGOP(TYPE_GR_CTX_QUAD):
		return REGOP_LINUX(TYPE_GR_CTX_QUAD);
	}

	return regops_type;
}

/*
 * Convert linux regops type values of the form of NVGPU_DBG_GPU_REG_OP_TYPE_*
 * into common regops type values of the form of NVGPU_DBG_REG_OP_TYPE_*
 */
static u32 nvgpu_get_regops_type_values_common(u32 regops_type)
{
	switch (regops_type) {
	case REGOP_LINUX(TYPE_GLOBAL):
		return REGOP(TYPE_GLOBAL);
	case REGOP_LINUX(TYPE_GR_CTX):
		return REGOP(TYPE_GR_CTX);
	case REGOP_LINUX(TYPE_GR_CTX_TPC):
		return REGOP(TYPE_GR_CTX_TPC);
	case REGOP_LINUX(TYPE_GR_CTX_SM):
		return REGOP(TYPE_GR_CTX_SM);
	case REGOP_LINUX(TYPE_GR_CTX_CROP):
		return REGOP(TYPE_GR_CTX_CROP);
	case REGOP_LINUX(TYPE_GR_CTX_ZROP):
		return REGOP(TYPE_GR_CTX_ZROP);
	case REGOP_LINUX(TYPE_GR_CTX_QUAD):
		return REGOP(TYPE_GR_CTX_QUAD);
	}

	return regops_type;
}

/*
 * Convert common regops status values of the form of NVGPU_DBG_REG_OP_STATUS_*
 * into linux regops status values of the form of NVGPU_DBG_GPU_REG_OP_STATUS_*
 */
static u32 nvgpu_get_regops_status_values_linux(u32 regops_status)
{
	switch (regops_status) {
	case REGOP(STATUS_SUCCESS):
		return REGOP_LINUX(STATUS_SUCCESS);
	case REGOP(STATUS_INVALID_OP):
		return REGOP_LINUX(STATUS_INVALID_OP);
	case REGOP(STATUS_INVALID_TYPE):
		return REGOP_LINUX(STATUS_INVALID_TYPE);
	case REGOP(STATUS_INVALID_OFFSET):
		return REGOP_LINUX(STATUS_INVALID_OFFSET);
	case REGOP(STATUS_UNSUPPORTED_OP):
		return REGOP_LINUX(STATUS_UNSUPPORTED_OP);
	case REGOP(STATUS_INVALID_MASK):
		return REGOP_LINUX(STATUS_INVALID_MASK);
	}

	return regops_status;
}

/*
 * Convert linux regops status values of the form of NVGPU_DBG_GPU_REG_OP_STATUS_*
 * into common regops status values of the form of NVGPU_DBG_REG_OP_STATUS_*
 */
static u32 nvgpu_get_regops_status_values_common(u32 regops_status)
{
	switch (regops_status) {
	case REGOP_LINUX(STATUS_SUCCESS):
		return REGOP(STATUS_SUCCESS);
	case REGOP_LINUX(STATUS_INVALID_OP):
		return REGOP(STATUS_INVALID_OP);
	case REGOP_LINUX(STATUS_INVALID_TYPE):
		return REGOP(STATUS_INVALID_TYPE);
	case REGOP_LINUX(STATUS_INVALID_OFFSET):
		return REGOP(STATUS_INVALID_OFFSET);
	case REGOP_LINUX(STATUS_UNSUPPORTED_OP):
		return REGOP(STATUS_UNSUPPORTED_OP);
	case REGOP_LINUX(STATUS_INVALID_MASK):
		return REGOP(STATUS_INVALID_MASK);
	}

	return regops_status;
}

static int nvgpu_get_regops_data_common(struct nvgpu_dbg_gpu_reg_op *in,
		struct nvgpu_dbg_reg_op *out, u32 num_ops)
{
	u32 i;

	if (in == NULL || out == NULL)
		return -ENOMEM;

	for (i = 0; i < num_ops; i++) {
		out[i].op = nvgpu_get_regops_op_values_common(in[i].op);
		out[i].type = nvgpu_get_regops_type_values_common(in[i].type);
		out[i].status = nvgpu_get_regops_status_values_common(in[i].status);
		out[i].quad = in[i].quad;
		out[i].group_mask = in[i].group_mask;
		out[i].sub_group_mask = in[i].sub_group_mask;
		out[i].offset = in[i].offset;
		out[i].value_lo = in[i].value_lo;
		out[i].value_hi = in[i].value_hi;
		out[i].and_n_mask_lo = in[i].and_n_mask_lo;
		out[i].and_n_mask_hi = in[i].and_n_mask_hi;
	}

	return 0;
}

static int nvgpu_get_regops_data_linux(struct nvgpu_dbg_reg_op *in,
		struct nvgpu_dbg_gpu_reg_op *out, u32 num_ops)
{
	u32 i;

	if (in == NULL || out == NULL)
		return -ENOMEM;

	for (i = 0; i < num_ops; i++) {
		out[i].op = nvgpu_get_regops_op_values_linux(in[i].op);
		out[i].type = nvgpu_get_regops_type_values_linux(in[i].type);
		out[i].status = nvgpu_get_regops_status_values_linux(in[i].status);
		out[i].quad = in[i].quad;
		out[i].group_mask = in[i].group_mask;
		out[i].sub_group_mask = in[i].sub_group_mask;
		out[i].offset = in[i].offset;
		out[i].value_lo = in[i].value_lo;
		out[i].value_hi = in[i].value_hi;
		out[i].and_n_mask_lo = in[i].and_n_mask_lo;
		out[i].and_n_mask_hi = in[i].and_n_mask_hi;
	}

	return 0;
}

static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
				struct nvgpu_dbg_gpu_exec_reg_ops_args *args)
{
	int err = 0, powergate_err = 0;
	bool is_pg_disabled = false;

	struct gk20a *g = dbg_s->g;
	struct channel_gk20a *ch;

	bool is_current_ctx;

	nvgpu_log_fn(g, "%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);

	if (args->num_ops > NVGPU_IOCTL_DBG_REG_OPS_LIMIT) {
		nvgpu_err(g, "regops limit exceeded");
		return -EINVAL;
	}

	if (args->num_ops == 0) {
		/* Nothing to do */
		return 0;
	}

	if (g->dbg_regops_tmp_buf_ops == 0 || !g->dbg_regops_tmp_buf) {
		nvgpu_err(g, "reg ops work buffer not allocated");
		return -ENODEV;
	}

	if (!dbg_s->id) {
		nvgpu_err(g, "can't call reg_ops on an unbound debugger session");
		return -EINVAL;
	}

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!dbg_s->is_profiler && !ch) {
		nvgpu_err(g, "bind a channel before regops for a debugging session");
		return -EINVAL;
	}

	/* since exec_reg_ops sends methods to the ucode, it must take the
	 * global gpu lock to protect against mixing methods from debug sessions
	 * on other channels */
	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	if (!dbg_s->is_pg_disabled && !g->is_virtual) {
		/* In the virtual case, the server will handle
		 * disabling/enabling powergating when processing reg ops
		 */
		powergate_err = nvgpu_set_powergate_locked(dbg_s, true);
		if (!powergate_err) {
			is_pg_disabled = true;
		}
	}

	if (!powergate_err) {
		u64 ops_offset = 0; /* index offset */

		struct nvgpu_dbg_gpu_reg_op *linux_fragment = NULL;

		linux_fragment = nvgpu_kzalloc(g, g->dbg_regops_tmp_buf_ops *
				sizeof(struct nvgpu_dbg_gpu_reg_op));

		if (!linux_fragment) {
			/* Don't return here: the sessions lock is held and
			 * powergating may have been disabled above; fall
			 * through so both get restored below.
			 */
			err = -ENOMEM;
		}

		while (ops_offset < args->num_ops && !err) {
			const u64 num_ops =
				min(args->num_ops - ops_offset,
				    (u64)(g->dbg_regops_tmp_buf_ops));
			const u64 fragment_size =
				num_ops * sizeof(struct nvgpu_dbg_gpu_reg_op);

			void __user *const fragment =
				(void __user *)(uintptr_t)
				(args->ops +
				 ops_offset * sizeof(struct nvgpu_dbg_gpu_reg_op));

			nvgpu_log_fn(g, "Regops fragment: start_op=%llu ops=%llu",
				     ops_offset, num_ops);

			nvgpu_log_fn(g, "Copying regops from userspace");

			if (copy_from_user(linux_fragment,
					   fragment, fragment_size)) {
				nvgpu_err(g, "copy_from_user failed!");
				err = -EFAULT;
				break;
			}

			err = nvgpu_get_regops_data_common(linux_fragment,
					g->dbg_regops_tmp_buf, num_ops);

			if (err)
				break;

			err = g->ops.regops.exec_regops(
				dbg_s, g->dbg_regops_tmp_buf, num_ops, &is_current_ctx);

			if (err) {
				break;
			}

			if (ops_offset == 0) {
				args->gr_ctx_resident = is_current_ctx;
			}

			err = nvgpu_get_regops_data_linux(g->dbg_regops_tmp_buf,
					linux_fragment, num_ops);

			if (err)
				break;

			nvgpu_log_fn(g, "Copying result to userspace");

			if (copy_to_user(fragment, linux_fragment,
					 fragment_size)) {
				nvgpu_err(g, "copy_to_user failed!");
				err = -EFAULT;
				break;
			}

			ops_offset += num_ops;
		}

		nvgpu_speculation_barrier();
		if (linux_fragment)
			nvgpu_kfree(g, linux_fragment);

		/* enable powergate, if previously disabled */
		if (is_pg_disabled) {
			powergate_err = nvgpu_set_powergate_locked(dbg_s,
									false);
		}
	}

	nvgpu_mutex_release(&g->dbg_sessions_lock);

	if (!err && powergate_err)
		err = powergate_err;

	if (err)
		nvgpu_err(g, "dbg regops failed");

	return err;
}
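
/*
 * Illustrative userspace sketch (names per <uapi/linux/nvgpu.h>): a single
 * 32-bit context-register read. args->ops points at an array of
 * nvgpu_dbg_gpu_reg_op; the kernel processes it in work-buffer-sized
 * fragments as shown above. reg_offset is a hypothetical placeholder:
 *
 *   struct nvgpu_dbg_gpu_reg_op op = {
 *           .op     = NVGPU_DBG_GPU_REG_OP_READ_32,
 *           .type   = NVGPU_DBG_GPU_REG_OP_TYPE_GR_CTX,
 *           .offset = reg_offset,
 *   };
 *   struct nvgpu_dbg_gpu_exec_reg_ops_args rops = {
 *           .ops     = (uintptr_t)&op,
 *           .num_ops = 1,
 *   };
 *   ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_REG_OPS, &rops);
 *   // on success, op.value_lo holds the value read, op.status the
 *   // per-op status
 */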

static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
				struct nvgpu_dbg_gpu_powergate_args *args)
{
	int err;
	struct gk20a *g = dbg_s->g;
	nvgpu_log_fn(g, "%s powergate mode = %d",
			g->name, args->mode);

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	if ((args->mode != NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE) &&
	    (args->mode != NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE)) {
		nvgpu_err(g, "invalid powergate mode");
		err = -EINVAL;
		goto pg_err_end;
	}

	err = nvgpu_set_powergate_locked(dbg_s,
			args->mode == NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE);
pg_err_end:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}
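
/*
 * Illustrative userspace sketch (names per <uapi/linux/nvgpu.h>):
 * debuggers typically disable powergating before touching context state,
 * then re-enable it when done (the release path also restores it on exit):
 *
 *   struct nvgpu_dbg_gpu_powergate_args pg = {
 *           .mode = NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE,
 *   };
 *   ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_POWERGATE, &pg);
 */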

static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
			struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args)
{
	int err;
	struct gk20a *g = dbg_s->g;
	struct channel_gk20a *ch_gk20a;

	nvgpu_log_fn(g, "%s smpc ctxsw mode = %d",
			g->name, args->mode);

	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to poweron");
		return err;
	}

	/* Take the global lock, since we'll be doing global regops */
	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch_gk20a) {
		nvgpu_err(g,
			"no bound channel for smpc ctxsw mode update");
		err = -EINVAL;
		goto clean_up;
	}

	err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a,
			args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
	if (err) {
		nvgpu_err(g,
			"error (%d) during smpc ctxsw mode update", err);
	}

clean_up:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	gk20a_idle(g);
	return err;
}
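
/*
 * Illustrative userspace sketch (names per <uapi/linux/nvgpu.h>): enabling
 * SMPC context-switched collection on the bound channel:
 *
 *   struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args smpc = {
 *           .mode = NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW,
 *   };
 *   ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE, &smpc);
 */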

/*
 * Convert linux hwpm ctxsw mode type of the form of NVGPU_DBG_GPU_HWPM_CTXSW_MODE_*
 * into common hwpm ctxsw mode type of the form of NVGPU_DBG_HWPM_CTXSW_MODE_*
 */
static u32 nvgpu_hwpm_ctxsw_mode_to_common_mode(u32 mode)
{
	nvgpu_speculation_barrier();
	switch (mode) {
	case NVGPU_DBG_GPU_HWPM_CTXSW_MODE_NO_CTXSW:
		return NVGPU_DBG_HWPM_CTXSW_MODE_NO_CTXSW;
	case NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW:
		return NVGPU_DBG_HWPM_CTXSW_MODE_CTXSW;
	case NVGPU_DBG_GPU_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW:
		return NVGPU_DBG_HWPM_CTXSW_MODE_STREAM_OUT_CTXSW;
	}

	return mode;
}

static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
			struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args)
{
	int err;
	struct gk20a *g = dbg_s->g;
	struct channel_gk20a *ch_gk20a;
	u32 mode = nvgpu_hwpm_ctxsw_mode_to_common_mode(args->mode);

	nvgpu_log_fn(g, "%s pm ctxsw mode = %d", g->name, args->mode);

	/* Must have a valid reservation to enable/disable hwpm ctxsw.
	 * Just print an error message for now, but eventually this should
	 * return an error, at the point where all client sw has been
	 * cleaned up.
	 */
	if (!dbg_s->has_profiler_reservation) {
		nvgpu_err(g,
			"session doesn't have a valid reservation");
	}

	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to poweron");
		return err;
	}

	/* Take the global lock, since we'll be doing global regops */
	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch_gk20a) {
		nvgpu_err(g,
			"no bound channel for pm ctxsw mode update");
		err = -EINVAL;
		goto clean_up;
	}
	if (g->dbg_powergating_disabled_refcount == 0) {
		nvgpu_err(g, "powergate is not disabled");
		err = -ENOSYS;
		goto clean_up;
	}
	err = g->ops.gr.update_hwpm_ctxsw_mode(g, ch_gk20a, 0,
		mode);

	if (err)
		nvgpu_err(g,
			"error (%d) during pm ctxsw mode update", err);
	/* gk20a would require a WAR to set the core PM_ENABLE bit, not
	 * added here with gk20a being deprecated
	 */
clean_up:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	gk20a_idle(g);
	return err;
}
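
/*
 * Illustrative userspace sketch (names per <uapi/linux/nvgpu.h>): the
 * expected ordering is reserve the profiler, disable powergating, then
 * set the HWPM ctxsw mode, matching the checks above:
 *
 *   // 1. allocate + reserve a profiler object (see further below)
 *   // 2. NVGPU_DBG_GPU_IOCTL_POWERGATE with MODE_DISABLE
 *   struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args pm = {
 *           .mode = NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW,
 *   };
 *   ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_HWPM_CTXSW_MODE, &pm);
 */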

static int nvgpu_dbg_gpu_ioctl_set_mmu_debug_mode(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_set_ctx_mmu_debug_mode_args *args)
{
	int err;
	struct gk20a *g = dbg_s->g;
	struct channel_gk20a *ch;
	bool enable = (args->mode == NVGPU_DBG_GPU_CTX_MMU_DEBUG_MODE_ENABLED);

	nvgpu_log_fn(g, "mode=%u", args->mode);

	if (args->reserved != 0U) {
		return -EINVAL;
	}

	if ((g->ops.fb.set_mmu_debug_mode == NULL) &&
		(g->ops.gr.set_mmu_debug_mode == NULL)) {
		return -ENOSYS;
	}

	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to poweron");
		return err;
	}

	/* Take the global lock, since we'll be doing global regops */
	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch) {
		nvgpu_err(g, "no bound channel for mmu debug mode");
		err = -EINVAL;
		goto clean_up;
	}

	err = nvgpu_tsg_set_mmu_debug_mode(ch, enable);
	if (err) {
		nvgpu_err(g, "set mmu debug mode failed, err=%d", err);
	}

clean_up:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	gk20a_idle(g);
	return err;
}

static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
{
	struct gk20a *g = dbg_s->g;
	struct channel_gk20a *ch;
	int err = 0, action = args->mode;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to poweron");
		return err;
	}

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	/* Suspend GPU context switching */
	err = gr_gk20a_disable_ctxsw(g);
	if (err) {
		nvgpu_err(g, "unable to stop gr ctxsw");
		/* this should probably be ctx-fatal... */
		goto clean_up;
	}

	nvgpu_speculation_barrier();
	switch (action) {
	case NVGPU_DBG_GPU_SUSPEND_ALL_SMS:
		gr_gk20a_suspend_context(ch);
		break;

	case NVGPU_DBG_GPU_RESUME_ALL_SMS:
		gr_gk20a_resume_context(ch);
		break;
	}

	err = gr_gk20a_enable_ctxsw(g);
	if (err)
		nvgpu_err(g, "unable to restart ctxsw!");

clean_up:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	gk20a_idle(g);

	return err;
}
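
/*
 * Illustrative userspace sketch (names per <uapi/linux/nvgpu.h>): stopping
 * all SMs on the bound channel's context; context switching is held off
 * for the duration of the operation, as implemented above:
 *
 *   struct nvgpu_dbg_gpu_suspend_resume_all_sms_args sr = {
 *           .mode = NVGPU_DBG_GPU_SUSPEND_ALL_SMS,
 *   };
 *   ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS, &sr);
 *   // ... inspect SM/warp state ...
 *   sr.mode = NVGPU_DBG_GPU_RESUME_ALL_SMS;
 *   ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS, &sr);
 */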

static int nvgpu_ioctl_allocate_profiler_object(
			struct dbg_session_gk20a_linux *dbg_session_linux,
			struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args)
{
	int err = 0;
	struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
	struct gk20a *g = get_gk20a(dbg_session_linux->dev);
	struct dbg_profiler_object_data *prof_obj;

	nvgpu_log_fn(g, "%s", g->name);

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	err = alloc_profiler(g, &prof_obj);
	if (err)
		goto clean_up;

	prof_obj->session_id = dbg_s->id;

	if (dbg_s->is_profiler)
		prof_obj->ch = NULL;
	else {
		prof_obj->ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
		if (prof_obj->ch == NULL) {
			nvgpu_err(g,
				"bind a channel for dbg session");
			nvgpu_kfree(g, prof_obj);
			err = -EINVAL;
			goto clean_up;
		}
	}

	/* Return handle to client */
	args->profiler_handle = prof_obj->prof_handle;

	nvgpu_init_list_node(&prof_obj->prof_obj_entry);

	nvgpu_list_add(&prof_obj->prof_obj_entry, &g->profiler_objects);
clean_up:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}
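
/*
 * Illustrative userspace sketch of the profiler-object lifecycle (ioctl
 * macro names assumed per <uapi/linux/nvgpu.h>; prof_fd is a profiler
 * session fd opened via gk20a_prof_gpu_dev_open above):
 *
 *   struct nvgpu_dbg_gpu_profiler_obj_mgt_args obj = { 0 };
 *   ioctl(prof_fd, NVGPU_DBG_GPU_IOCTL_PROFILER_ALLOCATE, &obj);
 *
 *   struct nvgpu_dbg_gpu_profiler_reserve_args rsv = {
 *           .profiler_handle = obj.profiler_handle,
 *           .acquire = 1,
 *   };
 *   ioctl(prof_fd, NVGPU_DBG_GPU_IOCTL_PROFILER_RESERVE, &rsv);
 *   // ... profile ...
 *   rsv.acquire = 0;
 *   ioctl(prof_fd, NVGPU_DBG_GPU_IOCTL_PROFILER_RESERVE, &rsv);
 *   ioctl(prof_fd, NVGPU_DBG_GPU_IOCTL_PROFILER_FREE, &obj);
 */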

static int nvgpu_ioctl_free_profiler_object(
			struct dbg_session_gk20a_linux *dbg_s_linux,
			struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args)
{
	int err = 0;
	struct dbg_session_gk20a *dbg_s = &dbg_s_linux->dbg_s;
	struct gk20a *g = get_gk20a(dbg_s_linux->dev);
	struct dbg_profiler_object_data *prof_obj, *tmp_obj;
	bool obj_found = false;

	nvgpu_log_fn(g, "%s session_id = %d profiler_handle = %x",
			g->name, dbg_s->id, args->profiler_handle);

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	/* Remove profiler object from the list, if a match is found */
	nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
		if (prof_obj->prof_handle == args->profiler_handle) {
			if (prof_obj->session_id != dbg_s->id) {
				nvgpu_err(g,
					"invalid handle %x",
					args->profiler_handle);
				err = -EINVAL;
				break;
			}
			if (prof_obj->has_reservation)
				g->ops.dbg_session_ops.
					release_profiler_reservation(dbg_s, prof_obj);
			nvgpu_list_del(&prof_obj->prof_obj_entry);
			nvgpu_kfree(g, prof_obj);
			obj_found = true;
			break;
		}
	}
	if (!obj_found) {
		nvgpu_err(g, "profiler %x not found",
				args->profiler_handle);
		err = -EINVAL;
	}

	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

static struct dbg_profiler_object_data *find_matching_prof_obj(
					struct dbg_session_gk20a *dbg_s,
					u32 profiler_handle)
{
	struct gk20a *g = dbg_s->g;
	struct dbg_profiler_object_data *prof_obj;

	nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
		if (prof_obj->prof_handle == profiler_handle) {
			if (prof_obj->session_id != dbg_s->id) {
				nvgpu_err(g,
					"invalid handle %x",
					profiler_handle);
				return NULL;
			}
			return prof_obj;
		}
	}
	return NULL;
}

/* A debugger session bound to a channel can take just that channel's
 * dbg_s lock for performance, but a profiler session must take the
 * per-gpu lock, since it might not have an associated channel. */
static void gk20a_dbg_session_nvgpu_mutex_acquire(struct dbg_session_gk20a *dbg_s)
{
	struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);

	if (dbg_s->is_profiler || !ch)
		nvgpu_mutex_acquire(&dbg_s->g->dbg_sessions_lock);
	else
		nvgpu_mutex_acquire(&ch->dbg_s_lock);
}

static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_s)
{
	struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);

	if (dbg_s->is_profiler || !ch)
		nvgpu_mutex_release(&dbg_s->g->dbg_sessions_lock);
	else
		nvgpu_mutex_release(&ch->dbg_s_lock);
}

static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
{
	struct gk20a *g = dbg_s->g;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");

	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);

	dbg_s->dbg_events.events_enabled = true;
	dbg_s->dbg_events.num_pending_events = 0;

	gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
}

static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
{
	struct gk20a *g = dbg_s->g;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");

	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);

	dbg_s->dbg_events.events_enabled = false;
	dbg_s->dbg_events.num_pending_events = 0;

	gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
}

static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
{
	struct gk20a *g = dbg_s->g;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");

	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);

	if (dbg_s->dbg_events.events_enabled &&
			dbg_s->dbg_events.num_pending_events > 0)
		dbg_s->dbg_events.num_pending_events--;

	gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
}

static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
			struct nvgpu_dbg_gpu_events_ctrl_args *args)
{
	int ret = 0;
	struct channel_gk20a *ch;
	struct gk20a *g = dbg_s->g;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch) {
		nvgpu_err(g, "no channel bound to dbg session");
		return -EINVAL;
	}

	nvgpu_speculation_barrier();
	switch (args->cmd) {
	case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE:
		gk20a_dbg_gpu_events_enable(dbg_s);
		break;

	case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_DISABLE:
		gk20a_dbg_gpu_events_disable(dbg_s);
		break;

	case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR:
		gk20a_dbg_gpu_events_clear(dbg_s);
		break;

	default:
		nvgpu_err(g, "unrecognized dbg gpu events ctrl cmd: 0x%x",
			args->cmd);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_perfbuf_map_args *args)
{
	struct gk20a *g = dbg_s->g;
	struct mm_gk20a *mm = &g->mm;
	int err;
	u32 virt_size;
	u32 big_page_size = g->ops.mm.get_default_big_page_size();

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	if (g->perfbuf.owner) {
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		return -EBUSY;
	}

	mm->perfbuf.vm = nvgpu_vm_init(g, big_page_size,
			big_page_size << 10,
			NV_MM_DEFAULT_KERNEL_SIZE,
			NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
			false, false, "perfbuf");
	if (!mm->perfbuf.vm) {
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		return -ENOMEM;
	}

	err = nvgpu_vm_map_buffer(mm->perfbuf.vm,
			args->dmabuf_fd,
			&args->offset,
			0,
			SZ_4K,
			0,
			0,
			0,
			0,
			NULL);
	if (err)
		goto err_remove_vm;

	/* perf output buffer may not cross a 4GB boundary */
	virt_size = u64_lo32(args->mapping_size);
	if (u64_hi32(args->offset) != u64_hi32(args->offset + virt_size - 1)) {
		err = -EINVAL;
		goto err_unmap;
	}

	err = g->ops.dbg_session_ops.perfbuffer_enable(g,
			args->offset, virt_size);
	if (err)
		goto err_unmap;

	g->perfbuf.owner = dbg_s;
	g->perfbuf.offset = args->offset;
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return 0;

err_unmap:
	nvgpu_vm_unmap(mm->perfbuf.vm, args->offset, NULL);
err_remove_vm:
	nvgpu_vm_put(mm->perfbuf.vm);
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}
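
/*
 * Illustrative userspace sketch (names per <uapi/linux/nvgpu.h>): mapping
 * a dma-buf as the PM stream output buffer. The mapping must not cross a
 * 4GB boundary in the perfbuf VM, as checked above. buf_fd/buf_size are
 * hypothetical placeholders for an exported dma-buf:
 *
 *   struct nvgpu_dbg_gpu_perfbuf_map_args map = {
 *           .dmabuf_fd    = buf_fd,
 *           .mapping_size = buf_size,
 *   };
 *   ioctl(prof_fd, NVGPU_DBG_GPU_IOCTL_PERFBUF_MAP, &map);
 *   // map.offset now holds the GPU VA the hardware streams to
 *
 *   struct nvgpu_dbg_gpu_perfbuf_unmap_args unmap = { .offset = map.offset };
 *   ioctl(prof_fd, NVGPU_DBG_GPU_IOCTL_PERFBUF_UNMAP, &unmap);
 */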

static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_perfbuf_unmap_args *args)
{
	struct gk20a *g = dbg_s->g;
	int err;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	if ((g->perfbuf.owner != dbg_s) ||
			(g->perfbuf.offset != args->offset)) {
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		return -EINVAL;
	}

	err = gk20a_perfbuf_release_locked(g, args->offset);

	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return err;
}

static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
			struct nvgpu_dbg_gpu_pc_sampling_args *args)
{
	struct channel_gk20a *ch;
	struct gk20a *g = dbg_s->g;

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch)
		return -EINVAL;

	nvgpu_log_fn(g, " ");

	return g->ops.gr.update_pc_sampling ?
		g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL;
}

static int nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *args)
{
	struct gk20a *g = dbg_s->g;
	struct gr_gk20a *gr = &g->gr;
	u32 sm_id;
	struct channel_gk20a *ch;
	int err = 0;

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (ch == NULL) {
		return -EINVAL;
	}

	sm_id = args->sm_id;
	if (sm_id >= gr->no_of_sm)
		return -EINVAL;

	nvgpu_speculation_barrier();

	err = gk20a_busy(g);
	if (err != 0) {
		return err;
	}

	err = gr_gk20a_elpg_protected_call(g,
			g->ops.gr.clear_sm_error_state(g, ch, sm_id));

	gk20a_idle(g);

	return err;
}

static int
nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_suspend_resume_contexts_args *args)
{
	struct gk20a *g = dbg_s->g;
	int err = 0;
	int ctx_resident_ch_fd = -1;

	err = gk20a_busy(g);
	if (err)
		return err;

	nvgpu_speculation_barrier();
	switch (args->action) {
	case NVGPU_DBG_GPU_SUSPEND_ALL_CONTEXTS:
		err = g->ops.gr.suspend_contexts(g, dbg_s,
					&ctx_resident_ch_fd);
		break;

	case NVGPU_DBG_GPU_RESUME_ALL_CONTEXTS:
		err = g->ops.gr.resume_contexts(g, dbg_s,
					&ctx_resident_ch_fd);
		break;
	}

	if (ctx_resident_ch_fd < 0) {
		args->is_resident_context = 0;
	} else {
		args->is_resident_context = 1;
		args->resident_context_fd = ctx_resident_ch_fd;
	}

	gk20a_idle(g);

	return err;
}

static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_access_fb_memory_args *args)
{
	struct gk20a *g = dbg_s->g;
	struct dma_buf *dmabuf;
	void __user *user_buffer = (void __user *)(uintptr_t)args->buffer;
	void *buffer;
	u64 size, access_size, offset;
	u64 access_limit_size = SZ_4K;
	int err = 0;

	if ((args->offset & 3) || (!args->size) || (args->size & 3))
		return -EINVAL;

	dmabuf = dma_buf_get(args->dmabuf_fd);
	if (IS_ERR(dmabuf))
		return -EINVAL;

	if ((args->offset > dmabuf->size) ||
	    (args->size > dmabuf->size) ||
	    (args->offset + args->size > dmabuf->size)) {
		err = -EINVAL;
		goto fail_dmabuf_put;
	}

	buffer = nvgpu_big_zalloc(g, access_limit_size);
	if (!buffer) {
		err = -ENOMEM;
		goto fail_dmabuf_put;
	}

	size = args->size;
	offset = 0;

	err = gk20a_busy(g);
	if (err)
		goto fail_free_buffer;

	while (size) {
		/* Max access size of access_limit_size in one loop */
		access_size = min(access_limit_size, size);

		if (args->cmd ==
		    NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE) {
			if (copy_from_user(buffer, user_buffer + offset,
					access_size)) {
				/* copy_from_user returns the byte count
				 * left, not an errno; report -EFAULT */
				err = -EFAULT;
				goto fail_idle;
			}
		}

		err = nvgpu_vidmem_buf_access_memory(g, dmabuf, buffer,
				args->offset + offset, access_size,
				args->cmd);
		if (err)
			goto fail_idle;

		if (args->cmd ==
		    NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ) {
			if (copy_to_user(user_buffer + offset,
					buffer, access_size)) {
				err = -EFAULT;
				goto fail_idle;
			}
		}

		size -= access_size;
		offset += access_size;
	}
	nvgpu_speculation_barrier();

fail_idle:
	gk20a_idle(g);
fail_free_buffer:
	nvgpu_big_free(g, buffer);
fail_dmabuf_put:
	dma_buf_put(dmabuf);

	return err;
}
1649 | |||
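/* Dispatch a profiler reserve request to the acquire or release path. */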
static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_profiler_reserve_args *args)
{
	if (args->acquire)
		return nvgpu_profiler_reserve_acquire(dbg_s, args->profiler_handle);

	return nvgpu_profiler_reserve_release(dbg_s, args->profiler_handle);
}

static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_timeout_args *args)
{
	bool status;
	struct gk20a *g = dbg_s->g;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	status = nvgpu_is_timeouts_enabled(g);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	if (status)
		args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE;
	else
		args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE;
}

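/*
 * Release the perf buffer. Caller must hold g->dbg_sessions_lock, hence
 * the _locked suffix; the perfbuffer is disabled before its mapping,
 * instance block and VM reference are torn down.
 */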
static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->perfbuf.vm;
	int err;

	err = g->ops.dbg_session_ops.perfbuffer_disable(g);

	nvgpu_vm_unmap(vm, offset, NULL);
	nvgpu_free_inst_block(g, &mm->perfbuf.inst_block);
	nvgpu_vm_put(vm);

	g->perfbuf.owner = NULL;
	g->perfbuf.offset = 0;
	return err;
}

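/*
 * Drop this session's profiler reservation identified by profiler_handle.
 * Fails if the handle does not name one of the session's profiler objects
 * or if no reservation is currently held.
 */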
static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
		u32 profiler_handle)
{
	struct gk20a *g = dbg_s->g;
	struct dbg_profiler_object_data *prof_obj;
	int err = 0;

	nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle);

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	/* Find matching object. */
	prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);

	if (!prof_obj) {
		nvgpu_err(g, "object not found");
		err = -EINVAL;
		goto exit;
	}

	if (prof_obj->has_reservation) {
		g->ops.dbg_session_ops.release_profiler_reservation(dbg_s,
				prof_obj);
	} else {
		nvgpu_err(g, "No reservation found");
		err = -EINVAL;
		goto exit;
	}
exit:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

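/*
 * Acquire a profiler reservation. The rules, enforced under
 * dbg_sessions_lock:
 *   - a global reservation (no channel bound) is granted only when no
 *     other global or per-context reservation exists;
 *   - a per-context reservation is refused while a global one is held;
 *   - within a TSG, only one channel may hold the reservation;
 *   - a bare channel may not be reserved twice.
 */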
static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
		u32 profiler_handle)
{
	struct gk20a *g = dbg_s->g;
	struct dbg_profiler_object_data *prof_obj, *my_prof_obj;
	int err = 0;
	struct tsg_gk20a *tsg;

	nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle);

	if (g->profiler_reservation_count < 0) {
		nvgpu_err(g, "Negative reservation count!");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	/* Find matching object. */
	my_prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);

	if (!my_prof_obj) {
		nvgpu_err(g, "object not found");
		err = -EINVAL;
		goto exit;
	}

	/* If we already have the reservation, we're done */
	if (my_prof_obj->has_reservation) {
		err = 0;
		goto exit;
	}

	if (my_prof_obj->ch == NULL) {
		/* Global reservations are only allowed if there are no other
		 * global or per-context reservations currently held
		 */
		if (!g->ops.dbg_session_ops.check_and_set_global_reservation(
				dbg_s, my_prof_obj)) {
			nvgpu_err(g,
				"global reserve: have existing reservation");
			err = -EBUSY;
		}
	} else if (g->global_profiler_reservation_held) {
		/* If there's a global reservation,
		 * we can't take a per-context one.
		 */
		nvgpu_err(g,
			"per-ctxt reserve: global reservation in effect");
		err = -EBUSY;
	} else if ((tsg = tsg_gk20a_from_ch(my_prof_obj->ch)) != NULL) {
		/* TSG: check that another channel in the TSG
		 * doesn't already have the reservation
		 */
		u32 my_tsgid = tsg->tsgid;

		nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
			if (prof_obj->has_reservation &&
					(prof_obj->ch->tsgid == my_tsgid)) {
				nvgpu_err(g,
					"per-ctxt reserve (tsg): already reserved");
				err = -EBUSY;
				goto exit;
			}
		}

		if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
				dbg_s, my_prof_obj)) {
			/* Another guest OS has the global reservation */
			nvgpu_err(g,
				"per-ctxt reserve: global reservation in effect");
			err = -EBUSY;
		}
	} else {
		/* channel: check that some other profiler object doesn't
		 * already have the reservation.
		 */
		struct channel_gk20a *my_ch = my_prof_obj->ch;

		nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
			if (prof_obj->has_reservation &&
					(prof_obj->ch == my_ch)) {
				nvgpu_err(g,
					"per-ctxt reserve (ch): already reserved");
				err = -EBUSY;
				goto exit;
			}
		}

		if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
				dbg_s, my_prof_obj)) {
			/* Another guest OS has the global reservation */
			nvgpu_err(g,
				"per-ctxt reserve: global reservation in effect");
			err = -EBUSY;
		}
	}
exit:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

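/*
 * Unbind a channel from this debug session. The fd is resolved back to a
 * channel, matched against the session's bound-channel list, and the
 * single-channel unbind helper runs under both session locks.
 */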
static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_unbind_channel_args *args)
{
	struct dbg_session_channel_data *ch_data;
	struct gk20a *g = dbg_s->g;
	bool channel_found = false;
	struct channel_gk20a *ch;
	int err;

	nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
		g->name, args->channel_fd);

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch) {
		nvgpu_log_fn(g, "no channel found for fd");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
	nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
			dbg_session_channel_data, ch_entry) {
		if (ch->chid == ch_data->chid) {
			channel_found = true;
			break;
		}
	}
	nvgpu_mutex_release(&dbg_s->ch_list_lock);

	if (!channel_found) {
		nvgpu_log_fn(g, "channel not bound, fd=%d", args->channel_fd);
		err = -EINVAL;
		goto out;
	}

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
	err = dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
	nvgpu_mutex_release(&dbg_s->ch_list_lock);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

out:
	gk20a_channel_put(ch);
	return err;
}

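/* Caller must hold g->dbg_sessions_lock. */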
static int nvgpu_set_sm_exception_type_mask_locked(
		struct dbg_session_gk20a *dbg_s,
		u32 exception_mask)
{
	struct gk20a *g = dbg_s->g;
	int err = 0;
	struct channel_gk20a *ch = NULL;

	/*
	 * Obtain the first channel from the channel list in dbg_session,
	 * find the TSG associated with that channel, and set the
	 * sm_exception_mask_type on that context.
	 */
	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (ch != NULL) {
		struct tsg_gk20a *tsg;

		tsg = tsg_gk20a_from_ch(ch);
		if (tsg != NULL) {
			tsg->sm_exception_mask_type = exception_mask;
			goto type_mask_end;
		}
	}

	nvgpu_log_fn(g, "unable to find the TSG");
	err = -EINVAL;

type_mask_end:
	return err;
}

static int nvgpu_dbg_gpu_set_sm_exception_type_mask(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_set_sm_exception_type_mask_args *args)
{
	int err = 0;
	struct gk20a *g = dbg_s->g;
	u32 sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE;

	nvgpu_speculation_barrier();
	switch (args->exception_type_mask) {
	case NVGPU_DBG_GPU_IOCTL_SET_SM_EXCEPTION_TYPE_MASK_FATAL:
		sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_FATAL;
		break;
	case NVGPU_DBG_GPU_IOCTL_SET_SM_EXCEPTION_TYPE_MASK_NONE:
		sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE;
		break;
	default:
		nvgpu_err(g,
			"unrecognized dbg sm exception type mask: 0x%x",
			args->exception_type_mask);
		err = -EINVAL;
		break;
	}

	if (err != 0) {
		return err;
	}

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	err = nvgpu_set_sm_exception_type_mask_locked(dbg_s,
			sm_exception_mask_type);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return err;
}

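/*
 * Cycle stats support is a build-time option; the two handlers below are
 * compiled only when CONFIG_GK20A_CYCLE_STATS is set.
 */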
#if defined(CONFIG_GK20A_CYCLE_STATS)
static int nvgpu_dbg_gpu_cycle_stats(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_cycle_stats_args *args)
{
	struct channel_gk20a *ch = NULL;
	int err;

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (ch == NULL) {
		return -EINVAL;
	}

	err = gk20a_busy(ch->g);
	if (err != 0) {
		return err;
	}

	err = gk20a_channel_cycle_stats(ch, args->dmabuf_fd);

	gk20a_idle(ch->g);
	return err;
}

static int nvgpu_dbg_gpu_cycle_stats_snapshot(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_cycle_stats_snapshot_args *args)
{
	struct channel_gk20a *ch = NULL;
	int err;

	if (!args->dmabuf_fd) {
		return -EINVAL;
	}

	nvgpu_speculation_barrier();

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (ch == NULL) {
		return -EINVAL;
	}

	/* is it allowed to handle calls for current GPU? */
	if (!nvgpu_is_enabled(ch->g, NVGPU_SUPPORT_CYCLE_STATS_SNAPSHOT)) {
		return -ENOSYS;
	}

	err = gk20a_busy(ch->g);
	if (err != 0) {
		return err;
	}

	/* handle the command (most frequent cases first) */
	switch (args->cmd) {
	case NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT_CMD_FLUSH:
		err = gk20a_flush_cycle_stats_snapshot(ch);
		args->extra = 0;
		break;

	case NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT_CMD_ATTACH:
		err = gk20a_attach_cycle_stats_snapshot(ch,
				args->dmabuf_fd,
				args->extra,
				&args->extra);
		break;

	case NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT_CMD_DETACH:
		err = gk20a_channel_free_cycle_stats_snapshot(ch);
		args->extra = 0;
		break;

	default:
		pr_err("cyclestats: unknown command %u\n", args->cmd);
		err = -EINVAL;
		break;
	}

	gk20a_idle(ch->g);
	return err;
}

#endif

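/*
 * open() handler for the debugger device node. The owning gk20a instance
 * is recovered from the cdev, and the shared open helper is invoked in
 * non-profiler mode.
 */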
int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
{
	struct nvgpu_os_linux *l = container_of(inode->i_cdev,
			struct nvgpu_os_linux, dbg.cdev);
	struct gk20a *g = &l->g;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
	return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
}

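/*
 * Top-level ioctl dispatcher for the debugger device. Argument structs are
 * staged through an on-stack buffer: copied in when the command carries the
 * _IOC_WRITE direction bit, dispatched under ioctl_lock to serialize
 * threaded user-space callers, and copied back out on success when
 * _IOC_READ is set.
 *
 * Illustrative user-space call (hypothetical dbg_fd, obtained by opening
 * the debugger device node, e.g. /dev/nvhost-dbg-gpu on Tegra):
 *
 *	struct nvgpu_dbg_gpu_timeout_args args = { 0 };
 *
 *	if (ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_GET_TIMEOUT, &args) == 0)
 *		printf("timeouts %s\n",
 *			args.enable == NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE ?
 *			"enabled" : "disabled");
 */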
long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct dbg_session_gk20a_linux *dbg_s_linux = filp->private_data;
	struct dbg_session_gk20a *dbg_s = &dbg_s_linux->dbg_s;
	struct gk20a *g = dbg_s->g;
	u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
	int err = 0;

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");

	if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_DBG_GPU_IOCTL_LAST) ||
	    (_IOC_SIZE(cmd) > NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE))
		return -EINVAL;

	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	if (!g->sw_ready) {
		err = gk20a_busy(g);
		if (err)
			return err;

		gk20a_idle(g);
	}

	/* protect from threaded user space calls */
	nvgpu_mutex_acquire(&dbg_s->ioctl_lock);

	nvgpu_speculation_barrier();
	switch (cmd) {
	case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL:
		err = dbg_bind_channel_gk20a(dbg_s,
			     (struct nvgpu_dbg_gpu_bind_channel_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_REG_OPS:
		err = nvgpu_ioctl_channel_reg_ops(dbg_s,
			   (struct nvgpu_dbg_gpu_exec_reg_ops_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_POWERGATE:
		err = nvgpu_ioctl_powergate_gk20a(dbg_s,
			   (struct nvgpu_dbg_gpu_powergate_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL:
		err = gk20a_dbg_gpu_events_ctrl(dbg_s,
			   (struct nvgpu_dbg_gpu_events_ctrl_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE:
		err = nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(dbg_s,
			   (struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_HWPM_CTXSW_MODE:
		err = nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(dbg_s,
			   (struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS:
		err = nvgpu_dbg_gpu_ioctl_suspend_resume_sm(dbg_s,
		       (struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PERFBUF_MAP:
		err = gk20a_perfbuf_map(dbg_s,
		       (struct nvgpu_dbg_gpu_perfbuf_map_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PERFBUF_UNMAP:
		err = gk20a_perfbuf_unmap(dbg_s,
		       (struct nvgpu_dbg_gpu_perfbuf_unmap_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PC_SAMPLING:
		err = gk20a_dbg_pc_sampling(dbg_s,
			   (struct nvgpu_dbg_gpu_pc_sampling_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_SET_NEXT_STOP_TRIGGER_TYPE:
		err = nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(dbg_s,
		       (struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_TIMEOUT:
		err = nvgpu_dbg_gpu_ioctl_timeout(dbg_s,
			   (struct nvgpu_dbg_gpu_timeout_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_GET_TIMEOUT:
		nvgpu_dbg_gpu_ioctl_get_timeout(dbg_s,
			   (struct nvgpu_dbg_gpu_timeout_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_READ_SINGLE_SM_ERROR_STATE:
		err = nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(dbg_s,
		       (struct nvgpu_dbg_gpu_read_single_sm_error_state_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_CLEAR_SINGLE_SM_ERROR_STATE:
		err = nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(dbg_s,
		       (struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_UNBIND_CHANNEL:
		err = dbg_unbind_channel_gk20a(dbg_s,
			     (struct nvgpu_dbg_gpu_unbind_channel_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_CONTEXTS:
		err = nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(dbg_s,
		       (struct nvgpu_dbg_gpu_suspend_resume_contexts_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY:
		err = nvgpu_dbg_gpu_ioctl_access_fb_memory(dbg_s,
		       (struct nvgpu_dbg_gpu_access_fb_memory_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PROFILER_ALLOCATE:
		err = nvgpu_ioctl_allocate_profiler_object(dbg_s_linux,
		       (struct nvgpu_dbg_gpu_profiler_obj_mgt_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PROFILER_FREE:
		err = nvgpu_ioctl_free_profiler_object(dbg_s_linux,
		       (struct nvgpu_dbg_gpu_profiler_obj_mgt_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PROFILER_RESERVE:
		err = nvgpu_ioctl_profiler_reserve(dbg_s,
			   (struct nvgpu_dbg_gpu_profiler_reserve_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_SET_SM_EXCEPTION_TYPE_MASK:
		err = nvgpu_dbg_gpu_set_sm_exception_type_mask(dbg_s,
		       (struct nvgpu_dbg_gpu_set_sm_exception_type_mask_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_SET_CTX_MMU_DEBUG_MODE:
		err = nvgpu_dbg_gpu_ioctl_set_mmu_debug_mode(dbg_s,
		       (struct nvgpu_dbg_gpu_set_ctx_mmu_debug_mode_args *)buf);
		break;

#ifdef CONFIG_GK20A_CYCLE_STATS
	case NVGPU_DBG_GPU_IOCTL_CYCLE_STATS:
		err = nvgpu_dbg_gpu_cycle_stats(dbg_s,
		       (struct nvgpu_dbg_gpu_cycle_stats_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_CYCLE_STATS_SNAPSHOT:
		err = nvgpu_dbg_gpu_cycle_stats_snapshot(dbg_s,
		       (struct nvgpu_dbg_gpu_cycle_stats_snapshot_args *)buf);
		break;
#endif

	default:
		nvgpu_err(g,
			   "unrecognized dbg gpu ioctl cmd: 0x%x",
			   cmd);
		err = -ENOTTY;
		break;
	}

	nvgpu_mutex_release(&dbg_s->ioctl_lock);

	nvgpu_log(g, gpu_dbg_gpu_dbg, "ret=%d", err);

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
		/*
		 * copy_to_user() returns the number of bytes it could not
		 * copy; map a partial copy to -EFAULT instead of returning
		 * the raw count to user space.
		 */
		if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
			err = -EFAULT;
	}

	return err;
}