Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_dbg.c')
 drivers/gpu/nvgpu/common/linux/ioctl_dbg.c | 1751 ++++++++++++++++++++++++++++
 1 file changed, 1751 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
new file mode 100644
index 00000000..304fd71f
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
@@ -0,0 +1,1751 @@
/*
 * Tegra GK20A GPU Debugger/Profiler Driver
 *
 * Copyright (c) 2017, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/dma-buf.h>
#include <uapi/linux/nvgpu.h>

#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/vm.h>
#include <nvgpu/atomic.h>
#include <nvgpu/cond.h>

#include <nvgpu/linux/vidmem.h>
#include <nvgpu/linux/vm.h>

#include "gk20a/gk20a.h"
#include "gk20a/gr_gk20a.h"
#include "gk20a/regops_gk20a.h"
#include "gk20a/dbg_gpu_gk20a.h"
#include "os_linux.h"
#include "platform_gk20a.h"
#include "ioctl_dbg.h"

/* silly allocator - just increment id */
static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0);
static int generate_unique_id(void)
{
	return nvgpu_atomic_add_return(1, &unique_id);
}

static int alloc_profiler(struct gk20a *g,
			  struct dbg_profiler_object_data **_prof)
{
	struct dbg_profiler_object_data *prof;
	*_prof = NULL;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	prof = nvgpu_kzalloc(g, sizeof(*prof));
	if (!prof)
		return -ENOMEM;

	prof->prof_handle = generate_unique_id();
	*_prof = prof;
	return 0;
}

static int alloc_session(struct gk20a *g, struct dbg_session_gk20a_linux **_dbg_s_linux)
{
	struct dbg_session_gk20a_linux *dbg_s_linux;
	*_dbg_s_linux = NULL;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	dbg_s_linux = nvgpu_kzalloc(g, sizeof(*dbg_s_linux));
	if (!dbg_s_linux)
		return -ENOMEM;

	dbg_s_linux->dbg_s.id = generate_unique_id();
	*_dbg_s_linux = dbg_s_linux;
	return 0;
}

static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
				      struct gr_gk20a *gr);

static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset);

static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
				struct nvgpu_dbg_gpu_exec_reg_ops_args *args);

static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
				struct nvgpu_dbg_gpu_powergate_args *args);

static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
			struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args);

static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
			struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args);

static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args);

static int nvgpu_ioctl_allocate_profiler_object(struct dbg_session_gk20a_linux *dbg_s,
			struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args);

static int nvgpu_ioctl_free_profiler_object(struct dbg_session_gk20a_linux *dbg_s_linux,
			struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args);

static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s,
			struct nvgpu_dbg_gpu_profiler_reserve_args *args);

static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
			struct nvgpu_dbg_gpu_perfbuf_map_args *args);

static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
			struct nvgpu_dbg_gpu_perfbuf_unmap_args *args);

static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
			int timeout_mode);

static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
			u32 profiler_handle);

static void gk20a_dbg_session_nvgpu_mutex_acquire(struct dbg_session_gk20a *dbg_s);

static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_s);

static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
			u32 profiler_handle);

static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s);

static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
			struct file *filp, bool is_profiler);

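/* poll() on a dbg/prof device node: wake the caller when a debug event is
 * pending for this session, reporting POLLPRI | POLLIN.
 */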
unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
{
	unsigned int mask = 0;
	struct dbg_session_gk20a_linux *dbg_session_linux = filep->private_data;
	struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait);

	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);

	if (dbg_s->dbg_events.events_enabled &&
	    dbg_s->dbg_events.num_pending_events > 0) {
		gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d",
			  dbg_s->id);
		gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
			  dbg_s->dbg_events.num_pending_events);
		mask = (POLLPRI | POLLIN);
	}

	gk20a_dbg_session_nvgpu_mutex_release(dbg_s);

	return mask;
}

int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
{
	struct dbg_session_gk20a_linux *dbg_session_linux = filp->private_data;
	struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
	struct gk20a *g = dbg_s->g;
	struct dbg_profiler_object_data *prof_obj, *tmp_obj;

	gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name);

	/* unbind channels */
	dbg_unbind_all_channels_gk20a(dbg_s);

	/* Powergate/timeout enable is called here because the session that
	 * issued the powergate/timeout disable ioctl may be killed without
	 * ever issuing the corresponding enable ioctl.
	 */
	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, false);
	nvgpu_dbg_timeout_enable(dbg_s, NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE);

	/* If this session owned the perf buffer, release it */
	if (g->perfbuf.owner == dbg_s)
		gk20a_perfbuf_release_locked(g, g->perfbuf.offset);

	/* Per-context profiler objects were released when we called
	 * dbg_unbind_all_channels. We could still have global ones.
	 */
	nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
		if (prof_obj->session_id == dbg_s->id) {
			if (prof_obj->has_reservation)
				g->ops.dbg_session_ops.
					release_profiler_reservation(dbg_s, prof_obj);
			nvgpu_list_del(&prof_obj->prof_obj_entry);
			nvgpu_kfree(g, prof_obj);
		}
	}
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	nvgpu_mutex_destroy(&dbg_s->ch_list_lock);
	nvgpu_mutex_destroy(&dbg_s->ioctl_lock);

	nvgpu_kfree(g, dbg_session_linux);
	gk20a_put(g);

	return 0;
}

int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
	return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
}

static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
			 struct nvgpu_dbg_gpu_timeout_args *args)
{
	int err;
	struct gk20a *g = dbg_s->g;

	gk20a_dbg_fn("timeout enable = %d", args->enable);

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return err;
}

static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_write_single_sm_error_state_args *args)
{
	struct gk20a *g = dbg_s->g;
	struct gr_gk20a *gr = &g->gr;
	u32 sm_id;
	struct channel_gk20a *ch;
	struct nvgpu_dbg_gpu_sm_error_state_record sm_error_state_record = { 0 };
	struct nvgpu_gr_sm_error_state sm_error_state;
	int err = 0;

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch)
		return -EINVAL;

	sm_id = args->sm_id;
	if (sm_id >= gr->no_of_sm)
		return -EINVAL;

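	/* Read in only as much of the user's record as was provided; since
	 * the record is zero-initialized above, fields beyond read_size
	 * stay zero.
	 */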
	if (args->sm_error_state_record_size > 0) {
		size_t read_size = sizeof(sm_error_state_record);

		if (read_size > args->sm_error_state_record_size)
			read_size = args->sm_error_state_record_size;

		nvgpu_mutex_acquire(&g->dbg_sessions_lock);
		err = copy_from_user(&sm_error_state_record,
				     (void __user *)(uintptr_t)
					args->sm_error_state_record_mem,
				     read_size);
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		if (err)
			return -EFAULT;
	}

	err = gk20a_busy(g);
	if (err)
		return err;

	sm_error_state.hww_global_esr =
		sm_error_state_record.hww_global_esr;
	sm_error_state.hww_warp_esr =
		sm_error_state_record.hww_warp_esr;
	sm_error_state.hww_warp_esr_pc =
		sm_error_state_record.hww_warp_esr_pc;
	sm_error_state.hww_global_esr_report_mask =
		sm_error_state_record.hww_global_esr_report_mask;
	sm_error_state.hww_warp_esr_report_mask =
		sm_error_state_record.hww_warp_esr_report_mask;

	err = gr_gk20a_elpg_protected_call(g,
			g->ops.gr.update_sm_error_state(g, ch,
					sm_id, &sm_error_state));

	gk20a_idle(g);

	return err;
}


static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_read_single_sm_error_state_args *args)
{
	struct gk20a *g = dbg_s->g;
	struct gr_gk20a *gr = &g->gr;
	struct nvgpu_gr_sm_error_state *sm_error_state;
	struct nvgpu_dbg_gpu_sm_error_state_record sm_error_state_record;
	u32 sm_id;
	int err = 0;

	sm_id = args->sm_id;
	if (sm_id >= gr->no_of_sm)
		return -EINVAL;

	sm_error_state = gr->sm_error_states + sm_id;
	sm_error_state_record.hww_global_esr =
		sm_error_state->hww_global_esr;
	sm_error_state_record.hww_warp_esr =
		sm_error_state->hww_warp_esr;
	sm_error_state_record.hww_warp_esr_pc =
		sm_error_state->hww_warp_esr_pc;
	sm_error_state_record.hww_global_esr_report_mask =
		sm_error_state->hww_global_esr_report_mask;
	sm_error_state_record.hww_warp_esr_report_mask =
		sm_error_state->hww_warp_esr_report_mask;

	if (args->sm_error_state_record_size > 0) {
		size_t write_size = sizeof(sm_error_state_record);

		if (write_size > args->sm_error_state_record_size)
			write_size = args->sm_error_state_record_size;

		nvgpu_mutex_acquire(&g->dbg_sessions_lock);
		err = copy_to_user((void __user *)(uintptr_t)
					args->sm_error_state_record_mem,
				   &sm_error_state_record,
				   write_size);
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		if (err) {
			nvgpu_err(g, "copy_to_user failed!");
			return -EFAULT;
		}

		args->sm_error_state_record_size = write_size;
	}

	return 0;
}


static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);

	dbg_s->broadcast_stop_trigger = (args->broadcast != 0);

	gk20a_dbg_session_nvgpu_mutex_release(dbg_s);

	return 0;
}

static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
			int timeout_mode)
{
	struct gk20a *g = dbg_s->g;
	int err = 0;

	gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
			timeout_mode);

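	/* Timeout disables are refcounted across sessions: the first disable
	 * turns timeouts off globally, and they come back on only once every
	 * disabling session has re-enabled them (or has been released).
	 */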
	switch (timeout_mode) {
	case NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE:
		if (dbg_s->is_timeout_disabled &&
		    --g->dbg_timeout_disabled_refcount == 0) {
			g->timeouts_enabled = true;
		}
		dbg_s->is_timeout_disabled = false;
		break;

	case NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE:
		if ((dbg_s->is_timeout_disabled == false) &&
		    (g->dbg_timeout_disabled_refcount++ == 0)) {
			g->timeouts_enabled = false;
		}
		dbg_s->is_timeout_disabled = true;
		break;

	default:
		nvgpu_err(g,
			  "unrecognized dbg gpu timeout mode : 0x%x",
			  timeout_mode);
		err = -EINVAL;
		break;
	}

	gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
			g->timeouts_enabled ? "Yes" : "No");

	return err;
}

static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
				     struct file *filp, bool is_profiler)
{
	struct nvgpu_os_linux *l;
	struct dbg_session_gk20a_linux *dbg_session_linux;
	struct dbg_session_gk20a *dbg_s;
	struct gk20a *g;

	struct device *dev;

	int err;

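	/* Both the dbg and prof device nodes funnel into this open path;
	 * recover the nvgpu_os_linux instance from whichever cdev was opened.
	 */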
	if (!is_profiler)
		l = container_of(inode->i_cdev,
				 struct nvgpu_os_linux, dbg.cdev);
	else
		l = container_of(inode->i_cdev,
				 struct nvgpu_os_linux, prof.cdev);
	g = gk20a_get(&l->g);
	if (!g)
		return -ENODEV;

	dev = dev_from_gk20a(g);

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name);

	err = alloc_session(g, &dbg_session_linux);
	if (err)
		goto free_ref;

	dbg_s = &dbg_session_linux->dbg_s;

	filp->private_data = dbg_session_linux;
	dbg_session_linux->dev = dev;
	dbg_s->g = g;
	dbg_s->is_profiler = is_profiler;
	dbg_s->is_pg_disabled = false;
	dbg_s->is_timeout_disabled = false;

	nvgpu_cond_init(&dbg_s->dbg_events.wait_queue);
	nvgpu_init_list_node(&dbg_s->ch_list);
	err = nvgpu_mutex_init(&dbg_s->ch_list_lock);
	if (err)
		goto err_free_session;
	err = nvgpu_mutex_init(&dbg_s->ioctl_lock);
	if (err)
		goto err_destroy_lock;
	dbg_s->dbg_events.events_enabled = false;
	dbg_s->dbg_events.num_pending_events = 0;

	return 0;

err_destroy_lock:
	nvgpu_mutex_destroy(&dbg_s->ch_list_lock);
err_free_session:
	nvgpu_kfree(g, dbg_session_linux);
free_ref:
	gk20a_put(g);
	return err;
}

static int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
			struct dbg_session_channel_data *ch_data)
{
	struct gk20a *g = dbg_s->g;
	int chid;
	struct dbg_session_data *session_data;
	struct dbg_profiler_object_data *prof_obj, *tmp_obj;
	struct dbg_session_channel_data_linux *ch_data_linux;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	chid = ch_data->chid;

	/* If there's a profiler ctx reservation record associated with this
	 * session/channel pair, release it.
	 */
	nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
		if ((prof_obj->session_id == dbg_s->id) &&
				(prof_obj->ch->chid == chid)) {
			if (prof_obj->has_reservation) {
				g->ops.dbg_session_ops.
				  release_profiler_reservation(dbg_s, prof_obj);
			}
			nvgpu_list_del(&prof_obj->prof_obj_entry);
			nvgpu_kfree(g, prof_obj);
		}
	}

	nvgpu_list_del(&ch_data->ch_entry);

	session_data = ch_data->session_data;
	nvgpu_list_del(&session_data->dbg_s_entry);
	nvgpu_kfree(dbg_s->g, session_data);

	ch_data_linux = container_of(ch_data, struct dbg_session_channel_data_linux,
					ch_data);

	fput(ch_data_linux->ch_f);
	nvgpu_kfree(dbg_s->g, ch_data_linux);

	return 0;
}

static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
			struct nvgpu_dbg_gpu_bind_channel_args *args)
{
	struct file *f;
	struct gk20a *g = dbg_s->g;
	struct channel_gk20a *ch;
	struct dbg_session_channel_data_linux *ch_data_linux;
	struct dbg_session_data *session_data;
	int err = 0;

	gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
		  g->name, args->channel_fd);

	/*
	 * Although gk20a_get_channel_from_file gives us a channel ref, need to
	 * hold a ref to the file during the session lifetime. See comment in
	 * struct dbg_session_channel_data.
	 */
	f = fget(args->channel_fd);
	if (!f)
		return -ENODEV;

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch) {
		gk20a_dbg_fn("no channel found for fd");
		err = -EINVAL;
		goto out_fput;
	}

	gk20a_dbg_fn("%s hwchid=%d", g->name, ch->chid);

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	nvgpu_mutex_acquire(&ch->dbg_s_lock);

	ch_data_linux = nvgpu_kzalloc(g, sizeof(*ch_data_linux));
	if (!ch_data_linux) {
		err = -ENOMEM;
		goto out_chput;
	}
	ch_data_linux->ch_f = f;
	ch_data_linux->ch_data.channel_fd = args->channel_fd;
	ch_data_linux->ch_data.chid = ch->chid;
	ch_data_linux->ch_data.unbind_single_channel = dbg_unbind_single_channel_gk20a;
	nvgpu_init_list_node(&ch_data_linux->ch_data.ch_entry);

	session_data = nvgpu_kzalloc(g, sizeof(*session_data));
	if (!session_data) {
		err = -ENOMEM;
		goto out_kfree;
	}
	session_data->dbg_s = dbg_s;
	nvgpu_init_list_node(&session_data->dbg_s_entry);
	ch_data_linux->ch_data.session_data = session_data;

	nvgpu_list_add(&session_data->dbg_s_entry, &ch->dbg_s_list);

	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
	nvgpu_list_add_tail(&ch_data_linux->ch_data.ch_entry, &dbg_s->ch_list);
	nvgpu_mutex_release(&dbg_s->ch_list_lock);

	nvgpu_mutex_release(&ch->dbg_s_lock);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	gk20a_channel_put(ch);

	return 0;

out_kfree:
	nvgpu_kfree(g, ch_data_linux);
out_chput:
	gk20a_channel_put(ch);
	nvgpu_mutex_release(&ch->dbg_s_lock);
	nvgpu_mutex_release(&g->dbg_sessions_lock);
out_fput:
	fput(f);
	return err;
}

static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s)
{
	struct dbg_session_channel_data *ch_data, *tmp;
	struct gk20a *g = dbg_s->g;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
	nvgpu_list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list,
				dbg_session_channel_data, ch_entry)
		ch_data->unbind_single_channel(dbg_s, ch_data);
	nvgpu_mutex_release(&dbg_s->ch_list_lock);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return 0;
}

static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
				struct nvgpu_dbg_gpu_exec_reg_ops_args *args)
{
	int err = 0, powergate_err = 0;
	bool is_pg_disabled = false;

	struct gk20a *g = dbg_s->g;
	struct channel_gk20a *ch;

	gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);

	if (args->num_ops > NVGPU_IOCTL_DBG_REG_OPS_LIMIT) {
		nvgpu_err(g, "regops limit exceeded");
		return -EINVAL;
	}

	if (args->num_ops == 0) {
		/* Nothing to do */
		return 0;
	}

	if (g->dbg_regops_tmp_buf_ops == 0 || !g->dbg_regops_tmp_buf) {
		nvgpu_err(g, "reg ops work buffer not allocated");
		return -ENODEV;
	}

	if (!dbg_s->id) {
		nvgpu_err(g, "can't call reg_ops on an unbound debugger session");
		return -EINVAL;
	}

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!dbg_s->is_profiler && !ch) {
		nvgpu_err(g, "bind a channel before regops for a debugging session");
		return -EINVAL;
	}

	/* be sure that ctx info is in place */
	if (!g->is_virtual &&
		!gr_context_info_available(dbg_s, &g->gr)) {
		nvgpu_err(g, "gr context data not available");
		return -ENODEV;
	}

	/* since exec_reg_ops sends methods to the ucode, it must take the
	 * global gpu lock to protect against mixing methods from debug sessions
	 * on other channels */
	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	if (!dbg_s->is_pg_disabled && !g->is_virtual) {
		/* In the virtual case, the server will handle
		 * disabling/enabling powergating when processing reg ops
		 */
		powergate_err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
									true);
		is_pg_disabled = true;
	}

	if (!powergate_err) {
		u64 ops_offset = 0; /* index offset */

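		/* Process the regops in fragments: each pass copies at most
		 * dbg_regops_tmp_buf_ops entries from userspace, executes
		 * them, and copies the results back, so kernel memory use is
		 * bounded no matter how large args->num_ops is.
		 */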
		while (ops_offset < args->num_ops && !err) {
			const u64 num_ops =
				min(args->num_ops - ops_offset,
				    (u64)(g->dbg_regops_tmp_buf_ops));
			const u64 fragment_size =
				num_ops * sizeof(g->dbg_regops_tmp_buf[0]);

			void __user *const fragment =
				(void __user *)(uintptr_t)
				(args->ops +
				 ops_offset * sizeof(g->dbg_regops_tmp_buf[0]));

			gk20a_dbg_fn("Regops fragment: start_op=%llu ops=%llu",
				     ops_offset, num_ops);

			gk20a_dbg_fn("Copying regops from userspace");

			if (copy_from_user(g->dbg_regops_tmp_buf,
					   fragment, fragment_size)) {
				nvgpu_err(g, "copy_from_user failed!");
				err = -EFAULT;
				break;
			}

			err = g->ops.dbg_session_ops.exec_reg_ops(
				dbg_s, g->dbg_regops_tmp_buf, num_ops);

			gk20a_dbg_fn("Copying result to userspace");

			if (copy_to_user(fragment, g->dbg_regops_tmp_buf,
					 fragment_size)) {
				nvgpu_err(g, "copy_to_user failed!");
				err = -EFAULT;
				break;
			}

			ops_offset += num_ops;
		}

		/* enable powergate, if previously disabled */
		if (is_pg_disabled) {
			powergate_err =
				g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
									false);
		}
	}

	nvgpu_mutex_release(&g->dbg_sessions_lock);

	if (!err && powergate_err)
		err = powergate_err;

	if (err)
		nvgpu_err(g, "dbg regops failed");

	return err;
}

static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
				struct nvgpu_dbg_gpu_powergate_args *args)
{
	int err;
	struct gk20a *g = dbg_s->g;
	gk20a_dbg_fn("%s powergate mode = %d",
		     g->name, args->mode);

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	if (args->mode == NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE) {
		err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, true);
	} else if (args->mode == NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE) {
		err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, false);
	} else {
		nvgpu_err(g, "invalid powergate mode");
		err = -EINVAL;
	}
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
			       struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args)
{
	int err;
	struct gk20a *g = dbg_s->g;
	struct channel_gk20a *ch_gk20a;

	gk20a_dbg_fn("%s smpc ctxsw mode = %d",
		     g->name, args->mode);

	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to poweron");
		return err;
	}

	/* Take the global lock, since we'll be doing global regops */
	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch_gk20a) {
		nvgpu_err(g,
			  "no bound channel for smpc ctxsw mode update");
		err = -EINVAL;
		goto clean_up;
	}

	err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a,
			args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
	if (err) {
		nvgpu_err(g,
			  "error (%d) during smpc ctxsw mode update", err);
		goto clean_up;
	}

	err = g->ops.regops.apply_smpc_war(dbg_s);
clean_up:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	gk20a_idle(g);
	return err;
}

static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
			       struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args)
{
	int err;
	struct gk20a *g = dbg_s->g;
	struct channel_gk20a *ch_gk20a;

	gk20a_dbg_fn("%s pm ctxsw mode = %d",
		     g->name, args->mode);

	/* Must have a valid reservation to enable/disable hwpm cxtsw.
	 * Just print an error message for now, but eventually this should
	 * return an error, at the point where all client sw has been
	 * cleaned up.
	 */
	if (!dbg_s->has_profiler_reservation) {
		nvgpu_err(g,
			  "session doesn't have a valid reservation");
	}

	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to poweron");
		return err;
	}

	/* Take the global lock, since we'll be doing global regops */
	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch_gk20a) {
		nvgpu_err(g,
			  "no bound channel for pm ctxsw mode update");
		err = -EINVAL;
		goto clean_up;
	}

	err = g->ops.gr.update_hwpm_ctxsw_mode(g, ch_gk20a,
			args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW);
	if (err)
		nvgpu_err(g,
			  "error (%d) during pm ctxsw mode update", err);

	/* gk20a would require a WAR to set the core PM_ENABLE bit, not
	 * added here with gk20a being deprecated
	 */
clean_up:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	gk20a_idle(g);
	return err;
}

static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
{
	struct gk20a *g = dbg_s->g;
	struct channel_gk20a *ch;
	int err = 0, action = args->mode;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err) {
		nvgpu_err(g, "failed to poweron");
		return err;
	}

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	/* Suspend GPU context switching */
	err = gr_gk20a_disable_ctxsw(g);
	if (err) {
		nvgpu_err(g, "unable to stop gr ctxsw");
		/* this should probably be ctx-fatal... */
		goto clean_up;
	}

	switch (action) {
	case NVGPU_DBG_GPU_SUSPEND_ALL_SMS:
		gr_gk20a_suspend_context(ch);
		break;

	case NVGPU_DBG_GPU_RESUME_ALL_SMS:
		gr_gk20a_resume_context(ch);
		break;
	}

	err = gr_gk20a_enable_ctxsw(g);
	if (err)
		nvgpu_err(g, "unable to restart ctxsw!");

clean_up:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	gk20a_idle(g);

	return err;
}

static int nvgpu_ioctl_allocate_profiler_object(
			struct dbg_session_gk20a_linux *dbg_session_linux,
			struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args)
{
	int err = 0;
	struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
	struct gk20a *g = get_gk20a(dbg_session_linux->dev);
	struct dbg_profiler_object_data *prof_obj;

	gk20a_dbg_fn("%s", g->name);

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	err = alloc_profiler(g, &prof_obj);
	if (err)
		goto clean_up;

	prof_obj->session_id = dbg_s->id;

	if (dbg_s->is_profiler)
		prof_obj->ch = NULL;
	else {
		prof_obj->ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
		if (prof_obj->ch == NULL) {
			nvgpu_err(g,
				"bind a channel for dbg session");
			nvgpu_kfree(g, prof_obj);
			err = -EINVAL;
			goto clean_up;
		}
	}

	/* Return handle to client */
	args->profiler_handle = prof_obj->prof_handle;

	nvgpu_init_list_node(&prof_obj->prof_obj_entry);

	nvgpu_list_add(&prof_obj->prof_obj_entry, &g->profiler_objects);
clean_up:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

static int nvgpu_ioctl_free_profiler_object(
			struct dbg_session_gk20a_linux *dbg_s_linux,
			struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args)
{
	int err = 0;
	struct dbg_session_gk20a *dbg_s = &dbg_s_linux->dbg_s;
	struct gk20a *g = get_gk20a(dbg_s_linux->dev);
	struct dbg_profiler_object_data *prof_obj, *tmp_obj;
	bool obj_found = false;

	gk20a_dbg_fn("%s session_id = %d profiler_handle = %x",
		     g->name, dbg_s->id, args->profiler_handle);

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	/* Remove profiler object from the list, if a match is found */
	nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
		if (prof_obj->prof_handle == args->profiler_handle) {
			if (prof_obj->session_id != dbg_s->id) {
				nvgpu_err(g,
					"invalid handle %x",
					args->profiler_handle);
				err = -EINVAL;
				break;
			}
			if (prof_obj->has_reservation)
				g->ops.dbg_session_ops.
					release_profiler_reservation(dbg_s, prof_obj);
			nvgpu_list_del(&prof_obj->prof_obj_entry);
			nvgpu_kfree(g, prof_obj);
			obj_found = true;
			break;
		}
	}
	if (!obj_found) {
		nvgpu_err(g, "profiler %x not found",
			  args->profiler_handle);
		err = -EINVAL;
	}

	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

static struct dbg_profiler_object_data *find_matching_prof_obj(
					struct dbg_session_gk20a *dbg_s,
					u32 profiler_handle)
{
	struct gk20a *g = dbg_s->g;
	struct dbg_profiler_object_data *prof_obj;

	nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
		if (prof_obj->prof_handle == profiler_handle) {
			if (prof_obj->session_id != dbg_s->id) {
				nvgpu_err(g,
					"invalid handle %x",
					profiler_handle);
				return NULL;
			}
			return prof_obj;
		}
	}
	return NULL;
}

/* used in scenarios where the debugger session can take just the inter-session
 * lock for performance, but the profiler session must take the per-gpu lock
 * since it might not have an associated channel. */
static void gk20a_dbg_session_nvgpu_mutex_acquire(struct dbg_session_gk20a *dbg_s)
{
	struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);

	if (dbg_s->is_profiler || !ch)
		nvgpu_mutex_acquire(&dbg_s->g->dbg_sessions_lock);
	else
		nvgpu_mutex_acquire(&ch->dbg_s_lock);
}

static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_s)
{
	struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);

	if (dbg_s->is_profiler || !ch)
		nvgpu_mutex_release(&dbg_s->g->dbg_sessions_lock);
	else
		nvgpu_mutex_release(&ch->dbg_s_lock);
}

static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);

	dbg_s->dbg_events.events_enabled = true;
	dbg_s->dbg_events.num_pending_events = 0;

	gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
}

static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);

	dbg_s->dbg_events.events_enabled = false;
	dbg_s->dbg_events.num_pending_events = 0;

	gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
}

static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);

	if (dbg_s->dbg_events.events_enabled &&
			dbg_s->dbg_events.num_pending_events > 0)
		dbg_s->dbg_events.num_pending_events--;

	gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
}


static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
			  struct nvgpu_dbg_gpu_events_ctrl_args *args)
{
	int ret = 0;
	struct channel_gk20a *ch;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch) {
		nvgpu_err(dbg_s->g,
			  "no channel bound to dbg session");
		return -EINVAL;
	}

	switch (args->cmd) {
	case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE:
		gk20a_dbg_gpu_events_enable(dbg_s);
		break;

	case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_DISABLE:
		gk20a_dbg_gpu_events_disable(dbg_s);
		break;

	case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR:
		gk20a_dbg_gpu_events_clear(dbg_s);
		break;

	default:
		nvgpu_err(dbg_s->g,
			  "unrecognized dbg gpu events ctrl cmd: 0x%x",
			  args->cmd);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_perfbuf_map_args *args)
{
	struct gk20a *g = dbg_s->g;
	struct mm_gk20a *mm = &g->mm;
	int err;
	u32 virt_size;
	u32 big_page_size = g->ops.mm.get_default_big_page_size();

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	if (g->perfbuf.owner) {
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		return -EBUSY;
	}

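	/* The perf output buffer gets its own VM so that its GPU mapping is
	 * independent of any channel's address space.
	 */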
	mm->perfbuf.vm = nvgpu_vm_init(g, big_page_size,
			big_page_size << 10,
			NV_MM_DEFAULT_KERNEL_SIZE,
			NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE,
			false, false, "perfbuf");
	if (!mm->perfbuf.vm) {
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		return -ENOMEM;
	}

	err = nvgpu_vm_map_buffer(mm->perfbuf.vm,
			args->dmabuf_fd,
			&args->offset,
			0,
			0,
			0,
			0,
			args->mapping_size,
			NULL);
	if (err)
		goto err_remove_vm;

	/* perf output buffer may not cross a 4GB boundary */
	virt_size = u64_lo32(args->mapping_size);
	if (u64_hi32(args->offset) != u64_hi32(args->offset + virt_size)) {
		err = -EINVAL;
		goto err_unmap;
	}

	err = g->ops.dbg_session_ops.perfbuffer_enable(g,
						args->offset, virt_size);
	if (err)
		goto err_unmap;

	g->perfbuf.owner = dbg_s;
	g->perfbuf.offset = args->offset;
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return 0;

err_unmap:
	nvgpu_vm_unmap(mm->perfbuf.vm, args->offset, NULL);
err_remove_vm:
	nvgpu_vm_put(mm->perfbuf.vm);
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_perfbuf_unmap_args *args)
{
	struct gk20a *g = dbg_s->g;
	int err;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	if ((g->perfbuf.owner != dbg_s) ||
			(g->perfbuf.offset != args->offset)) {
		nvgpu_mutex_release(&g->dbg_sessions_lock);
		return -EINVAL;
	}

	err = gk20a_perfbuf_release_locked(g, args->offset);

	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return err;
}

static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
			  struct nvgpu_dbg_gpu_pc_sampling_args *args)
{
	struct channel_gk20a *ch;
	struct gk20a *g = dbg_s->g;

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch)
		return -EINVAL;

	gk20a_dbg_fn("");

	return g->ops.gr.update_pc_sampling ?
		g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL;
}

static int nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(
		struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *args)
{
	struct gk20a *g = dbg_s->g;
	struct gr_gk20a *gr = &g->gr;
	u32 sm_id;
	struct channel_gk20a *ch;
	int err = 0;

	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
	if (!ch)
		return -EINVAL;

	sm_id = args->sm_id;

	if (sm_id >= gr->no_of_sm)
		return -EINVAL;

	err = gk20a_busy(g);
	if (err)
		return err;

	err = gr_gk20a_elpg_protected_call(g,
			g->ops.gr.clear_sm_error_state(g, ch, sm_id));

	gk20a_idle(g);

	return err;
}

static int
nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(struct dbg_session_gk20a *dbg_s,
		struct nvgpu_dbg_gpu_suspend_resume_contexts_args *args)
{
	struct gk20a *g = dbg_s->g;
	int err = 0;
	int ctx_resident_ch_fd = -1;

	err = gk20a_busy(g);
	if (err)
		return err;

	switch (args->action) {
	case NVGPU_DBG_GPU_SUSPEND_ALL_CONTEXTS:
		err = g->ops.gr.suspend_contexts(g, dbg_s,
					&ctx_resident_ch_fd);
		break;

	case NVGPU_DBG_GPU_RESUME_ALL_CONTEXTS:
		err = g->ops.gr.resume_contexts(g, dbg_s,
					&ctx_resident_ch_fd);
		break;
	}

	if (ctx_resident_ch_fd < 0) {
		args->is_resident_context = 0;
	} else {
		args->is_resident_context = 1;
		args->resident_context_fd = ctx_resident_ch_fd;
	}

	gk20a_idle(g);

	return err;
}

static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
			  struct nvgpu_dbg_gpu_access_fb_memory_args *args)
{
	struct gk20a *g = dbg_s->g;
	struct dma_buf *dmabuf;
	void __user *user_buffer = (void __user *)(uintptr_t)args->buffer;
	void *buffer;
	u64 size, access_size, offset;
	u64 access_limit_size = SZ_4K;
	int err = 0;

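	/* offset and size must be non-zero and 4-byte aligned */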
	if ((args->offset & 3) || (!args->size) || (args->size & 3))
		return -EINVAL;

	dmabuf = dma_buf_get(args->dmabuf_fd);
	if (IS_ERR(dmabuf))
		return -EINVAL;

	if ((args->offset > dmabuf->size) ||
	    (args->size > dmabuf->size) ||
	    (args->offset + args->size > dmabuf->size)) {
		err = -EINVAL;
		goto fail_dmabuf_put;
	}

	buffer = nvgpu_big_zalloc(g, access_limit_size);
	if (!buffer) {
		err = -ENOMEM;
		goto fail_dmabuf_put;
	}

	size = args->size;
	offset = 0;

	err = gk20a_busy(g);
	if (err)
		goto fail_free_buffer;

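	/* Stream the access through a SZ_4K bounce buffer: for writes, copy
	 * user data in before touching vidmem; for reads, copy the vidmem
	 * contents back out to the user buffer chunk by chunk.
	 */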
	while (size) {
		/* Max access size of access_limit_size in one loop */
		access_size = min(access_limit_size, size);

		if (args->cmd ==
		    NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE) {
			err = copy_from_user(buffer, user_buffer + offset,
					     access_size);
			if (err) {
				err = -EFAULT;
				goto fail_idle;
			}
		}

		err = nvgpu_vidmem_buf_access_memory(g, dmabuf, buffer,
					args->offset + offset, access_size,
					args->cmd);
		if (err)
			goto fail_idle;

		if (args->cmd ==
		    NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ) {
			err = copy_to_user(user_buffer + offset,
					   buffer, access_size);
			if (err) {
				err = -EFAULT;
				goto fail_idle;
			}
		}

		size -= access_size;
		offset += access_size;
	}

fail_idle:
	gk20a_idle(g);
fail_free_buffer:
	nvgpu_big_free(g, buffer);
fail_dmabuf_put:
	dma_buf_put(dmabuf);

	return err;
}

static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s,
			   struct nvgpu_dbg_gpu_profiler_reserve_args *args)
{
	if (args->acquire)
		return nvgpu_profiler_reserve_acquire(dbg_s, args->profiler_handle);

	return nvgpu_profiler_reserve_release(dbg_s, args->profiler_handle);
}

static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s,
			 struct nvgpu_dbg_gpu_timeout_args *args)
{
	int status;
	struct gk20a *g = dbg_s->g;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	status = g->timeouts_enabled;
	nvgpu_mutex_release(&g->dbg_sessions_lock);

	if (status)
		args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE;
	else
		args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE;
}

/* In order to perform a context relative op the context has
 * to be created already... which would imply that the
 * context switch mechanism has already been put in place.
 * So by the time we perform such an operation it should always
 * be possible to query for the appropriate context offsets, etc.
 *
 * But note: while the dbg_gpu bind requires a channel fd,
 * it doesn't require an allocated gr/compute obj at that point...
 */
static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
				      struct gr_gk20a *gr)
{
	int err;

	nvgpu_mutex_acquire(&gr->ctx_mutex);
	err = !gr->ctx_vars.golden_image_initialized;
	nvgpu_mutex_release(&gr->ctx_mutex);
	if (err)
		return false;
	return true;
}

static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset)
{
	struct mm_gk20a *mm = &g->mm;
	struct vm_gk20a *vm = mm->perfbuf.vm;
	int err;

	err = g->ops.dbg_session_ops.perfbuffer_disable(g);

	nvgpu_vm_unmap(vm, offset, NULL);
	nvgpu_free_inst_block(g, &mm->perfbuf.inst_block);
	nvgpu_vm_put(vm);

	g->perfbuf.owner = NULL;
	g->perfbuf.offset = 0;
	return err;
}

static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
					  u32 profiler_handle)
{
	struct gk20a *g = dbg_s->g;
	struct dbg_profiler_object_data *prof_obj;
	int err = 0;

	gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle);

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	/* Find matching object. */
	prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);

	if (!prof_obj) {
		nvgpu_err(g, "object not found");
		err = -EINVAL;
		goto exit;
	}

	if (prof_obj->has_reservation)
		g->ops.dbg_session_ops.release_profiler_reservation(dbg_s, prof_obj);
	else {
		nvgpu_err(g, "No reservation found");
		err = -EINVAL;
		goto exit;
	}
exit:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

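/* A profiler reservation is either global (profiler session with no bound
 * channel) or per-context. A global reservation excludes all others, and a
 * per-context reservation is tracked per channel or per TSG.
 */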
static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
					  u32 profiler_handle)
{
	struct gk20a *g = dbg_s->g;
	struct dbg_profiler_object_data *prof_obj, *my_prof_obj;
	int err = 0;

	gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle);

	if (g->profiler_reservation_count < 0) {
		nvgpu_err(g, "Negative reservation count!");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	/* Find matching object. */
	my_prof_obj = find_matching_prof_obj(dbg_s, profiler_handle);

	if (!my_prof_obj) {
		nvgpu_err(g, "object not found");
		err = -EINVAL;
		goto exit;
	}

	/* If we already have the reservation, we're done */
	if (my_prof_obj->has_reservation) {
		err = 0;
		goto exit;
	}

	if (my_prof_obj->ch == NULL) {
		/* Global reservations are only allowed if there are no other
		 * global or per-context reservations currently held
		 */
		if (!g->ops.dbg_session_ops.check_and_set_global_reservation(
						dbg_s, my_prof_obj)) {
			nvgpu_err(g,
				"global reserve: have existing reservation");
			err = -EBUSY;
		}
	} else if (g->global_profiler_reservation_held) {
		/* If there's a global reservation,
		 * we can't take a per-context one.
		 */
		nvgpu_err(g,
			"per-ctxt reserve: global reservation in effect");
		err = -EBUSY;
	} else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) {
		/* TSG: check that another channel in the TSG
		 * doesn't already have the reservation
		 */
		int my_tsgid = my_prof_obj->ch->tsgid;

		nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
			if (prof_obj->has_reservation &&
					(prof_obj->ch->tsgid == my_tsgid)) {
				nvgpu_err(g,
				    "per-ctxt reserve (tsg): already reserved");
				err = -EBUSY;
				goto exit;
			}
		}

		if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
						dbg_s, my_prof_obj)) {
			/* Another guest OS has the global reservation */
			nvgpu_err(g,
				"per-ctxt reserve: global reservation in effect");
			err = -EBUSY;
		}
	} else {
		/* channel: check that some other profiler object doesn't
		 * already have the reservation.
		 */
		struct channel_gk20a *my_ch = my_prof_obj->ch;

		nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
			if (prof_obj->has_reservation &&
					(prof_obj->ch == my_ch)) {
				nvgpu_err(g,
				    "per-ctxt reserve (ch): already reserved");
				err = -EBUSY;
				goto exit;
			}
		}

		if (!g->ops.dbg_session_ops.check_and_set_context_reservation(
						dbg_s, my_prof_obj)) {
			/* Another guest OS has the global reservation */
			nvgpu_err(g,
				"per-ctxt reserve: global reservation in effect");
			err = -EBUSY;
		}
	}
exit:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
			  struct nvgpu_dbg_gpu_unbind_channel_args *args)
{
	struct dbg_session_channel_data *ch_data;
	struct gk20a *g = dbg_s->g;
	bool channel_found = false;
	struct channel_gk20a *ch;
	int err;

	gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
		  g->name, args->channel_fd);

	ch = gk20a_get_channel_from_file(args->channel_fd);
	if (!ch) {
		gk20a_dbg_fn("no channel found for fd");
		return -EINVAL;
	}

	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
	nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
				dbg_session_channel_data, ch_entry) {
		if (ch->chid == ch_data->chid) {
			channel_found = true;
			break;
		}
	}
	nvgpu_mutex_release(&dbg_s->ch_list_lock);

	if (!channel_found) {
1567 | gk20a_dbg_fn("channel not bounded, fd=%d\n", args->channel_fd); | ||
		err = -EINVAL;
		goto out;
	}

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
	nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
	err = dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
	nvgpu_mutex_release(&dbg_s->ch_list_lock);
	nvgpu_mutex_release(&g->dbg_sessions_lock);

out:
	gk20a_channel_put(ch);
	return err;
}

int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
{
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
	return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
}

long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	struct dbg_session_gk20a_linux *dbg_s_linux = filp->private_data;
	struct dbg_session_gk20a *dbg_s = &dbg_s_linux->dbg_s;
	struct gk20a *g = dbg_s->g;
	u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
	int err = 0;

	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");

	if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
	    (_IOC_NR(cmd) == 0) ||
	    (_IOC_NR(cmd) > NVGPU_DBG_GPU_IOCTL_LAST) ||
	    (_IOC_SIZE(cmd) > NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE))
		return -EINVAL;

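	/* Stage the ioctl argument in a stack buffer: copy it in here for
	 * _IOC_WRITE commands, and copy it back out after dispatch for
	 * _IOC_READ commands (see the end of this function).
	 */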
	memset(buf, 0, sizeof(buf));
	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	if (!g->gr.sw_ready) {
		err = gk20a_busy(g);
		if (err)
			return err;

		gk20a_idle(g);
	}

	/* protect from threaded user space calls */
	nvgpu_mutex_acquire(&dbg_s->ioctl_lock);

	switch (cmd) {
	case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL:
		err = dbg_bind_channel_gk20a(dbg_s,
			     (struct nvgpu_dbg_gpu_bind_channel_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_REG_OPS:
		err = nvgpu_ioctl_channel_reg_ops(dbg_s,
			   (struct nvgpu_dbg_gpu_exec_reg_ops_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_POWERGATE:
		err = nvgpu_ioctl_powergate_gk20a(dbg_s,
			   (struct nvgpu_dbg_gpu_powergate_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL:
		err = gk20a_dbg_gpu_events_ctrl(dbg_s,
			   (struct nvgpu_dbg_gpu_events_ctrl_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE:
		err = nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(dbg_s,
			   (struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_HWPM_CTXSW_MODE:
		err = nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(dbg_s,
			   (struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS:
		err = nvgpu_dbg_gpu_ioctl_suspend_resume_sm(dbg_s,
		       (struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PERFBUF_MAP:
		err = gk20a_perfbuf_map(dbg_s,
		       (struct nvgpu_dbg_gpu_perfbuf_map_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PERFBUF_UNMAP:
		err = gk20a_perfbuf_unmap(dbg_s,
		       (struct nvgpu_dbg_gpu_perfbuf_unmap_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PC_SAMPLING:
		err = gk20a_dbg_pc_sampling(dbg_s,
		       (struct nvgpu_dbg_gpu_pc_sampling_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_SET_NEXT_STOP_TRIGGER_TYPE:
		err = nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(dbg_s,
		       (struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_TIMEOUT:
		err = nvgpu_dbg_gpu_ioctl_timeout(dbg_s,
			   (struct nvgpu_dbg_gpu_timeout_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_GET_TIMEOUT:
		nvgpu_dbg_gpu_ioctl_get_timeout(dbg_s,
			   (struct nvgpu_dbg_gpu_timeout_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_READ_SINGLE_SM_ERROR_STATE:
		err = nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(dbg_s,
		   (struct nvgpu_dbg_gpu_read_single_sm_error_state_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_CLEAR_SINGLE_SM_ERROR_STATE:
		err = nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(dbg_s,
		  (struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_WRITE_SINGLE_SM_ERROR_STATE:
		err = nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(dbg_s,
		  (struct nvgpu_dbg_gpu_write_single_sm_error_state_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_UNBIND_CHANNEL:
		err = dbg_unbind_channel_gk20a(dbg_s,
			     (struct nvgpu_dbg_gpu_unbind_channel_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_CONTEXTS:
		err = nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(dbg_s,
		      (struct nvgpu_dbg_gpu_suspend_resume_contexts_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY:
		err = nvgpu_dbg_gpu_ioctl_access_fb_memory(dbg_s,
		      (struct nvgpu_dbg_gpu_access_fb_memory_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PROFILER_ALLOCATE:
		err = nvgpu_ioctl_allocate_profiler_object(dbg_s_linux,
		      (struct nvgpu_dbg_gpu_profiler_obj_mgt_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PROFILER_FREE:
		err = nvgpu_ioctl_free_profiler_object(dbg_s_linux,
		      (struct nvgpu_dbg_gpu_profiler_obj_mgt_args *)buf);
		break;

	case NVGPU_DBG_GPU_IOCTL_PROFILER_RESERVE:
		err = nvgpu_ioctl_profiler_reserve(dbg_s,
			   (struct nvgpu_dbg_gpu_profiler_reserve_args *)buf);
		break;

	default:
		nvgpu_err(g,
			  "unrecognized dbg gpu ioctl cmd: 0x%x",
			  cmd);
		err = -ENOTTY;
		break;
	}

	nvgpu_mutex_release(&dbg_s->ioctl_lock);

	gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		err = copy_to_user((void __user *)arg,
				   buf, _IOC_SIZE(cmd));

	return err;
}