Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c |    9
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c | 1682
-rw-r--r--  drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h |   29
-rw-r--r--  drivers/gpu/nvgpu/gk20a/regops_gk20a.c  |    3
4 files changed, 14 insertions, 1709 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 713c4215..e3fc61c0 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -51,6 +51,13 @@
 #include <nvgpu/hw/gk20a/hw_pbdma_gk20a.h>
 
 /*
+ * Note
+ * This is added for all the copy_from_user methods in this file, which need
+ * to be moved later to reduce dependency on Linux.
+ */
+#include <linux/uaccess.h>
+
+/*
  * This is required for nvgpu_vm_find_buffer() which is used in the tracing
  * code. Once we can get and access userspace buffers without requiring
  * direct dma_buf usage this can be removed.
@@ -623,7 +630,7 @@ unbind:
        list_for_each_entry_safe(ch_data, tmp,
                                &dbg_s->ch_list, ch_entry) {
                if (ch_data->chid == ch->chid)
-                       dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
+                       ch_data->unbind_single_channel(dbg_s, ch_data);
        }
        nvgpu_mutex_release(&dbg_s->ch_list_lock);
 }
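The hunk above is the functional piece of this patch for channel_gk20a.c: the channel teardown path stops calling the gk20a implementation by name and instead goes through a function pointer carried by the per-channel session data, so common code no longer hard-codes an OS-specific unbind routine. A minimal sketch of that indirection; the field layout and the `_linux` names below are illustrative assumptions, not the actual nvgpu declarations:

struct dbg_session_gk20a;

struct dbg_session_channel_data {
        int chid;
        /* set at bind time to the OS-specific teardown routine */
        int (*unbind_single_channel)(struct dbg_session_gk20a *dbg_s,
                        struct dbg_session_channel_data *ch_data);
};

/* Linux-specific implementation, registered when the channel is bound */
static int dbg_unbind_single_channel_linux(struct dbg_session_gk20a *dbg_s,
                struct dbg_session_channel_data *ch_data)
{
        /* ...release profiler reservations, drop file ref, free ch_data... */
        return 0;
}

static void dbg_bind_example(struct dbg_session_channel_data *ch_data)
{
        ch_data->unbind_single_channel = dbg_unbind_single_channel_linux;
}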
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index 135cb1e9..8c39ecb7 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -22,40 +22,22 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/cdev.h>
-#include <linux/uaccess.h>
-#include <linux/dma-buf.h>
-#include <uapi/linux/nvgpu.h>
-
 #include <nvgpu/kmem.h>
 #include <nvgpu/log.h>
 #include <nvgpu/vm.h>
 #include <nvgpu/atomic.h>
 
-#include <nvgpu/linux/vidmem.h>
-
 #include "gk20a.h"
 #include "gk20a/platform_gk20a.h"
 #include "gr_gk20a.h"
 #include "dbg_gpu_gk20a.h"
 #include "regops_gk20a.h"
-#include "common/linux/os_linux.h"
 
 #include <nvgpu/hw/gk20a/hw_therm_gk20a.h>
 #include <nvgpu/hw/gk20a/hw_gr_gk20a.h>
 #include <nvgpu/hw/gk20a/hw_perf_gk20a.h>
 
 /*
- * Currently this code uses nvgpu_vm_map_buffer() since it takes dmabuf FDs from
- * the dbg ioctls. That has to change; this needs to hide the usage of dmabufs
- * in Linux specific code. All core driver usage of mapping must be done through
- * nvgpu_gmmu_map().
- */
-#include "common/linux/vm_priv.h"
-
-/*
  * API to get first channel from the list of all channels
  * bound to the debug session
  */
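This first hunk strips every direct Linux include (linux/fs.h, linux/uaccess.h, linux/dma-buf.h, ...) and Linux-layer header out of the common dbg code, leaving only nvgpu/ abstraction headers. The layering pattern, sketched with the allocation wrappers this file already uses (the prototypes below are paraphrased from usage in this diff, not copied from nvgpu/kmem.h):

/* common code includes only the OS-neutral wrapper header... */
#include <nvgpu/kmem.h>

struct gk20a;

/* ...which declares roughly this API: */
void *nvgpu_kzalloc(struct gk20a *g, size_t size);
void nvgpu_kfree(struct gk20a *g, void *addr);

/* Only common/linux/ is allowed to include <linux/slab.h> and friends
 * to implement these wrappers, so the core driver stays portable.
 */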
@@ -82,240 +64,6 @@ nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s)
        return ch;
 }
 
-/* silly allocator - just increment id */
-static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0);
-static int generate_unique_id(void)
-{
-       return nvgpu_atomic_add_return(1, &unique_id);
-}
-
-static int alloc_session(struct gk20a *g, struct dbg_session_gk20a **_dbg_s)
-{
-       struct dbg_session_gk20a *dbg_s;
-       *_dbg_s = NULL;
-
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
-
-       dbg_s = nvgpu_kzalloc(g, sizeof(*dbg_s));
-       if (!dbg_s)
-               return -ENOMEM;
-
-       dbg_s->id = generate_unique_id();
-       *_dbg_s = dbg_s;
-       return 0;
-}
-
-static int alloc_profiler(struct gk20a *g,
-                         struct dbg_profiler_object_data **_prof)
-{
-       struct dbg_profiler_object_data *prof;
-       *_prof = NULL;
-
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
-
-       prof = nvgpu_kzalloc(g, sizeof(*prof));
-       if (!prof)
-               return -ENOMEM;
-
-       prof->prof_handle = generate_unique_id();
-       *_prof = prof;
-       return 0;
-}
-
-static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
-               struct file *filp, bool is_profiler)
-{
-       struct nvgpu_os_linux *l;
-       struct dbg_session_gk20a *dbg_session;
-       struct gk20a *g;
-
-       struct device *dev;
-
-       int err;
-
-       if (!is_profiler)
-               l = container_of(inode->i_cdev,
-                                struct nvgpu_os_linux, dbg.cdev);
-       else
-               l = container_of(inode->i_cdev,
-                                struct nvgpu_os_linux, prof.cdev);
-       g = gk20a_get(&l->g);
-       if (!g)
-               return -ENODEV;
-
-       dev = dev_from_gk20a(g);
-
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name);
-
-       err = alloc_session(g, &dbg_session);
-       if (err)
-               goto free_ref;
-
-       filp->private_data = dbg_session;
-       dbg_session->dev = dev;
-       dbg_session->g = g;
-       dbg_session->is_profiler = is_profiler;
-       dbg_session->is_pg_disabled = false;
-       dbg_session->is_timeout_disabled = false;
-
-       nvgpu_cond_init(&dbg_session->dbg_events.wait_queue);
-       nvgpu_init_list_node(&dbg_session->ch_list);
-       err = nvgpu_mutex_init(&dbg_session->ch_list_lock);
-       if (err)
-               goto err_free_session;
-       err = nvgpu_mutex_init(&dbg_session->ioctl_lock);
-       if (err)
-               goto err_destroy_lock;
-       dbg_session->dbg_events.events_enabled = false;
-       dbg_session->dbg_events.num_pending_events = 0;
-
-       return 0;
-
-err_destroy_lock:
-       nvgpu_mutex_destroy(&dbg_session->ch_list_lock);
-err_free_session:
-       nvgpu_kfree(g, dbg_session);
-free_ref:
-       gk20a_put(g);
-       return err;
-}
-
-/* used in scenarios where the debugger session can take just the inter-session
- * lock for performance, but the profiler session must take the per-gpu lock
- * since it might not have an associated channel. */
-static void gk20a_dbg_session_nvgpu_mutex_acquire(struct dbg_session_gk20a *dbg_s)
-{
-       struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
-
-       if (dbg_s->is_profiler || !ch)
-               nvgpu_mutex_acquire(&dbg_s->g->dbg_sessions_lock);
-       else
-               nvgpu_mutex_acquire(&ch->dbg_s_lock);
-}
-
-static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_s)
-{
-       struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
-
-       if (dbg_s->is_profiler || !ch)
-               nvgpu_mutex_release(&dbg_s->g->dbg_sessions_lock);
-       else
-               nvgpu_mutex_release(&ch->dbg_s_lock);
-}
-
-static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
-{
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
-
-       gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
-
-       dbg_s->dbg_events.events_enabled = true;
-       dbg_s->dbg_events.num_pending_events = 0;
-
-       gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
-}
-
-static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
-{
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
-
-       gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
-
-       dbg_s->dbg_events.events_enabled = false;
-       dbg_s->dbg_events.num_pending_events = 0;
-
-       gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
-}
-
-static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
-{
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
-
-       gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
-
-       if (dbg_s->dbg_events.events_enabled &&
-                       dbg_s->dbg_events.num_pending_events > 0)
-               dbg_s->dbg_events.num_pending_events--;
-
-       gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
-}
-
-static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
-                         struct nvgpu_dbg_gpu_events_ctrl_args *args)
-{
-       int ret = 0;
-       struct channel_gk20a *ch;
-
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
-
-       ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
-       if (!ch) {
-               nvgpu_err(dbg_s->g,
-                         "no channel bound to dbg session");
-               return -EINVAL;
-       }
-
-       switch (args->cmd) {
-       case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE:
-               gk20a_dbg_gpu_events_enable(dbg_s);
-               break;
-
-       case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_DISABLE:
-               gk20a_dbg_gpu_events_disable(dbg_s);
-               break;
-
-       case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR:
-               gk20a_dbg_gpu_events_clear(dbg_s);
-               break;
-
-       default:
-               nvgpu_err(dbg_s->g,
-                          "unrecognized dbg gpu events ctrl cmd: 0x%x",
-                          args->cmd);
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
-{
-       unsigned int mask = 0;
-       struct dbg_session_gk20a *dbg_s = filep->private_data;
-
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
-
-       poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait);
-
-       gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
-
-       if (dbg_s->dbg_events.events_enabled &&
-                       dbg_s->dbg_events.num_pending_events > 0) {
-               gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d",
-                               dbg_s->id);
-               gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
-                               dbg_s->dbg_events.num_pending_events);
-               mask = (POLLPRI | POLLIN);
-       }
-
-       gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
-
-       return mask;
-}
-
-int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
-{
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
-       return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
-}
-
-int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
-{
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
-       return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
-}
-
 void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
 {
        struct dbg_session_data *session_data;
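The block removed above carries the whole session lifecycle (ID allocation, open for the dbg and prof device nodes, event enable/disable/clear, poll) out of gk20a/ so it can live in the Linux layer. The userspace contract is unchanged: a client blocks in poll() and is woken with POLLPRI | POLLIN once pending events exist. A small illustrative client, with the device path given only as an assumed example:

#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

/* Wait for one debug event on an nvgpu dbg session fd.
 * "/dev/nvhost-dbg-gpu" is an assumed node name for illustration.
 */
static int wait_for_dbg_event(void)
{
        int fd = open("/dev/nvhost-dbg-gpu", O_RDWR);
        struct pollfd pfd;

        if (fd < 0)
                return -1;

        pfd.fd = fd;
        pfd.events = POLLPRI | POLLIN;  /* what the driver signals */

        /* gk20a_dbg_gpu_dev_poll() returns this mask only while events
         * are enabled and num_pending_events > 0.
         */
        if (poll(&pfd, 1, -1) < 0) {
                close(fd);
                return -1;
        }
        close(fd);
        return pfd.revents;
}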
@@ -396,917 +144,6 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
        return 0;
 }
 
-static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
-                         int timeout_mode)
-{
-       struct gk20a *g = dbg_s->g;
-       int err = 0;
-
-       gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
-                       timeout_mode);
-
-       switch (timeout_mode) {
-       case NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE:
-               if (dbg_s->is_timeout_disabled &&
-                   --g->dbg_timeout_disabled_refcount == 0) {
-                       g->timeouts_enabled = true;
-               }
-               dbg_s->is_timeout_disabled = false;
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE:
-               if ((dbg_s->is_timeout_disabled == false) &&
-                   (g->dbg_timeout_disabled_refcount++ == 0)) {
-                       g->timeouts_enabled = false;
-               }
-               dbg_s->is_timeout_disabled = true;
-               break;
-
-       default:
-               nvgpu_err(g,
-                          "unrecognized dbg gpu timeout mode : 0x%x",
-                          timeout_mode);
-               err = -EINVAL;
-               break;
-       }
-
-       gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
-                       g->timeouts_enabled ? "Yes" : "No");
-
-       return err;
-}
-
-int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
-                       struct dbg_session_channel_data *ch_data)
-{
-       struct gk20a *g = dbg_s->g;
-       int chid;
-       struct dbg_session_data *session_data;
-       struct dbg_profiler_object_data *prof_obj, *tmp_obj;
-
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
-
-       chid = ch_data->chid;
-
-       /* If there's a profiler ctx reservation record associated with this
-        * session/channel pair, release it.
-        */
-       nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
-                               dbg_profiler_object_data, prof_obj_entry) {
-               if ((prof_obj->session_id == dbg_s->id) &&
-                       (prof_obj->ch->chid == chid)) {
-                       if (prof_obj->has_reservation) {
-                               g->ops.dbg_session_ops.
-                                 release_profiler_reservation(dbg_s, prof_obj);
-                       }
-                       nvgpu_list_del(&prof_obj->prof_obj_entry);
-                       nvgpu_kfree(g, prof_obj);
-               }
-       }
-
-       nvgpu_list_del(&ch_data->ch_entry);
-
-       session_data = ch_data->session_data;
-       nvgpu_list_del(&session_data->dbg_s_entry);
-       nvgpu_kfree(dbg_s->g, session_data);
-
-       fput(ch_data->ch_f);
-       nvgpu_kfree(dbg_s->g, ch_data);
-
-       return 0;
-}
-
-static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s)
-{
-       struct dbg_session_channel_data *ch_data, *tmp;
-       struct gk20a *g = dbg_s->g;
-
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-       nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
-       nvgpu_list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list,
-                               dbg_session_channel_data, ch_entry)
-               dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
-       nvgpu_mutex_release(&dbg_s->ch_list_lock);
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-
-       return 0;
-}
-
-static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
-                         struct nvgpu_dbg_gpu_unbind_channel_args *args)
-{
-       struct dbg_session_channel_data *ch_data;
-       struct gk20a *g = dbg_s->g;
-       bool channel_found = false;
-       struct channel_gk20a *ch;
-       int err;
-
-       gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
-                  g->name, args->channel_fd);
-
-       ch = gk20a_get_channel_from_file(args->channel_fd);
-       if (!ch) {
-               gk20a_dbg_fn("no channel found for fd");
-               return -EINVAL;
-       }
-
-       nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
-       nvgpu_list_for_each_entry(ch_data, &dbg_s->ch_list,
-                               dbg_session_channel_data, ch_entry) {
-               if (ch->chid == ch_data->chid) {
-                       channel_found = true;
-                       break;
-               }
-       }
-       nvgpu_mutex_release(&dbg_s->ch_list_lock);
-
-       if (!channel_found) {
-               gk20a_dbg_fn("channel not bounded, fd=%d\n", args->channel_fd);
-               err = -EINVAL;
-               goto out;
-       }
-
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-       nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
-       err = dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
-       nvgpu_mutex_release(&dbg_s->ch_list_lock);
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-
-out:
-       gk20a_channel_put(ch);
-       return err;
-}
-
-static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset);
-
-int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
-{
-       struct dbg_session_gk20a *dbg_s = filp->private_data;
-       struct gk20a *g = dbg_s->g;
-       struct dbg_profiler_object_data *prof_obj, *tmp_obj;
-
-       gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name);
-
-       /* unbind channels */
-       dbg_unbind_all_channels_gk20a(dbg_s);
-
-       /* Powergate/Timeout enable is called here as possibility of dbg_session
-        * which called powergate/timeout disable ioctl, to be killed without
-        * calling powergate/timeout enable ioctl
-        */
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-       g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
-                               NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
-       nvgpu_dbg_timeout_enable(dbg_s, NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE);
-
-       /* If this session owned the perf buffer, release it */
-       if (g->perfbuf.owner == dbg_s)
-               gk20a_perfbuf_release_locked(g, g->perfbuf.offset);
-
-       /* Per-context profiler objects were released when we called
-        * dbg_unbind_all_channels. We could still have global ones.
-        */
-       nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
-                               dbg_profiler_object_data, prof_obj_entry) {
-               if (prof_obj->session_id == dbg_s->id) {
-                       if (prof_obj->has_reservation)
-                               g->ops.dbg_session_ops.
-                                 release_profiler_reservation(dbg_s, prof_obj);
-                       nvgpu_list_del(&prof_obj->prof_obj_entry);
-                       nvgpu_kfree(g, prof_obj);
-               }
-       }
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-
-       nvgpu_mutex_destroy(&dbg_s->ch_list_lock);
-       nvgpu_mutex_destroy(&dbg_s->ioctl_lock);
-
-       nvgpu_kfree(g, dbg_s);
-       gk20a_put(g);
-
-       return 0;
-}
-
-static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
-                         struct nvgpu_dbg_gpu_bind_channel_args *args)
-{
-       struct file *f;
-       struct gk20a *g = dbg_s->g;
-       struct channel_gk20a *ch;
-       struct dbg_session_channel_data *ch_data;
-       struct dbg_session_data *session_data;
-       int err = 0;
-
-       gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
-                  g->name, args->channel_fd);
-
-       /*
-        * Although gk20a_get_channel_from_file gives us a channel ref, need to
-        * hold a ref to the file during the session lifetime. See comment in
-        * struct dbg_session_channel_data.
-        */
-       f = fget(args->channel_fd);
-       if (!f)
-               return -ENODEV;
-
-       ch = gk20a_get_channel_from_file(args->channel_fd);
-       if (!ch) {
-               gk20a_dbg_fn("no channel found for fd");
-               err = -EINVAL;
-               goto out_fput;
-       }
-
-       gk20a_dbg_fn("%s hwchid=%d", g->name, ch->chid);
-
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-       nvgpu_mutex_acquire(&ch->dbg_s_lock);
-
-       ch_data = nvgpu_kzalloc(g, sizeof(*ch_data));
-       if (!ch_data) {
-               err = -ENOMEM;
-               goto out_chput;
-       }
-       ch_data->ch_f = f;
-       ch_data->channel_fd = args->channel_fd;
-       ch_data->chid = ch->chid;
-       nvgpu_init_list_node(&ch_data->ch_entry);
-
-       session_data = nvgpu_kzalloc(g, sizeof(*session_data));
-       if (!session_data) {
-               err = -ENOMEM;
-               goto out_kfree;
-       }
-       session_data->dbg_s = dbg_s;
-       nvgpu_init_list_node(&session_data->dbg_s_entry);
-       ch_data->session_data = session_data;
-
-       nvgpu_list_add(&session_data->dbg_s_entry, &ch->dbg_s_list);
-
-       nvgpu_mutex_acquire(&dbg_s->ch_list_lock);
-       nvgpu_list_add_tail(&ch_data->ch_entry, &dbg_s->ch_list);
-       nvgpu_mutex_release(&dbg_s->ch_list_lock);
-
-       nvgpu_mutex_release(&ch->dbg_s_lock);
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-
-       gk20a_channel_put(ch);
-
-       return 0;
-
-out_kfree:
-       nvgpu_kfree(g, ch_data);
-out_chput:
-       gk20a_channel_put(ch);
-       nvgpu_mutex_release(&ch->dbg_s_lock);
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-out_fput:
-       fput(f);
-       return err;
-}
-
-static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
-                               struct nvgpu_dbg_gpu_exec_reg_ops_args *args);
-
-static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
-                               struct nvgpu_dbg_gpu_powergate_args *args);
-
-static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
-                               struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args);
-
-static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
-                               struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args);
-
-static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
-               struct dbg_session_gk20a *dbg_s,
-               struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args);
-
-static int nvgpu_ioctl_allocate_profiler_object(struct dbg_session_gk20a *dbg_s,
-                               struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args);
-
-static int nvgpu_ioctl_free_profiler_object(struct dbg_session_gk20a *dbg_s,
-                               struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args);
-
-static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s,
-                          struct nvgpu_dbg_gpu_profiler_reserve_args *args);
-
-static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s,
-               struct nvgpu_dbg_gpu_perfbuf_map_args *args);
-
-static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
-               struct nvgpu_dbg_gpu_perfbuf_unmap_args *args);
-
-static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
-                         struct nvgpu_dbg_gpu_pc_sampling_args *args)
-{
-       struct channel_gk20a *ch;
-       struct gk20a *g = dbg_s->g;
-
-       ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
-       if (!ch)
-               return -EINVAL;
-
-       gk20a_dbg_fn("");
-
-       return g->ops.gr.update_pc_sampling ?
-               g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL;
-}
-
-static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
-                        struct nvgpu_dbg_gpu_timeout_args *args)
-{
-       int err;
-       struct gk20a *g = dbg_s->g;
-
-       gk20a_dbg_fn("powergate mode = %d", args->enable);
-
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-       err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-
-       return err;
-}
-
-static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s,
-                        struct nvgpu_dbg_gpu_timeout_args *args)
-{
-       int status;
-       struct gk20a *g = dbg_s->g;
-
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-       status = g->timeouts_enabled;
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-
-       if (status)
-               args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_ENABLE;
-       else
-               args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE;
-}
-
-static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(
-               struct dbg_session_gk20a *dbg_s,
-               struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args)
-{
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
-
-       gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
-
-       dbg_s->broadcast_stop_trigger = (args->broadcast != 0);
-
-       gk20a_dbg_session_nvgpu_mutex_release(dbg_s);
-
-       return 0;
-}
-
-static int nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(
-               struct dbg_session_gk20a *dbg_s,
-               struct nvgpu_dbg_gpu_read_single_sm_error_state_args *args)
-{
-       struct gk20a *g = dbg_s->g;
-       struct gr_gk20a *gr = &g->gr;
-       struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_state;
-       u32 sm_id;
-       int err = 0;
-
-       sm_id = args->sm_id;
-       if (sm_id >= gr->no_of_sm)
-               return -EINVAL;
-
-       sm_error_state = gr->sm_error_states + sm_id;
-
-       if (args->sm_error_state_record_size > 0) {
-               size_t write_size = sizeof(*sm_error_state);
-
-               if (write_size > args->sm_error_state_record_size)
-                       write_size = args->sm_error_state_record_size;
-
-               nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-               err = copy_to_user((void __user *)(uintptr_t)
-                                               args->sm_error_state_record_mem,
-                                  sm_error_state,
-                                  write_size);
-               nvgpu_mutex_release(&g->dbg_sessions_lock);
-               if (err) {
-                       nvgpu_err(g, "copy_to_user failed!");
-                       return err;
-               }
-
-               args->sm_error_state_record_size = write_size;
-       }
-
-       return 0;
-}
-
-static int nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(
-               struct dbg_session_gk20a *dbg_s,
-               struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *args)
-{
-       struct gk20a *g = dbg_s->g;
-       struct gr_gk20a *gr = &g->gr;
-       u32 sm_id;
-       struct channel_gk20a *ch;
-       int err = 0;
-
-       ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
-       if (!ch)
-               return -EINVAL;
-
-       sm_id = args->sm_id;
-
-       if (sm_id >= gr->no_of_sm)
-               return -EINVAL;
-
-       err = gk20a_busy(g);
-       if (err)
-               return err;
-
-       err = gr_gk20a_elpg_protected_call(g,
-                       g->ops.gr.clear_sm_error_state(g, ch, sm_id));
-
-       gk20a_idle(g);
-
-       return err;
-}
-
-static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(
-               struct dbg_session_gk20a *dbg_s,
-               struct nvgpu_dbg_gpu_write_single_sm_error_state_args *args)
-{
-       struct gk20a *g = dbg_s->g;
-       struct gr_gk20a *gr = &g->gr;
-       u32 sm_id;
-       struct channel_gk20a *ch;
-       struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_state;
-       int err = 0;
-
-       /* Not currently supported in the virtual case */
-       if (g->is_virtual)
-               return -ENOSYS;
-
-       ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
-       if (!ch)
-               return -EINVAL;
-
-       sm_id = args->sm_id;
-       if (sm_id >= gr->no_of_sm)
-               return -EINVAL;
-
-       sm_error_state = nvgpu_kzalloc(g, sizeof(*sm_error_state));
-       if (!sm_error_state)
-               return -ENOMEM;
-
-       if (args->sm_error_state_record_size > 0) {
-               size_t read_size = sizeof(*sm_error_state);
-
-               if (read_size > args->sm_error_state_record_size)
-                       read_size = args->sm_error_state_record_size;
-
-               nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-               err = copy_from_user(sm_error_state,
-                                    (void __user *)(uintptr_t)
-                                       args->sm_error_state_record_mem,
-                                    read_size);
-               nvgpu_mutex_release(&g->dbg_sessions_lock);
-               if (err) {
-                       err = -ENOMEM;
-                       goto err_free;
-               }
-       }
-
-       err = gk20a_busy(g);
-       if (err)
-               goto err_free;
-
-       err = gr_gk20a_elpg_protected_call(g,
-                       g->ops.gr.update_sm_error_state(g, ch,
-                                       sm_id, sm_error_state));
-
-       gk20a_idle(g);
-
-err_free:
-       nvgpu_kfree(g, sm_error_state);
-
-       return err;
-}
-
-static int
-nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(struct dbg_session_gk20a *dbg_s,
-               struct nvgpu_dbg_gpu_suspend_resume_contexts_args *args)
-{
-       struct gk20a *g = dbg_s->g;
-       int err = 0;
-       int ctx_resident_ch_fd = -1;
-
-       err = gk20a_busy(g);
-       if (err)
-               return err;
-
-       switch (args->action) {
-       case NVGPU_DBG_GPU_SUSPEND_ALL_CONTEXTS:
-               err = g->ops.gr.suspend_contexts(g, dbg_s,
-                                       &ctx_resident_ch_fd);
-               break;
-
-       case NVGPU_DBG_GPU_RESUME_ALL_CONTEXTS:
-               err = g->ops.gr.resume_contexts(g, dbg_s,
-                                       &ctx_resident_ch_fd);
-               break;
-       }
-
-       if (ctx_resident_ch_fd < 0) {
-               args->is_resident_context = 0;
-       } else {
-               args->is_resident_context = 1;
-               args->resident_context_fd = ctx_resident_ch_fd;
-       }
-
-       gk20a_idle(g);
-
-       return err;
-}
-
-static int nvgpu_dbg_gpu_ioctl_access_fb_memory(struct dbg_session_gk20a *dbg_s,
-               struct nvgpu_dbg_gpu_access_fb_memory_args *args)
-{
-       struct gk20a *g = dbg_s->g;
-       struct dma_buf *dmabuf;
-       void __user *user_buffer = (void __user *)(uintptr_t)args->buffer;
-       void *buffer;
-       u64 size, access_size, offset;
-       u64 access_limit_size = SZ_4K;
-       int err = 0;
-
-       if ((args->offset & 3) || (!args->size) || (args->size & 3))
-               return -EINVAL;
-
-       dmabuf = dma_buf_get(args->dmabuf_fd);
-       if (IS_ERR(dmabuf))
-               return -EINVAL;
-
-       if ((args->offset > dmabuf->size) ||
-           (args->size > dmabuf->size) ||
-           (args->offset + args->size > dmabuf->size)) {
-               err = -EINVAL;
-               goto fail_dmabuf_put;
-       }
-
-       buffer = nvgpu_big_zalloc(g, access_limit_size);
-       if (!buffer) {
-               err = -ENOMEM;
-               goto fail_dmabuf_put;
-       }
-
-       size = args->size;
-       offset = 0;
-
-       err = gk20a_busy(g);
-       if (err)
-               goto fail_free_buffer;
-
-       while (size) {
-               /* Max access size of access_limit_size in one loop */
-               access_size = min(access_limit_size, size);
-
-               if (args->cmd ==
-                   NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_WRITE) {
-                       err = copy_from_user(buffer, user_buffer + offset,
-                                            access_size);
-                       if (err)
-                               goto fail_idle;
-               }
-
-               err = nvgpu_vidmem_buf_access_memory(g, dmabuf, buffer,
-                                        args->offset + offset, access_size,
-                                        args->cmd);
-               if (err)
-                       goto fail_idle;
-
-               if (args->cmd ==
-                   NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY_CMD_READ) {
-                       err = copy_to_user(user_buffer + offset,
-                                          buffer, access_size);
-                       if (err)
-                               goto fail_idle;
-               }
-
-               size -= access_size;
-               offset += access_size;
-       }
-
-fail_idle:
-       gk20a_idle(g);
-fail_free_buffer:
-       nvgpu_big_free(g, buffer);
-fail_dmabuf_put:
-       dma_buf_put(dmabuf);
-
-       return err;
-}
-
-long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
-                            unsigned long arg)
-{
-       struct dbg_session_gk20a *dbg_s = filp->private_data;
-       struct gk20a *g = dbg_s->g;
-       u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
-       int err = 0;
-
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
-
-       if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
-           (_IOC_NR(cmd) == 0) ||
-           (_IOC_NR(cmd) > NVGPU_DBG_GPU_IOCTL_LAST) ||
-           (_IOC_SIZE(cmd) > NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE))
-               return -EINVAL;
-
-       memset(buf, 0, sizeof(buf));
-       if (_IOC_DIR(cmd) & _IOC_WRITE) {
-               if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
-                       return -EFAULT;
-       }
-
-       if (!g->gr.sw_ready) {
-               err = gk20a_busy(g);
-               if (err)
-                       return err;
-
-               gk20a_idle(g);
-       }
-
-       /* protect from threaded user space calls */
-       nvgpu_mutex_acquire(&dbg_s->ioctl_lock);
-
-       switch (cmd) {
-       case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL:
-               err = dbg_bind_channel_gk20a(dbg_s,
-                            (struct nvgpu_dbg_gpu_bind_channel_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_REG_OPS:
-               err = nvgpu_ioctl_channel_reg_ops(dbg_s,
-                          (struct nvgpu_dbg_gpu_exec_reg_ops_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_POWERGATE:
-               err = nvgpu_ioctl_powergate_gk20a(dbg_s,
-                          (struct nvgpu_dbg_gpu_powergate_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL:
-               err = gk20a_dbg_gpu_events_ctrl(dbg_s,
-                          (struct nvgpu_dbg_gpu_events_ctrl_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE:
-               err = nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(dbg_s,
-                          (struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_HWPM_CTXSW_MODE:
-               err = nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(dbg_s,
-                          (struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS:
-               err = nvgpu_dbg_gpu_ioctl_suspend_resume_sm(dbg_s,
-                      (struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_PERFBUF_MAP:
-               err = gk20a_perfbuf_map(dbg_s,
-                      (struct nvgpu_dbg_gpu_perfbuf_map_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_PERFBUF_UNMAP:
-               err = gk20a_perfbuf_unmap(dbg_s,
-                      (struct nvgpu_dbg_gpu_perfbuf_unmap_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_PC_SAMPLING:
-               err = gk20a_dbg_pc_sampling(dbg_s,
-                          (struct nvgpu_dbg_gpu_pc_sampling_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_SET_NEXT_STOP_TRIGGER_TYPE:
-               err = nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(dbg_s,
-                      (struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_TIMEOUT:
-               err = nvgpu_dbg_gpu_ioctl_timeout(dbg_s,
-                          (struct nvgpu_dbg_gpu_timeout_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_GET_TIMEOUT:
-               nvgpu_dbg_gpu_ioctl_get_timeout(dbg_s,
-                          (struct nvgpu_dbg_gpu_timeout_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_READ_SINGLE_SM_ERROR_STATE:
-               err = nvgpu_dbg_gpu_ioctl_read_single_sm_error_state(dbg_s,
-                      (struct nvgpu_dbg_gpu_read_single_sm_error_state_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_CLEAR_SINGLE_SM_ERROR_STATE:
-               err = nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(dbg_s,
-                      (struct nvgpu_dbg_gpu_clear_single_sm_error_state_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_WRITE_SINGLE_SM_ERROR_STATE:
-               err = nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(dbg_s,
-                      (struct nvgpu_dbg_gpu_write_single_sm_error_state_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_UNBIND_CHANNEL:
-               err = dbg_unbind_channel_gk20a(dbg_s,
-                            (struct nvgpu_dbg_gpu_unbind_channel_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_CONTEXTS:
-               err = nvgpu_dbg_gpu_ioctl_suspend_resume_contexts(dbg_s,
-                      (struct nvgpu_dbg_gpu_suspend_resume_contexts_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_ACCESS_FB_MEMORY:
-               err = nvgpu_dbg_gpu_ioctl_access_fb_memory(dbg_s,
-                      (struct nvgpu_dbg_gpu_access_fb_memory_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_PROFILER_ALLOCATE:
-               err = nvgpu_ioctl_allocate_profiler_object(dbg_s,
-                      (struct nvgpu_dbg_gpu_profiler_obj_mgt_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_PROFILER_FREE:
-               err = nvgpu_ioctl_free_profiler_object(dbg_s,
-                      (struct nvgpu_dbg_gpu_profiler_obj_mgt_args *)buf);
-               break;
-
-       case NVGPU_DBG_GPU_IOCTL_PROFILER_RESERVE:
-               err = nvgpu_ioctl_profiler_reserve(dbg_s,
-                      (struct nvgpu_dbg_gpu_profiler_reserve_args *)buf);
-               break;
-
-       default:
-               nvgpu_err(g,
-                          "unrecognized dbg gpu ioctl cmd: 0x%x",
-                          cmd);
-               err = -ENOTTY;
-               break;
-       }
-
-       nvgpu_mutex_release(&dbg_s->ioctl_lock);
-
-       gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
-
-       if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
-               err = copy_to_user((void __user *)arg,
-                                  buf, _IOC_SIZE(cmd));
-
-       return err;
-}
-
-/* In order to perform a context relative op the context has
- * to be created already... which would imply that the
- * context switch mechanism has already been put in place.
- * So by the time we perform such an opertation it should always
- * be possible to query for the appropriate context offsets, etc.
- *
- * But note: while the dbg_gpu bind requires the a channel fd,
- * it doesn't require an allocated gr/compute obj at that point...
- */
-static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
-                                     struct gr_gk20a *gr)
-{
-       int err;
-
-       nvgpu_mutex_acquire(&gr->ctx_mutex);
-       err = !gr->ctx_vars.golden_image_initialized;
-       nvgpu_mutex_release(&gr->ctx_mutex);
-       if (err)
-               return false;
-       return true;
-
-}
-
-static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
-                               struct nvgpu_dbg_gpu_exec_reg_ops_args *args)
-{
-       int err = 0, powergate_err = 0;
-       bool is_pg_disabled = false;
-
-       struct gk20a *g = dbg_s->g;
-       struct channel_gk20a *ch;
-
-       gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);
-
-       if (args->num_ops > g->gpu_characteristics.reg_ops_limit) {
-               nvgpu_err(g, "regops limit exceeded");
-               return -EINVAL;
-       }
-
-       if (args->num_ops == 0) {
-               /* Nothing to do */
-               return 0;
-       }
-
-       if (g->dbg_regops_tmp_buf_ops == 0 || !g->dbg_regops_tmp_buf) {
-               nvgpu_err(g, "reg ops work buffer not allocated");
-               return -ENODEV;
-       }
-
-       if (!dbg_s->id) {
-               nvgpu_err(g, "can't call reg_ops on an unbound debugger session");
-               return -EINVAL;
-       }
-
-       ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
-       if (!dbg_s->is_profiler && !ch) {
-               nvgpu_err(g, "bind a channel before regops for a debugging session");
-               return -EINVAL;
-       }
-
-       /* be sure that ctx info is in place */
-       if (!g->is_virtual &&
-               !gr_context_info_available(dbg_s, &g->gr)) {
-               nvgpu_err(g, "gr context data not available");
-               return -ENODEV;
-       }
-
-       /* since exec_reg_ops sends methods to the ucode, it must take the
-        * global gpu lock to protect against mixing methods from debug sessions
-        * on other channels */
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-
-       if (!dbg_s->is_pg_disabled && !g->is_virtual) {
-               /* In the virtual case, the server will handle
-                * disabling/enabling powergating when processing reg ops
-                */
-               powergate_err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
-                                       NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE);
-               is_pg_disabled = true;
-       }
-
-       if (!powergate_err) {
-               u64 ops_offset = 0; /* index offset */
-
-               while (ops_offset < args->num_ops && !err) {
-                       const u64 num_ops =
-                               min(args->num_ops - ops_offset,
-                                   (u64)(g->dbg_regops_tmp_buf_ops));
-                       const u64 fragment_size =
-                               num_ops * sizeof(g->dbg_regops_tmp_buf[0]);
-
-                       void __user *const fragment =
-                               (void __user *)(uintptr_t)
-                               (args->ops +
-                                ops_offset * sizeof(g->dbg_regops_tmp_buf[0]));
-
-                       gk20a_dbg_fn("Regops fragment: start_op=%llu ops=%llu",
-                                    ops_offset, num_ops);
-
-                       gk20a_dbg_fn("Copying regops from userspace");
-
-                       if (copy_from_user(g->dbg_regops_tmp_buf,
-                                          fragment, fragment_size)) {
-                               nvgpu_err(g, "copy_from_user failed!");
-                               err = -EFAULT;
-                               break;
-                       }
-
-                       err = g->ops.dbg_session_ops.exec_reg_ops(
-                               dbg_s, g->dbg_regops_tmp_buf, num_ops);
-
-                       gk20a_dbg_fn("Copying result to userspace");
-
-                       if (copy_to_user(fragment, g->dbg_regops_tmp_buf,
-                                        fragment_size)) {
-                               nvgpu_err(g, "copy_to_user failed!");
-                               err = -EFAULT;
-                               break;
-                       }
-
-                       ops_offset += num_ops;
-               }
-
-               /* enable powergate, if previously disabled */
-               if (is_pg_disabled) {
-                       powergate_err =
-                               g->ops.dbg_session_ops.dbg_set_powergate(dbg_s,
-                                       NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
-               }
-       }
-
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-
-       if (!err && powergate_err)
-               err = powergate_err;
-
-       if (err)
-               nvgpu_err(g, "dbg regops failed");
-
-       return err;
-}
-
 int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
 {
        int err = 0;
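The largest removed block is the ioctl surface itself: bind/unbind, timeouts, SM error state, FB memory access, regops fragmentation, and the top-level dispatcher. The dispatcher follows the standard bounce-buffer shape: validate the command, copy the argument struct into a kernel buffer for _IOC_WRITE ioctls, dispatch, and copy the buffer back for _IOC_READ ioctls. A condensed sketch of just that skeleton, reusing the driver's own names (error handling and the dispatch switch trimmed):

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE comes from uapi/linux/nvgpu.h */
static long dbg_ioctl_skeleton(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];

        if (_IOC_SIZE(cmd) > sizeof(buf))
                return -EINVAL;

        memset(buf, 0, sizeof(buf));
        /* copy user arguments into a kernel copy before dispatch */
        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        /* ...switch (cmd) { handlers operate on buf in place } ... */

        /* copy results back out for read-style ioctls */
        if (_IOC_DIR(cmd) & _IOC_READ) {
                if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }
        return 0;
}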
@@ -1409,273 +246,6 @@ int dbg_set_powergate(struct dbg_session_gk20a *dbg_s, u32 powermode)
        return err;
 }
 
-static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
-                               struct nvgpu_dbg_gpu_powergate_args *args)
-{
-       int err;
-       struct gk20a *g = dbg_s->g;
-       gk20a_dbg_fn("%s powergate mode = %d",
-                     g->name, args->mode);
-
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-       err = g->ops.dbg_session_ops.dbg_set_powergate(dbg_s, args->mode);
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-       return err;
-}
-
-static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
-                              struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args)
-{
-       int err;
-       struct gk20a *g = dbg_s->g;
-       struct channel_gk20a *ch_gk20a;
-
-       gk20a_dbg_fn("%s smpc ctxsw mode = %d",
-                    g->name, args->mode);
-
-       err = gk20a_busy(g);
-       if (err) {
-               nvgpu_err(g, "failed to poweron");
-               return err;
-       }
-
-       /* Take the global lock, since we'll be doing global regops */
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-
-       ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
-       if (!ch_gk20a) {
-               nvgpu_err(g,
-                         "no bound channel for smpc ctxsw mode update");
-               err = -EINVAL;
-               goto clean_up;
-       }
-
-       err = g->ops.gr.update_smpc_ctxsw_mode(g, ch_gk20a,
-                       args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
-       if (err) {
-               nvgpu_err(g,
-                         "error (%d) during smpc ctxsw mode update", err);
-               goto clean_up;
-       }
-
-       err = g->ops.regops.apply_smpc_war(dbg_s);
-clean_up:
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-       gk20a_idle(g);
-       return err;
-}
-
-static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
-                              struct nvgpu_dbg_gpu_hwpm_ctxsw_mode_args *args)
-{
-       int err;
-       struct gk20a *g = dbg_s->g;
-       struct channel_gk20a *ch_gk20a;
-
-       gk20a_dbg_fn("%s pm ctxsw mode = %d",
-                    g->name, args->mode);
-
-       /* Must have a valid reservation to enable/disable hwpm cxtsw.
-        * Just print an error message for now, but eventually this should
-        * return an error, at the point where all client sw has been
-        * cleaned up.
-        */
-       if (!dbg_s->has_profiler_reservation) {
-               nvgpu_err(g,
-                         "session doesn't have a valid reservation");
-       }
-
-       err = gk20a_busy(g);
-       if (err) {
-               nvgpu_err(g, "failed to poweron");
-               return err;
-       }
-
-       /* Take the global lock, since we'll be doing global regops */
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-
-       ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
-       if (!ch_gk20a) {
-               nvgpu_err(g,
-                         "no bound channel for pm ctxsw mode update");
-               err = -EINVAL;
-               goto clean_up;
-       }
-
-       err = g->ops.gr.update_hwpm_ctxsw_mode(g, ch_gk20a,
-                       args->mode == NVGPU_DBG_GPU_HWPM_CTXSW_MODE_CTXSW);
-       if (err)
-               nvgpu_err(g,
-                         "error (%d) during pm ctxsw mode update", err);
-
-       /* gk20a would require a WAR to set the core PM_ENABLE bit, not
-        * added here with gk20a being deprecated
-        */
-clean_up:
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-       gk20a_idle(g);
-       return err;
-}
-
-static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
-               struct dbg_session_gk20a *dbg_s,
-               struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
-{
-       struct gk20a *g = dbg_s->g;
-       struct channel_gk20a *ch;
-       int err = 0, action = args->mode;
-
-       gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);
-
-       ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
-       if (!ch)
-               return -EINVAL;
-
-       err = gk20a_busy(g);
-       if (err) {
-               nvgpu_err(g, "failed to poweron");
-               return err;
-       }
-
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-
-       /* Suspend GPU context switching */
-       err = gr_gk20a_disable_ctxsw(g);
-       if (err) {
-               nvgpu_err(g, "unable to stop gr ctxsw");
-               /* this should probably be ctx-fatal... */
-               goto clean_up;
-       }
-
-       switch (action) {
-       case NVGPU_DBG_GPU_SUSPEND_ALL_SMS:
-               gr_gk20a_suspend_context(ch);
-               break;
-
-       case NVGPU_DBG_GPU_RESUME_ALL_SMS:
-               gr_gk20a_resume_context(ch);
-               break;
-       }
-
-       err = gr_gk20a_enable_ctxsw(g);
-       if (err)
-               nvgpu_err(g, "unable to restart ctxsw!");
-
-clean_up:
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-       gk20a_idle(g);
-
-       return err;
-}
-
-static int nvgpu_ioctl_allocate_profiler_object(
-                               struct dbg_session_gk20a *dbg_s,
-                               struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args)
-{
-       int err = 0;
-       struct gk20a *g = get_gk20a(dbg_s->dev);
-       struct dbg_profiler_object_data *prof_obj;
-
-       gk20a_dbg_fn("%s", g->name);
-
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-
-       err = alloc_profiler(g, &prof_obj);
-       if (err)
-               goto clean_up;
-
-       prof_obj->session_id = dbg_s->id;
-
-       if (dbg_s->is_profiler)
-               prof_obj->ch = NULL;
-       else {
-               prof_obj->ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
-               if (prof_obj->ch == NULL) {
-                       nvgpu_err(g,
-                               "bind a channel for dbg session");
-                       nvgpu_kfree(g, prof_obj);
-                       err = -EINVAL;
-                       goto clean_up;
-               }
-       }
-
-       /* Return handle to client */
-       args->profiler_handle = prof_obj->prof_handle;
-
-       nvgpu_init_list_node(&prof_obj->prof_obj_entry);
-
-       nvgpu_list_add(&prof_obj->prof_obj_entry, &g->profiler_objects);
-clean_up:
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-       return err;
-}
-
-static int nvgpu_ioctl_free_profiler_object(
-                               struct dbg_session_gk20a *dbg_s,
-                               struct nvgpu_dbg_gpu_profiler_obj_mgt_args *args)
-{
-       int err = 0;
-       struct gk20a *g = get_gk20a(dbg_s->dev);
-       struct dbg_profiler_object_data *prof_obj, *tmp_obj;
-       bool obj_found = false;
-
-       gk20a_dbg_fn("%s session_id = %d profiler_handle = %x",
-                    g->name, dbg_s->id, args->profiler_handle);
-
-       nvgpu_mutex_acquire(&g->dbg_sessions_lock);
-
-       /* Remove profiler object from the list, if a match is found */
-       nvgpu_list_for_each_entry_safe(prof_obj, tmp_obj, &g->profiler_objects,
-                               dbg_profiler_object_data, prof_obj_entry) {
-               if (prof_obj->prof_handle == args->profiler_handle) {
-                       if (prof_obj->session_id != dbg_s->id) {
-                               nvgpu_err(g,
-                                               "invalid handle %x",
-                                               args->profiler_handle);
-                               err = -EINVAL;
-                               break;
-                       }
-                       if (prof_obj->has_reservation)
-                               g->ops.dbg_session_ops.
-                                 release_profiler_reservation(dbg_s, prof_obj);
-                       nvgpu_list_del(&prof_obj->prof_obj_entry);
-                       nvgpu_kfree(g, prof_obj);
-                       obj_found = true;
-                       break;
-               }
-       }
-       if (!obj_found) {
-               nvgpu_err(g, "profiler %x not found",
-                         args->profiler_handle);
-               err = -EINVAL;
-       }
-
-       nvgpu_mutex_release(&g->dbg_sessions_lock);
-       return err;
-}
-
-static struct dbg_profiler_object_data *find_matching_prof_obj(
-                                               struct dbg_session_gk20a *dbg_s,
-                                               u32 profiler_handle)
-{
-       struct gk20a *g = dbg_s->g;
-       struct dbg_profiler_object_data *prof_obj;
-
-       nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects,
-                               dbg_profiler_object_data, prof_obj_entry) {
-               if (prof_obj->prof_handle == profiler_handle) {
-                       if (prof_obj->session_id != dbg_s->id) {
-                               nvgpu_err(g,
-                                               "invalid handle %x",
-                                               profiler_handle);
-                               return NULL;
-                       }
-                       return prof_obj;
-               }
-       }
-       return NULL;
-}
-
 bool nvgpu_check_and_set_global_reservation(
                struct dbg_session_gk20a *dbg_s,
                struct dbg_profiler_object_data *prof_obj)
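The hunk below removes the reservation acquire/release helpers, which encode a simple exclusion policy: a global profiler reservation is granted only when nothing else is reserved anywhere, while a per-context reservation is refused if a global one is held or the same channel/TSG is already reserved. Distilled into a single predicate (an illustrative restatement, not code from the driver):

#include <stdbool.h>

static bool can_reserve(bool want_global, bool any_reservation_held,
                        bool global_held, bool same_ctx_reserved)
{
        if (want_global)
                /* global: needs exclusive access to the profiler */
                return !any_reservation_held;

        /* per-context: blocked by a global holder or by an existing
         * reservation on the same channel/TSG
         */
        return !global_held && !same_ctx_reserved;
}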
@@ -1721,149 +291,6 @@ void nvgpu_release_profiler_reservation(struct dbg_session_gk20a *dbg_s, | |||
1721 | g->global_profiler_reservation_held = false; | 291 | g->global_profiler_reservation_held = false; |
1722 | } | 292 | } |
1723 | 293 | ||
1724 | static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s, | ||
1725 | u32 profiler_handle) | ||
1726 | { | ||
1727 | struct gk20a *g = dbg_s->g; | ||
1728 | struct dbg_profiler_object_data *prof_obj, *my_prof_obj; | ||
1729 | int err = 0; | ||
1730 | |||
1731 | gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); | ||
1732 | |||
1733 | if (g->profiler_reservation_count < 0) { | ||
1734 | nvgpu_err(g, "Negative reservation count!"); | ||
1735 | return -EINVAL; | ||
1736 | } | ||
1737 | |||
1738 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); | ||
1739 | |||
1740 | /* Find matching object. */ | ||
1741 | my_prof_obj = find_matching_prof_obj(dbg_s, profiler_handle); | ||
1742 | |||
1743 | if (!my_prof_obj) { | ||
1744 | nvgpu_err(g, "object not found"); | ||
1745 | err = -EINVAL; | ||
1746 | goto exit; | ||
1747 | } | ||
1748 | |||
1749 | /* If we already have the reservation, we're done */ | ||
1750 | if (my_prof_obj->has_reservation) { | ||
1751 | err = 0; | ||
1752 | goto exit; | ||
1753 | } | ||
1754 | |||
1755 | if (my_prof_obj->ch == NULL) { | ||
1756 | /* Global reservations are only allowed if there are no other | ||
1757 | * global or per-context reservations currently held | ||
1758 | */ | ||
1759 | if (!g->ops.dbg_session_ops.check_and_set_global_reservation( | ||
1760 | dbg_s, my_prof_obj)) { | ||
1761 | nvgpu_err(g, | ||
1762 | "global reserve: have existing reservation"); | ||
1763 | err = -EBUSY; | ||
1764 | } | ||
1765 | } else if (g->global_profiler_reservation_held) { | ||
1766 | /* If there's a global reservation, | ||
1767 | * we can't take a per-context one. | ||
1768 | */ | ||
1769 | nvgpu_err(g, | ||
1770 | "per-ctxt reserve: global reservation in effect"); | ||
1771 | err = -EBUSY; | ||
1772 | } else if (gk20a_is_channel_marked_as_tsg(my_prof_obj->ch)) { | ||
1773 | /* TSG: check that another channel in the TSG | ||
1774 | * doesn't already have the reservation | ||
1775 | */ | ||
1776 | int my_tsgid = my_prof_obj->ch->tsgid; | ||
1777 | |||
1778 | nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects, | ||
1779 | dbg_profiler_object_data, prof_obj_entry) { | ||
1780 | if (prof_obj->has_reservation && | ||
1781 | (prof_obj->ch->tsgid == my_tsgid)) { | ||
1782 | nvgpu_err(g, | ||
1783 | "per-ctxt reserve (tsg): already reserved"); | ||
1784 | err = -EBUSY; | ||
1785 | goto exit; | ||
1786 | } | ||
1787 | } | ||
1788 | |||
1789 | if (!g->ops.dbg_session_ops.check_and_set_context_reservation( | ||
1790 | dbg_s, my_prof_obj)) { | ||
1791 | /* Another guest OS has the global reservation */ | ||
1792 | nvgpu_err(g, | ||
1793 | "per-ctxt reserve: global reservation in effect"); | ||
1794 | err = -EBUSY; | ||
1795 | } | ||
1796 | } else { | ||
1797 | /* channel: check that some other profiler object doesn't | ||
1798 | * already have the reservation. | ||
1799 | */ | ||
1800 | struct channel_gk20a *my_ch = my_prof_obj->ch; | ||
1801 | |||
1802 | nvgpu_list_for_each_entry(prof_obj, &g->profiler_objects, | ||
1803 | dbg_profiler_object_data, prof_obj_entry) { | ||
1804 | if (prof_obj->has_reservation && | ||
1805 | (prof_obj->ch == my_ch)) { | ||
1806 | nvgpu_err(g, | ||
1807 | "per-ctxt reserve (ch): already reserved"); | ||
1808 | err = -EBUSY; | ||
1809 | goto exit; | ||
1810 | } | ||
1811 | } | ||
1812 | |||
1813 | if (!g->ops.dbg_session_ops.check_and_set_context_reservation( | ||
1814 | dbg_s, my_prof_obj)) { | ||
1815 | /* Another guest OS has the global reservation */ | ||
1816 | nvgpu_err(g, | ||
1817 | "per-ctxt reserve: global reservation in effect"); | ||
1818 | err = -EBUSY; | ||
1819 | } | ||
1820 | } | ||
1821 | exit: | ||
1822 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
1823 | return err; | ||
1824 | } | ||
1825 | |||
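The acquire path above enforces a three-tier policy: a global reservation is allowed only when no reservation of any kind is held; a per-context reservation is blocked by a global one; and among per-context reservations, TSG-bound channels conflict per TSG id while bare channels conflict per channel. A condensed sketch of just that decision follows (illustrative helper; the real code additionally routes the final check through the dbg_session_ops callbacks so that, e.g., another guest OS holding the global reservation can veto it, and it assumes profiler_reservation_count counts all held reservations):

/* Returns 0 if obj may take a reservation, -EBUSY otherwise.
 * Preconditions, as in the deleted function: dbg_sessions_lock is
 * held and obj itself holds no reservation yet.
 */
static int profiler_reserve_policy(struct gk20a *g,
		struct dbg_profiler_object_data *obj)
{
	struct dbg_profiler_object_data *other;

	if (obj->ch == NULL)	/* global: nothing else may be reserved */
		return g->profiler_reservation_count == 0 ? 0 : -EBUSY;

	if (g->global_profiler_reservation_held)
		return -EBUSY;	/* global blocks all per-context */

	nvgpu_list_for_each_entry(other, &g->profiler_objects,
				dbg_profiler_object_data, prof_obj_entry) {
		if (!other->has_reservation)
			continue;
		/* No global reservation is held, so every holder here is
		 * per-context and other->ch is non-NULL. TSG: any channel
		 * in the same TSG conflicts; bare channel: only the same
		 * channel conflicts.
		 */
		if (gk20a_is_channel_marked_as_tsg(obj->ch) ?
				(other->ch->tsgid == obj->ch->tsgid) :
				(other->ch == obj->ch))
			return -EBUSY;
	}
	return 0;
}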
1826 | static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s, | ||
1827 | u32 profiler_handle) | ||
1828 | { | ||
1829 | struct gk20a *g = dbg_s->g; | ||
1830 | struct dbg_profiler_object_data *prof_obj; | ||
1831 | int err = 0; | ||
1832 | |||
1833 | gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle); | ||
1834 | |||
1835 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); | ||
1836 | |||
1837 | /* Find matching object. */ | ||
1838 | prof_obj = find_matching_prof_obj(dbg_s, profiler_handle); | ||
1839 | |||
1840 | if (!prof_obj) { | ||
1841 | nvgpu_err(g, "object not found"); | ||
1842 | err = -EINVAL; | ||
1843 | goto exit; | ||
1844 | } | ||
1845 | |||
1846 | if (prof_obj->has_reservation) | ||
1847 | g->ops.dbg_session_ops.release_profiler_reservation(dbg_s, prof_obj); | ||
1848 | else { | ||
1849 | nvgpu_err(g, "No reservation found"); | ||
1850 | err = -EINVAL; | ||
1851 | goto exit; | ||
1852 | } | ||
1853 | exit: | ||
1854 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
1855 | return err; | ||
1856 | } | ||
1857 | |||
1858 | static int nvgpu_ioctl_profiler_reserve(struct dbg_session_gk20a *dbg_s, | ||
1859 | struct nvgpu_dbg_gpu_profiler_reserve_args *args) | ||
1860 | { | ||
1861 | if (args->acquire) | ||
1862 | return nvgpu_profiler_reserve_acquire(dbg_s, args->profiler_handle); | ||
1863 | |||
1864 | return nvgpu_profiler_reserve_release(dbg_s, args->profiler_handle); | ||
1865 | } | ||
1866 | |||
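From userspace, both paths are reached through one ioctl that multiplexes on args->acquire. A hypothetical caller's view (the uapi header path and ioctl name are assumptions, not shown in this patch; only the args fields appear above):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>	/* assumed location of the args struct */

/* Hypothetical helper: acquire (non-zero) or release (0) a profiler
 * reservation on an open debugger fd. The ioctl name
 * NVGPU_DBG_GPU_IOCTL_PROFILER_RESERVE is assumed.
 */
static int profiler_reserve(int dbg_fd, unsigned int handle, int acquire)
{
	struct nvgpu_dbg_gpu_profiler_reserve_args args = {
		.profiler_handle = handle,
		.acquire = acquire,
	};

	if (ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_PROFILER_RESERVE, &args) != 0) {
		perror("profiler reserve");
		return -1;
	}
	return 0;
}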
1867 | int gk20a_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size) | 294 | int gk20a_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size) |
1868 | { | 295 | { |
1869 | struct mm_gk20a *mm = &g->mm; | 296 | struct mm_gk20a *mm = &g->mm; |
@@ -1909,75 +336,6 @@ int gk20a_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size) | |||
1909 | return 0; | 336 | return 0; |
1910 | } | 337 | } |
1911 | 338 | ||
1912 | static int gk20a_perfbuf_map(struct dbg_session_gk20a *dbg_s, | ||
1913 | struct nvgpu_dbg_gpu_perfbuf_map_args *args) | ||
1914 | { | ||
1915 | struct gk20a *g = dbg_s->g; | ||
1916 | struct mm_gk20a *mm = &g->mm; | ||
1917 | int err; | ||
1918 | u32 virt_size; | ||
1919 | u32 big_page_size; | ||
1920 | |||
1921 | if (!g->ops.dbg_session_ops.perfbuffer_enable) | ||
1922 | return -ENOSYS; | ||
1923 | |||
1924 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); | ||
1925 | |||
1926 | big_page_size = g->ops.mm.get_default_big_page_size(); | ||
1927 | |||
1928 | if (g->perfbuf.owner) { | ||
1929 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
1930 | return -EBUSY; | ||
1931 | } | ||
1932 | |||
1933 | mm->perfbuf.vm = nvgpu_vm_init(g, big_page_size, | ||
1934 | big_page_size << 10, | ||
1935 | NV_MM_DEFAULT_KERNEL_SIZE, | ||
1936 | NV_MM_DEFAULT_KERNEL_SIZE + NV_MM_DEFAULT_USER_SIZE, | ||
1937 | false, false, "perfbuf"); | ||
1938 | if (!mm->perfbuf.vm) { | ||
1939 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
1940 | return -ENOMEM; | ||
1941 | } | ||
1942 | |||
1943 | err = nvgpu_vm_map_buffer(mm->perfbuf.vm, | ||
1944 | args->dmabuf_fd, | ||
1945 | &args->offset, | ||
1946 | 0, | ||
1947 | 0, | ||
1948 | 0, | ||
1949 | 0, | ||
1950 | args->mapping_size, | ||
1951 | NULL); | ||
1952 | if (err) | ||
1953 | goto err_remove_vm; | ||
1954 | |||
1955 | /* perf output buffer may not cross a 4GB boundary */ | ||
1956 | virt_size = u64_lo32(args->mapping_size); | ||
1957 | if (u64_hi32(args->offset) != u64_hi32(args->offset + virt_size)) { | ||
1958 | err = -EINVAL; | ||
1959 | goto err_unmap; | ||
1960 | } | ||
1961 | |||
1962 | err = g->ops.dbg_session_ops.perfbuffer_enable(g, | ||
1963 | args->offset, virt_size); | ||
1964 | if (err) | ||
1965 | goto err_unmap; | ||
1966 | |||
1967 | g->perfbuf.owner = dbg_s; | ||
1968 | g->perfbuf.offset = args->offset; | ||
1969 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
1970 | |||
1971 | return 0; | ||
1972 | |||
1973 | err_unmap: | ||
1974 | nvgpu_vm_unmap_buffer(mm->perfbuf.vm, args->offset, NULL); | ||
1975 | err_remove_vm: | ||
1976 | nvgpu_vm_put(mm->perfbuf.vm); | ||
1977 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
1978 | return err; | ||
1979 | } | ||
1980 | |||
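The one arithmetic subtlety in the map path above is the 4 GB rule: the perf output buffer's GPU VA range may not cross a 32-bit boundary, which the code tests by comparing the upper halves of the start and end addresses. The same test in isolation (standalone sketch; hi32() mirrors u64_hi32()):

#include <stdbool.h>
#include <stdint.h>

static inline uint32_t hi32(uint64_t v)
{
	return (uint32_t)(v >> 32);
}

/* True if [offset, offset + size) straddles a 4 GB boundary. Like the
 * kernel test above, this is conservative: a buffer ending exactly on
 * a boundary (offset + size == n * 4 GB) is also rejected.
 */
static bool crosses_4gb_boundary(uint64_t offset, uint32_t size)
{
	return hi32(offset) != hi32(offset + size);
}

/* e.g. crosses_4gb_boundary(0xFFFF0000u, 0x20000) == true */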
1981 | /* must be called with dbg_sessions_lock held */ | 339 | /* must be called with dbg_sessions_lock held */ |
1982 | int gk20a_perfbuf_disable_locked(struct gk20a *g) | 340 | int gk20a_perfbuf_disable_locked(struct gk20a *g) |
1983 | { | 341 | { |
@@ -2001,43 +359,3 @@ int gk20a_perfbuf_disable_locked(struct gk20a *g) | |||
2001 | 359 | ||
2002 | return 0; | 360 | return 0; |
2003 | } | 361 | } |
2004 | |||
2005 | static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset) | ||
2006 | { | ||
2007 | struct mm_gk20a *mm = &g->mm; | ||
2008 | struct vm_gk20a *vm = mm->perfbuf.vm; | ||
2009 | int err; | ||
2010 | |||
2011 | err = g->ops.dbg_session_ops.perfbuffer_disable(g); | ||
2012 | |||
2013 | nvgpu_vm_unmap_buffer(vm, offset, NULL); | ||
2014 | gk20a_free_inst_block(g, &mm->perfbuf.inst_block); | ||
2015 | nvgpu_vm_put(vm); | ||
2016 | |||
2017 | g->perfbuf.owner = NULL; | ||
2018 | g->perfbuf.offset = 0; | ||
2019 | return err; | ||
2020 | } | ||
2021 | |||
2022 | static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s, | ||
2023 | struct nvgpu_dbg_gpu_perfbuf_unmap_args *args) | ||
2024 | { | ||
2025 | struct gk20a *g = dbg_s->g; | ||
2026 | int err; | ||
2027 | |||
2028 | if (!g->ops.dbg_session_ops.perfbuffer_disable) | ||
2029 | return -ENOSYS; | ||
2030 | |||
2031 | nvgpu_mutex_acquire(&g->dbg_sessions_lock); | ||
2032 | if ((g->perfbuf.owner != dbg_s) || | ||
2033 | (g->perfbuf.offset != args->offset)) { | ||
2034 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
2035 | return -EINVAL; | ||
2036 | } | ||
2037 | |||
2038 | err = gk20a_perfbuf_release_locked(g, args->offset); | ||
2039 | |||
2040 | nvgpu_mutex_release(&g->dbg_sessions_lock); | ||
2041 | |||
2042 | return err; | ||
2043 | } | ||
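Unmap above is gated on ownership: only the session that mapped the buffer, at the offset it registered, may tear it down, after which release runs in the reverse order of setup (disable HW, unmap, free the inst block, drop the VM reference). The predicate in isolation (illustrative helper, kernel-idiom sketch):

static bool perfbuf_owned_by(struct gk20a *g,
			     struct dbg_session_gk20a *dbg_s, u64 offset)
{
	/* caller holds g->dbg_sessions_lock */
	return g->perfbuf.owner == dbg_s && g->perfbuf.offset == offset;
}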
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h index 1a6de3a8..d50ce844 100644 --- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.h | |||
@@ -21,18 +21,10 @@ | |||
21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | 21 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
22 | * DEALINGS IN THE SOFTWARE. | 22 | * DEALINGS IN THE SOFTWARE. |
23 | */ | 23 | */ |
24 | #ifndef DBG_GPU_GK20A_H | 24 | #ifndef DBG_GPU_H |
25 | #define DBG_GPU_GK20A_H | 25 | #define DBG_GPU_H |
26 | #include <linux/poll.h> | ||
27 | 26 | ||
28 | /* module debug driver interface */ | 27 | #include <nvgpu/cond.h> |
29 | int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp); | ||
30 | int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp); | ||
31 | long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); | ||
32 | unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait); | ||
33 | |||
34 | /* used by profiler driver interface */ | ||
35 | int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp); | ||
36 | 28 | ||
37 | /* used by the interrupt handler to post events */ | 29 | /* used by the interrupt handler to post events */ |
38 | void gk20a_dbg_gpu_post_events(struct channel_gk20a *fault_ch); | 30 | void gk20a_dbg_gpu_post_events(struct channel_gk20a *fault_ch); |
@@ -70,8 +62,6 @@ struct dbg_session_gk20a { | |||
70 | struct regops_whitelist *global; | 62 | struct regops_whitelist *global; |
71 | struct regops_whitelist *per_context; | 63 | struct regops_whitelist *per_context; |
72 | 64 | ||
73 | /* gpu module vagaries */ | ||
74 | struct device *dev; | ||
75 | struct gk20a *g; | 65 | struct gk20a *g; |
76 | 66 | ||
77 | /* list of bound channels, if any */ | 67 | /* list of bound channels, if any */ |
@@ -99,18 +89,12 @@ dbg_session_data_from_dbg_s_entry(struct nvgpu_list_node *node) | |||
99 | }; | 89 | }; |
100 | 90 | ||
101 | struct dbg_session_channel_data { | 91 | struct dbg_session_channel_data { |
102 | /* | ||
103 | * We have to keep a ref to the _file_, not the channel, because | ||
104 | * close(channel_fd) is synchronous and would deadlock if we had an | ||
105 | * open debug session fd holding a channel ref at that time. Holding a | ||
106 | * ref to the file makes close(channel_fd) just drop a kernel ref to | ||
107 | * the file; the channel will close when the last file ref is dropped. | ||
108 | */ | ||
109 | struct file *ch_f; | ||
110 | int channel_fd; | 92 | int channel_fd; |
111 | int chid; | 93 | int chid; |
112 | struct nvgpu_list_node ch_entry; | 94 | struct nvgpu_list_node ch_entry; |
113 | struct dbg_session_data *session_data; | 95 | struct dbg_session_data *session_data; |
96 | int (*unbind_single_channel)(struct dbg_session_gk20a *dbg_s, | ||
97 | struct dbg_session_channel_data *ch_data); | ||
114 | }; | 98 | }; |
115 | 99 | ||
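struct dbg_session_channel_data now carries the unbind operation as a per-entry function pointer instead of the header exporting dbg_unbind_single_channel_gk20a directly (see the removed declaration below). Whoever creates the entry installs the OS-specific implementation, and common teardown code can then invoke it without linking against Linux-only code. A sketch of the wiring (both function names hypothetical):

/* Hypothetical Linux-side implementation and bind-time wiring. */
static int dbg_unbind_single_channel_linux(struct dbg_session_gk20a *dbg_s,
		struct dbg_session_channel_data *ch_data)
{
	/* ... drop file/fd refs, unlink ch_data, free it ... */
	return 0;
}

static void dbg_bind_channel_wire_ops(struct dbg_session_channel_data *ch_data)
{
	/* Installed once at bind time; common code later calls
	 * ch_data->unbind_single_channel(dbg_s, ch_data) with no
	 * knowledge of which layer implemented it. */
	ch_data->unbind_single_channel = dbg_unbind_single_channel_linux;
}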
116 | static inline struct dbg_session_channel_data * | 100 | static inline struct dbg_session_channel_data * |
@@ -135,9 +119,6 @@ dbg_profiler_object_data_from_prof_obj_entry(struct nvgpu_list_node *node) | |||
135 | ((uintptr_t)node - offsetof(struct dbg_profiler_object_data, prof_obj_entry)); | 119 | ((uintptr_t)node - offsetof(struct dbg_profiler_object_data, prof_obj_entry)); |
136 | }; | 120 | }; |
137 | 121 | ||
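The *_from_prof_obj_entry helper above is the usual intrusive-list idiom: the list node is embedded in the object, and the enclosing object is recovered by subtracting the member's offset from the node's address. A generic standalone sketch of the pattern:

#include <stddef.h>
#include <stdint.h>

struct list_node { struct list_node *prev, *next; };

struct item {
	int payload;
	struct list_node entry;	/* embedded, no separate allocation */
};

/* Map an embedded node back to its container, as the helper above does
 * for dbg_profiler_object_data.
 */
static inline struct item *item_from_entry(struct list_node *node)
{
	return (struct item *)((uintptr_t)node -
			       offsetof(struct item, entry));
}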
138 | int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s, | ||
139 | struct dbg_session_channel_data *ch_data); | ||
140 | |||
141 | bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch); | 122 | bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch); |
142 | int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch); | 123 | int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch); |
143 | 124 | ||
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c index be8e0f0a..e7aeaa54 100644 --- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c | |||
@@ -506,8 +506,7 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s, | |||
506 | err = gr_gk20a_exec_ctx_ops(ch, ops, num_ops, | 506 | err = gr_gk20a_exec_ctx_ops(ch, ops, num_ops, |
507 | ctx_wr_count, ctx_rd_count); | 507 | ctx_wr_count, ctx_rd_count); |
508 | if (err) { | 508 | if (err) { |
509 | dev_warn(dbg_s->dev, | 509 | nvgpu_warn(g, "failed to perform ctx ops\n"); |
510 | "failed to perform ctx ops\n"); | ||
511 | goto clean_up; | 510 | goto clean_up; |
512 | } | 511 | } |
513 | } | 512 | } |