author	Deepak Nibade <dnibade@nvidia.com>	2016-03-29 05:31:25 -0400
committer	Terje Bergstrom <tbergstrom@nvidia.com>	2016-04-19 11:07:34 -0400
commit	dfac8ce70464413c0e3748634c57d49950e71933
tree	35d17f9d35aadf134bbff98ee41e8d7664f442da
parent	c651adbeaacf063b856ef8126b74661b54066477
gpu: nvgpu: support binding multiple channels to a debug session
We currently bind only one channel to a debug session, but some use cases
might need multiple channels bound to the same debug session. Add this
support by adding a list of channels to the debug session; the list
structure is implemented as struct dbg_session_channel_data.

List node dbg_s_list_node is currently defined in struct dbg_session_gk20a,
but this is inefficient when we need to add a debug session to multiple
channels. Hence add a new reference structure, dbg_session_data, to store
the dbg_session pointer and list entry.

For each NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL call, create two reference
structures: dbg_session_channel_data for the channel and dbg_session_data
for the debug session, and bind them together.

Define API nvgpu_dbg_gpu_get_session_channel(), which gets the first
channel in the channel list of a debug session, and use this API wherever
we refer to the channel bound to a debug session.

Remove the dbg_sessions counter from struct gk20a since it is not being
used anywhere.

Add new API NVGPU_DBG_GPU_IOCTL_UNBIND_CHANNEL to support unbinding a
channel from a debug session.

Bug 200156699

Change-Id: I3bfa6f9cd5b90e7254a75c7e64ac893739776b7f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1120331
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
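For reference, the two reference structures look roughly as below. This is a sketch inferred from their usage in the diff that follows; the real definitions live in dbg_gpu_gk20a.h, which is not part of this file's diff.

/*
 * Sketch (not part of this diff): reference structures as implied by
 * their usage below.
 */
#include <linux/list.h>
#include <linux/fs.h>

struct dbg_session_gk20a;	/* defined in dbg_gpu_gk20a.h */

/* one per (session, channel) bind, kept on the session's ch_list */
struct dbg_session_channel_data {
	struct file *ch_f;		/* channel file, pinned while bound */
	int channel_fd;			/* fd passed to BIND_CHANNEL */
	int chid;			/* hw channel id */
	struct list_head ch_entry;	/* entry in dbg_s->ch_list */
	struct dbg_session_data *session_data;	/* back-reference */
};

/* one per (session, channel) bind, kept on the channel's dbg_s_list */
struct dbg_session_data {
	struct dbg_session_gk20a *dbg_s;
	struct list_head dbg_s_entry;	/* entry in ch->dbg_s_list */
};

Each NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL call allocates one of each and cross-links them: ch_data is queued on the session's ch_list and session_data on the channel's dbg_s_list, which is what makes the many-to-many binding possible.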
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c	| 235
1 file changed, 190 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index f717e207..34685416 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -35,6 +35,33 @@ struct dbg_gpu_session_ops dbg_gpu_session_ops_gk20a = {
 	.exec_reg_ops = exec_regops_gk20a,
 };
 
+/*
+ * API to get first channel from the list of all channels
+ * bound to the debug session
+ */
+struct channel_gk20a *
+nvgpu_dbg_gpu_get_session_channel(struct dbg_session_gk20a *dbg_s)
+{
+	struct dbg_session_channel_data *ch_data;
+	struct channel_gk20a *ch;
+	struct gk20a *g = dbg_s->g;
+
+	mutex_lock(&dbg_s->ch_list_lock);
+	if (list_empty(&dbg_s->ch_list)) {
+		mutex_unlock(&dbg_s->ch_list_lock);
+		return NULL;
+	}
+
+	ch_data = list_first_entry(&dbg_s->ch_list,
+				   struct dbg_session_channel_data,
+				   ch_entry);
+	ch = g->fifo.channel + ch_data->chid;
+
+	mutex_unlock(&dbg_s->ch_list_lock);
+
+	return ch;
+}
+
 /* silly allocator - just increment session id */
 static atomic_t session_id = ATOMIC_INIT(0);
 static int generate_session_id(void)
@@ -95,8 +122,9 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
 	if (gk20a_gpu_is_virtual(dev))
 		dbg_session->is_pg_disabled = true;
 
-	INIT_LIST_HEAD(&dbg_session->dbg_s_list_node);
 	init_waitqueue_head(&dbg_session->dbg_events.wait_queue);
+	INIT_LIST_HEAD(&dbg_session->ch_list);
+	mutex_init(&dbg_session->ch_list_lock);
 	dbg_session->dbg_events.events_enabled = false;
 	dbg_session->dbg_events.num_pending_events = 0;
 
@@ -108,18 +136,22 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
  * since it might not have an associated channel. */
 static void gk20a_dbg_session_mutex_lock(struct dbg_session_gk20a *dbg_s)
 {
-	if (dbg_s->is_profiler)
+	struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
+
+	if (dbg_s->is_profiler || !ch)
 		mutex_lock(&dbg_s->g->dbg_sessions_lock);
 	else
-		mutex_lock(&dbg_s->ch->dbg_s_lock);
+		mutex_lock(&ch->dbg_s_lock);
 }
 
 static void gk20a_dbg_session_mutex_unlock(struct dbg_session_gk20a *dbg_s)
 {
-	if (dbg_s->is_profiler)
+	struct channel_gk20a *ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
+
+	if (dbg_s->is_profiler || !ch)
 		mutex_unlock(&dbg_s->g->dbg_sessions_lock);
 	else
-		mutex_unlock(&dbg_s->ch->dbg_s_lock);
+		mutex_unlock(&ch->dbg_s_lock);
 }
 
 static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
@@ -163,10 +195,12 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
 			  struct nvgpu_dbg_gpu_events_ctrl_args *args)
 {
 	int ret = 0;
+	struct channel_gk20a *ch;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
 
-	if (!dbg_s->ch) {
+	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
+	if (!ch) {
 		gk20a_err(dev_from_gk20a(dbg_s->g),
 			  "no channel bound to dbg session\n");
 		return -EINVAL;
@@ -235,6 +269,7 @@ int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
 
 void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
 {
+	struct dbg_session_data *session_data;
 	struct dbg_session_gk20a *dbg_s;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
@@ -242,7 +277,8 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
 	/* guard against the session list being modified */
 	mutex_lock(&ch->dbg_s_lock);
 
-	list_for_each_entry(dbg_s, &ch->dbg_s_list, dbg_s_list_node) {
+	list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) {
+		dbg_s = session_data->dbg_s;
 		if (dbg_s->dbg_events.events_enabled) {
 			gk20a_dbg(gpu_dbg_gpu_dbg, "posting event on session id %d",
 				  dbg_s->id);
@@ -260,6 +296,7 @@ void gk20a_dbg_gpu_post_events(struct channel_gk20a *ch)
 
 bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
 {
+	struct dbg_session_data *session_data;
 	struct dbg_session_gk20a *dbg_s;
 	bool broadcast = false;
 
@@ -268,7 +305,8 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
 	/* guard against the session list being modified */
 	mutex_lock(&ch->dbg_s_lock);
 
-	list_for_each_entry(dbg_s, &ch->dbg_s_list, dbg_s_list_node) {
+	list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) {
+		dbg_s = session_data->dbg_s;
 		if (dbg_s->broadcast_stop_trigger) {
 			gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr,
 				  "stop trigger broadcast enabled");
@@ -284,6 +322,7 @@ bool gk20a_dbg_gpu_broadcast_stop_trigger(struct channel_gk20a *ch)
 
 int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
 {
+	struct dbg_session_data *session_data;
 	struct dbg_session_gk20a *dbg_s;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, "");
@@ -291,7 +330,8 @@ int gk20a_dbg_gpu_clear_broadcast_stop_trigger(struct channel_gk20a *ch)
 	/* guard against the session list being modified */
 	mutex_lock(&ch->dbg_s_lock);
 
-	list_for_each_entry(dbg_s, &ch->dbg_s_list, dbg_s_list_node) {
+	list_for_each_entry(session_data, &ch->dbg_s_list, dbg_s_entry) {
+		dbg_s = session_data->dbg_s;
 		if (dbg_s->broadcast_stop_trigger) {
 			gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn | gpu_dbg_intr,
 				  "stop trigger broadcast disabled");
@@ -347,36 +387,87 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
 	return err;
 }
 
-static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s)
+int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
+			struct dbg_session_channel_data *ch_data)
 {
-	struct channel_gk20a *ch_gk20a = dbg_s->ch;
 	struct gk20a *g = dbg_s->g;
+	int chid;
+	struct channel_gk20a *ch;
+	struct dbg_session_data *session_data;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
-	/* wasn't bound to start with ? */
-	if (!ch_gk20a) {
-		gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "not bound already?");
-		return -ENODEV;
-	}
+	chid = ch_data->chid;
+	ch = g->fifo.channel + chid;
 
-	mutex_lock(&g->dbg_sessions_lock);
-	mutex_lock(&ch_gk20a->dbg_s_lock);
+	list_del_init(&ch_data->ch_entry);
 
-	--g->dbg_sessions;
+	session_data = ch_data->session_data;
+	list_del_init(&session_data->dbg_s_entry);
+	kfree(session_data);
 
-	dbg_s->ch = NULL;
-	fput(dbg_s->ch_f);
-	dbg_s->ch_f = NULL;
+	fput(ch_data->ch_f);
+	kfree(ch_data);
 
-	list_del_init(&dbg_s->dbg_s_list_node);
+	return 0;
+}
+
+static int dbg_unbind_all_channels_gk20a(struct dbg_session_gk20a *dbg_s)
+{
+	struct dbg_session_channel_data *ch_data, *tmp;
+	struct gk20a *g = dbg_s->g;
 
-	mutex_unlock(&ch_gk20a->dbg_s_lock);
+	mutex_lock(&g->dbg_sessions_lock);
+	mutex_lock(&dbg_s->ch_list_lock);
+	list_for_each_entry_safe(ch_data, tmp, &dbg_s->ch_list, ch_entry)
+		dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
+	mutex_unlock(&dbg_s->ch_list_lock);
 	mutex_unlock(&g->dbg_sessions_lock);
 
 	return 0;
 }
 
+static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
+			  struct nvgpu_dbg_gpu_unbind_channel_args *args)
+{
+	struct dbg_session_channel_data *ch_data;
+	struct gk20a *g = dbg_s->g;
+	bool channel_found = false;
+	struct channel_gk20a *ch;
+	int err;
+
+	gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
+		   dev_name(dbg_s->dev), args->channel_fd);
+
+	ch = gk20a_get_channel_from_file(args->channel_fd);
+	if (!ch) {
+		gk20a_dbg_fn("no channel found for fd");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dbg_s->ch_list_lock);
+	list_for_each_entry(ch_data, &dbg_s->ch_list, ch_entry) {
+		if (ch->hw_chid == ch_data->chid) {
+			channel_found = true;
+			break;
+		}
+	}
+	mutex_unlock(&dbg_s->ch_list_lock);
+
+	if (!channel_found) {
+		gk20a_dbg_fn("channel not bounded, fd=%d\n", args->channel_fd);
+		return -EINVAL;
+	}
+
+	mutex_lock(&g->dbg_sessions_lock);
+	mutex_lock(&dbg_s->ch_list_lock);
+	err = dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
+	mutex_unlock(&dbg_s->ch_list_lock);
+	mutex_unlock(&g->dbg_sessions_lock);
+
+	return err;
+}
+
 int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
 {
 	struct dbg_session_gk20a *dbg_s = filp->private_data;
@@ -384,9 +475,8 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
 
 	gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", dev_name(dbg_s->dev));
 
-	/* unbind if it was bound */
-	if (dbg_s->ch)
-		dbg_unbind_channel_gk20a(dbg_s);
+	/* unbind channels */
+	dbg_unbind_all_channels_gk20a(dbg_s);
 
 	/* Powergate/Timeout enable is called here as possibility of dbg_session
 	 * which called powergate/timeout disable ioctl, to be killed without
@@ -405,14 +495,28 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 			  struct nvgpu_dbg_gpu_bind_channel_args *args)
 {
 	struct file *f;
-	struct gk20a *g;
+	struct gk20a *g = dbg_s->g;
 	struct channel_gk20a *ch;
+	struct dbg_session_channel_data *ch_data, *tmp;
+	struct dbg_session_data *session_data;
 
 	gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
 		   dev_name(dbg_s->dev), args->channel_fd);
 
-	if (args->channel_fd == ~0)
-		return dbg_unbind_channel_gk20a(dbg_s);
+	if (args->channel_fd == ~0) {
+		ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
+		if (!ch)
+			return -EINVAL;
+
+		mutex_lock(&dbg_s->ch_list_lock);
+		list_for_each_entry_safe(ch_data, tmp,
+				&dbg_s->ch_list, ch_entry) {
+			if (ch_data->chid == ch->hw_chid)
+				dbg_unbind_single_channel_gk20a(dbg_s, ch_data);
+		}
+		mutex_unlock(&dbg_s->ch_list_lock);
+		return 0;
+	}
 
 	/* even though get_file_channel is doing this it releases it as well */
 	/* by holding it here we'll keep it from disappearing while the
@@ -428,20 +532,40 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 		return -EINVAL;
 	}
 
-	g = dbg_s->g;
 	gk20a_dbg_fn("%s hwchid=%d", dev_name(dbg_s->dev), ch->hw_chid);
 
 	mutex_lock(&g->dbg_sessions_lock);
 	mutex_lock(&ch->dbg_s_lock);
 
-	dbg_s->ch_f = f;
-	dbg_s->ch = ch;
-	list_add(&dbg_s->dbg_s_list_node, &dbg_s->ch->dbg_s_list);
+	ch_data = kzalloc(sizeof(*ch_data), GFP_KERNEL);
+	if (!ch_data) {
+		fput(f);
+		return -ENOMEM;
+	}
+	ch_data->ch_f = f;
+	ch_data->channel_fd = args->channel_fd;
+	ch_data->chid = ch->hw_chid;
+	INIT_LIST_HEAD(&ch_data->ch_entry);
+
+	session_data = kzalloc(sizeof(*session_data), GFP_KERNEL);
+	if (!session_data) {
+		kfree(ch_data);
+		fput(f);
+		return -ENOMEM;
+	}
+	session_data->dbg_s = dbg_s;
+	INIT_LIST_HEAD(&session_data->dbg_s_entry);
+	ch_data->session_data = session_data;
+
+	list_add(&session_data->dbg_s_entry, &ch->dbg_s_list);
 
-	g->dbg_sessions++;
+	mutex_lock(&dbg_s->ch_list_lock);
+	list_add_tail(&ch_data->ch_entry, &dbg_s->ch_list);
+	mutex_unlock(&dbg_s->ch_list_lock);
 
 	mutex_unlock(&ch->dbg_s_lock);
 	mutex_unlock(&g->dbg_sessions_lock);
+
 	return 0;
 }
 
@@ -470,8 +594,12 @@ static int gk20a_perfbuf_unmap(struct dbg_session_gk20a *dbg_s,
 static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
 			  struct nvgpu_dbg_gpu_pc_sampling_args *args)
 {
-	struct channel_gk20a *ch = dbg_s->ch;
-	struct gk20a *g = ch->g;
+	struct channel_gk20a *ch;
+	struct gk20a *g = dbg_s->g;
+
+	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
+	if (!ch)
+		return -EINVAL;
 
 	gk20a_dbg_fn("");
 
@@ -571,9 +699,13 @@ static int nvgpu_dbg_gpu_ioctl_clear_single_sm_error_state(
 	struct gk20a *g = get_gk20a(dbg_s->dev);
 	struct gr_gk20a *gr = &g->gr;
 	u32 sm_id;
-	struct channel_gk20a *ch = dbg_s->ch;
+	struct channel_gk20a *ch;
 	int err = 0;
 
+	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
+	if (!ch)
+		return -EINVAL;
+
 	sm_id = args->sm_id;
 
 	if (sm_id >= gr->no_of_sm)
@@ -598,10 +730,14 @@ static int nvgpu_dbg_gpu_ioctl_write_single_sm_error_state(
 	struct gk20a *g = get_gk20a(dbg_s->dev);
 	struct gr_gk20a *gr = &g->gr;
 	u32 sm_id;
-	struct channel_gk20a *ch = dbg_s->ch;
+	struct channel_gk20a *ch;
 	struct nvgpu_dbg_gpu_sm_error_state_record *sm_error_state;
 	int err = 0;
 
+	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
+	if (!ch)
+		return -EINVAL;
+
 	sm_id = args->sm_id;
 	if (sm_id >= gr->no_of_sm)
 		return -EINVAL;
@@ -756,6 +892,11 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
 			(struct nvgpu_dbg_gpu_write_single_sm_error_state_args *)buf);
 		break;
 
+	case NVGPU_DBG_GPU_IOCTL_UNBIND_CHANNEL:
+		err = dbg_unbind_channel_gk20a(dbg_s,
+			     (struct nvgpu_dbg_gpu_unbind_channel_args *)buf);
+		break;
+
 	default:
 		gk20a_err(dev_from_gk20a(g),
 			"unrecognized dbg gpu ioctl cmd: 0x%x",
@@ -805,6 +946,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
 	struct device *dev = dbg_s->dev;
 	struct gk20a *g = get_gk20a(dbg_s->dev);
 	struct nvgpu_dbg_gpu_reg_op *ops;
+	struct channel_gk20a *ch;
 	u64 ops_size = sizeof(ops[0]) * args->num_ops;
 
 	gk20a_dbg_fn("%d ops, total size %llu", args->num_ops, ops_size);
@@ -814,7 +956,8 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
 		return -EINVAL;
 	}
 
-	if (!dbg_s->is_profiler && !dbg_s->ch) {
+	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
+	if (!dbg_s->is_profiler && !ch) {
 		gk20a_err(dev, "bind a channel before regops for a debugging session");
 		return -EINVAL;
 	}
@@ -1016,8 +1159,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 	/* Take the global lock, since we'll be doing global regops */
 	mutex_lock(&g->dbg_sessions_lock);
 
-	ch_gk20a = dbg_s->ch;
-
+	ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 	if (!ch_gk20a) {
 		gk20a_err(dev_from_gk20a(g),
 			  "no bound channel for smpc ctxsw mode update\n");
@@ -1052,8 +1194,7 @@ static int nvgpu_dbg_gpu_ioctl_hwpm_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 	/* Take the global lock, since we'll be doing global regops */
 	mutex_lock(&g->dbg_sessions_lock);
 
-	ch_gk20a = dbg_s->ch;
-
+	ch_gk20a = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 	if (!ch_gk20a) {
 		gk20a_err(dev_from_gk20a(g),
 			  "no bound channel for pm ctxsw mode update\n");
@@ -1080,12 +1221,16 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
 		struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
 {
 	struct gk20a *g = get_gk20a(dbg_s->dev);
-	struct channel_gk20a *ch = dbg_s->ch;
+	struct channel_gk20a *ch;
 	bool ch_is_curr_ctx;
 	int err = 0, action = args->mode;
 
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);
 
+	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
+	if (!ch)
+		return -EINVAL;
+
 	mutex_lock(&g->dbg_sessions_lock);
 
 	/* Suspend GPU context switching */
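
For illustration, a minimal user-space sketch of the resulting flow: bind two channels to one debug session, then unbind just one of them. The ioctl names and the channel_fd argument fields are the ones used in this patch; the uapi header path and the fd setup are assumptions.

/*
 * Hypothetical sketch: dbg_fd is an open debug-session fd, ch_fd_a and
 * ch_fd_b are open channel fds.
 */
#include <sys/ioctl.h>
#include <linux/nvgpu.h>	/* nvgpu uapi; exact path may differ per tree */

static int rebind_example(int dbg_fd, int ch_fd_a, int ch_fd_b)
{
	struct nvgpu_dbg_gpu_bind_channel_args bind = { 0 };
	struct nvgpu_dbg_gpu_unbind_channel_args unbind = { 0 };

	/* bind two channels to the same debug session */
	bind.channel_fd = ch_fd_a;
	if (ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL, &bind) < 0)
		return -1;
	bind.channel_fd = ch_fd_b;
	if (ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL, &bind) < 0)
		return -1;

	/* new with this patch: drop a single channel from the session */
	unbind.channel_fd = ch_fd_a;
	return ioctl(dbg_fd, NVGPU_DBG_GPU_IOCTL_UNBIND_CHANNEL, &unbind);
}

Before this patch, the only unbind path was passing channel_fd == ~0 to BIND_CHANNEL, which the bind handler above still honors for the session's first channel.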