Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/ioctl_dbg.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_dbg.c | 100
1 file changed, 58 insertions(+), 42 deletions(-)
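The change below converts this file from the legacy gk20a_dbg()/gk20a_dbg_fn() debug macros to nvgpu_log()/nvgpu_log_fn(), which take an explicit struct gk20a pointer so messages can be attributed to a specific GPU instance. A minimal before/after sketch of the calling pattern, taken from the hunks themselves (note the new calls use " " rather than "" as the empty format):

	/* before: no device argument */
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
	gk20a_dbg_fn("%s", g->name);

	/* after: the gk20a instance is passed explicitly */
	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
	nvgpu_log_fn(g, "%s", g->name);

Where no struct gk20a *g was already in scope, the hunks below introduce one, either from the session (dbg_s->g) or, in the open() handlers, via container_of() on the character device.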
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
index a53d1cfb..2aba2664 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c
@@ -56,7 +56,7 @@ static int alloc_profiler(struct gk20a *g,
 	struct dbg_profiler_object_data *prof;
 	*_prof = NULL;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 
 	prof = nvgpu_kzalloc(g, sizeof(*prof));
 	if (!prof)
@@ -72,7 +72,7 @@ static int alloc_session(struct gk20a *g, struct dbg_session_gk20a_linux **_dbg_
 	struct dbg_session_gk20a_linux *dbg_s_linux;
 	*_dbg_s_linux = NULL;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 
 	dbg_s_linux = nvgpu_kzalloc(g, sizeof(*dbg_s_linux));
 	if (!dbg_s_linux)
@@ -142,8 +142,9 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
 	unsigned int mask = 0;
 	struct dbg_session_gk20a_linux *dbg_session_linux = filep->private_data;
 	struct dbg_session_gk20a *dbg_s = &dbg_session_linux->dbg_s;
+	struct gk20a *g = dbg_s->g;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 
 	poll_wait(filep, &dbg_s->dbg_events.wait_queue.wq, wait);
 
@@ -151,9 +152,9 @@ unsigned int gk20a_dbg_gpu_dev_poll(struct file *filep, poll_table *wait)
 
 	if (dbg_s->dbg_events.events_enabled &&
 			dbg_s->dbg_events.num_pending_events > 0) {
-		gk20a_dbg(gpu_dbg_gpu_dbg, "found pending event on session id %d",
+		nvgpu_log(g, gpu_dbg_gpu_dbg, "found pending event on session id %d",
 				dbg_s->id);
-		gk20a_dbg(gpu_dbg_gpu_dbg, "%d events pending",
+		nvgpu_log(g, gpu_dbg_gpu_dbg, "%d events pending",
 				dbg_s->dbg_events.num_pending_events);
 		mask = (POLLPRI | POLLIN);
 	}
@@ -170,7 +171,7 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
 	struct gk20a *g = dbg_s->g;
 	struct dbg_profiler_object_data *prof_obj, *tmp_obj;
 
-	gk20a_dbg(gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name);
+	nvgpu_log(g, gpu_dbg_gpu_dbg | gpu_dbg_fn, "%s", g->name);
 
 	/* unbind channels */
 	dbg_unbind_all_channels_gk20a(dbg_s);
@@ -213,7 +214,11 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
 
 int gk20a_prof_gpu_dev_open(struct inode *inode, struct file *filp)
 {
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	struct nvgpu_os_linux *l = container_of(inode->i_cdev,
+				 struct nvgpu_os_linux, prof.cdev);
+	struct gk20a *g = &l->g;
+
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 	return gk20a_dbg_gpu_do_dev_open(inode, filp, true /* is profiler */);
 }
 
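Both open() callbacks gain their gk20a pointer the same way: at open() time there is no session in filp->private_data yet, so the device is recovered from the cdev embedded in struct nvgpu_os_linux. A standalone sketch of that pattern follows; the nvgpu_os_linux layout here is a simplified stand-in, not the real definition.

	#include <linux/cdev.h>
	#include <linux/fs.h>
	#include <linux/kernel.h>

	struct gk20a { const char *name; };	/* stand-in for the real per-GPU struct */

	struct nvgpu_os_linux {			/* simplified stand-in layout */
		struct gk20a g;
		struct { struct cdev cdev; } dbg;	/* debugger chardev */
		struct { struct cdev cdev; } prof;	/* profiler chardev */
	};

	/*
	 * inode->i_cdev points at the cdev registered for this device node;
	 * container_of() walks back from that embedded member to the
	 * enclosing nvgpu_os_linux, which carries the gk20a instance.
	 */
	static struct gk20a *gk20a_from_prof_inode(struct inode *inode)
	{
		struct nvgpu_os_linux *l = container_of(inode->i_cdev,
					struct nvgpu_os_linux, prof.cdev);
		return &l->g;
	}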
@@ -223,7 +228,7 @@ static int nvgpu_dbg_gpu_ioctl_timeout(struct dbg_session_gk20a *dbg_s,
 	int err;
 	struct gk20a *g = dbg_s->g;
 
-	gk20a_dbg_fn("powergate mode = %d", args->enable);
+	nvgpu_log_fn(g, "powergate mode = %d", args->enable);
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	err = nvgpu_dbg_timeout_enable(dbg_s, args->enable);
@@ -356,7 +361,9 @@ static int nvgpu_dbg_gpu_ioctl_set_next_stop_trigger_type(
 		struct dbg_session_gk20a *dbg_s,
 		struct nvgpu_dbg_gpu_set_next_stop_trigger_type_args *args)
 {
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	struct gk20a *g = dbg_s->g;
+
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 
 	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
 
@@ -373,7 +380,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
 	struct gk20a *g = dbg_s->g;
 	int err = 0;
 
-	gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
+	nvgpu_log(g, gpu_dbg_gpu_dbg, "Timeouts mode requested : %d",
 			timeout_mode);
 
 	switch (timeout_mode) {
@@ -401,7 +408,7 @@ static int nvgpu_dbg_timeout_enable(struct dbg_session_gk20a *dbg_s,
 		break;
 	}
 
-	gk20a_dbg(gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
+	nvgpu_log(g, gpu_dbg_gpu_dbg, "Timeouts enabled : %s",
 			g->timeouts_enabled ? "Yes" : "No");
 
 	return err;
@@ -431,7 +438,7 @@ static int gk20a_dbg_gpu_do_dev_open(struct inode *inode,
 
 	dev = dev_from_gk20a(g);
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg session: %s", g->name);
 
 	err = alloc_session(g, &dbg_session_linux);
 	if (err)
@@ -482,7 +489,7 @@ static int dbg_unbind_single_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 	struct dbg_profiler_object_data *prof_obj, *tmp_obj;
 	struct dbg_session_channel_data_linux *ch_data_linux;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 
 	chid = ch_data->chid;
 
@@ -527,7 +534,7 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 	struct dbg_session_data *session_data;
 	int err = 0;
 
-	gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
+	nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
 			g->name, args->channel_fd);
 
 	/*
@@ -541,12 +548,12 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 
 	ch = gk20a_get_channel_from_file(args->channel_fd);
 	if (!ch) {
-		gk20a_dbg_fn("no channel found for fd");
+		nvgpu_log_fn(g, "no channel found for fd");
 		err = -EINVAL;
 		goto out_fput;
 	}
 
-	gk20a_dbg_fn("%s hwchid=%d", g->name, ch->chid);
+	nvgpu_log_fn(g, "%s hwchid=%d", g->name, ch->chid);
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 	nvgpu_mutex_acquire(&ch->dbg_s_lock);
@@ -818,7 +825,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
 	struct gk20a *g = dbg_s->g;
 	struct channel_gk20a *ch;
 
-	gk20a_dbg_fn("%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);
+	nvgpu_log_fn(g, "%d ops, max fragment %d", args->num_ops, g->dbg_regops_tmp_buf_ops);
 
 	if (args->num_ops > NVGPU_IOCTL_DBG_REG_OPS_LIMIT) {
 		nvgpu_err(g, "regops limit exceeded");
@@ -890,10 +897,10 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
 			(args->ops +
 			 ops_offset * sizeof(struct nvgpu_dbg_gpu_reg_op));
 
-		gk20a_dbg_fn("Regops fragment: start_op=%llu ops=%llu",
+		nvgpu_log_fn(g, "Regops fragment: start_op=%llu ops=%llu",
 				ops_offset, num_ops);
 
-		gk20a_dbg_fn("Copying regops from userspace");
+		nvgpu_log_fn(g, "Copying regops from userspace");
 
 		if (copy_from_user(linux_fragment,
 				   fragment, fragment_size)) {
@@ -917,7 +924,7 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
 		if (err)
 			break;
 
-		gk20a_dbg_fn("Copying result to userspace");
+		nvgpu_log_fn(g, "Copying result to userspace");
 
 		if (copy_to_user(fragment, linux_fragment,
 				 fragment_size)) {
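For context on the loop these hunks live in: the regops path stages the userspace op array through a bounded kernel buffer, copying and processing one fragment at a time instead of allocating for args->num_ops at once. A hedged, self-contained sketch of that fragmenting scheme follows; the names, the fixed fragment size, and the apply callback are inventions for the example, not nvgpu's actual code.

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	#define FRAGMENT_OPS 64	/* ops copied per pass (stand-in value) */

	struct reg_op { u32 op; u32 offset; u64 value; };

	static int process_ops_fragmented(void __user *uptr, u64 num_ops,
					  struct reg_op *tmp_buf,
					  int (*apply)(struct reg_op *, u64))
	{
		u64 done = 0;

		while (done < num_ops) {
			u64 n = min_t(u64, num_ops - done, FRAGMENT_OPS);
			size_t bytes = n * sizeof(struct reg_op);
			void __user *frag = (u8 __user *)uptr +
					done * sizeof(struct reg_op);

			/* copy one bounded fragment in, process, copy back out */
			if (copy_from_user(tmp_buf, frag, bytes))
				return -EFAULT;
			if (apply(tmp_buf, n))
				return -EINVAL;
			if (copy_to_user(frag, tmp_buf, bytes))
				return -EFAULT;

			done += n;
		}
		return 0;
	}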
@@ -955,7 +962,7 @@ static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
 {
 	int err;
 	struct gk20a *g = dbg_s->g;
-	gk20a_dbg_fn("%s powergate mode = %d",
+	nvgpu_log_fn(g, "%s powergate mode = %d",
 			g->name, args->mode);
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -978,7 +985,7 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 	struct gk20a *g = dbg_s->g;
 	struct channel_gk20a *ch_gk20a;
 
-	gk20a_dbg_fn("%s smpc ctxsw mode = %d",
+	nvgpu_log_fn(g, "%s smpc ctxsw mode = %d",
 			g->name, args->mode);
 
 	err = gk20a_busy(g);
@@ -1075,7 +1082,7 @@ static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
 	struct channel_gk20a *ch;
 	int err = 0, action = args->mode;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "action: %d", args->mode);
 
 	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 	if (!ch)
@@ -1127,7 +1134,7 @@ static int nvgpu_ioctl_allocate_profiler_object(
 	struct gk20a *g = get_gk20a(dbg_session_linux->dev);
 	struct dbg_profiler_object_data *prof_obj;
 
-	gk20a_dbg_fn("%s", g->name);
+	nvgpu_log_fn(g, "%s", g->name);
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
@@ -1171,7 +1178,7 @@ static int nvgpu_ioctl_free_profiler_object(
 	struct dbg_profiler_object_data *prof_obj, *tmp_obj;
 	bool obj_found = false;
 
-	gk20a_dbg_fn("%s session_id = %d profiler_handle = %x",
+	nvgpu_log_fn(g, "%s session_id = %d profiler_handle = %x",
 			g->name, dbg_s->id, args->profiler_handle);
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
@@ -1253,7 +1260,9 @@ static void gk20a_dbg_session_nvgpu_mutex_release(struct dbg_session_gk20a *dbg_
 
 static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
 {
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	struct gk20a *g = dbg_s->g;
+
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 
 	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
 
@@ -1265,7 +1274,9 @@ static void gk20a_dbg_gpu_events_enable(struct dbg_session_gk20a *dbg_s)
 
 static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
 {
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	struct gk20a *g = dbg_s->g;
+
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 
 	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
 
@@ -1277,7 +1288,9 @@ static void gk20a_dbg_gpu_events_disable(struct dbg_session_gk20a *dbg_s)
 
 static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
 {
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	struct gk20a *g = dbg_s->g;
+
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 
 	gk20a_dbg_session_nvgpu_mutex_acquire(dbg_s);
 
@@ -1294,13 +1307,13 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
 {
 	int ret = 0;
 	struct channel_gk20a *ch;
+	struct gk20a *g = dbg_s->g;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "dbg events ctrl cmd %d", args->cmd);
 
 	ch = nvgpu_dbg_gpu_get_session_channel(dbg_s);
 	if (!ch) {
-		nvgpu_err(dbg_s->g,
-			  "no channel bound to dbg session");
+		nvgpu_err(g, "no channel bound to dbg session");
 		return -EINVAL;
 	}
 
@@ -1318,8 +1331,7 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
 		break;
 
 	default:
-		nvgpu_err(dbg_s->g,
-			  "unrecognized dbg gpu events ctrl cmd: 0x%x",
+		nvgpu_err(g, "unrecognized dbg gpu events ctrl cmd: 0x%x",
 			  args->cmd);
 		ret = -EINVAL;
 		break;
@@ -1422,7 +1434,7 @@ static int gk20a_dbg_pc_sampling(struct dbg_session_gk20a *dbg_s,
 	if (!ch)
 		return -EINVAL;
 
-	gk20a_dbg_fn("");
+	nvgpu_log_fn(g, " ");
 
 	return g->ops.gr.update_pc_sampling ?
 		g->ops.gr.update_pc_sampling(ch, args->enable) : -EINVAL;
@@ -1646,7 +1658,7 @@ static int nvgpu_profiler_reserve_release(struct dbg_session_gk20a *dbg_s,
 	struct dbg_profiler_object_data *prof_obj;
 	int err = 0;
 
-	gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle);
+	nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle);
 
 	nvgpu_mutex_acquire(&g->dbg_sessions_lock);
 
@@ -1678,7 +1690,7 @@ static int nvgpu_profiler_reserve_acquire(struct dbg_session_gk20a *dbg_s,
 	struct dbg_profiler_object_data *prof_obj, *my_prof_obj;
 	int err = 0;
 
-	gk20a_dbg_fn("%s profiler_handle = %x", g->name, profiler_handle);
+	nvgpu_log_fn(g, "%s profiler_handle = %x", g->name, profiler_handle);
 
 	if (g->profiler_reservation_count < 0) {
 		nvgpu_err(g, "Negative reservation count!");
@@ -1782,12 +1794,12 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 	struct channel_gk20a *ch;
 	int err;
 
-	gk20a_dbg(gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
+	nvgpu_log(g, gpu_dbg_fn|gpu_dbg_gpu_dbg, "%s fd=%d",
 			g->name, args->channel_fd);
 
 	ch = gk20a_get_channel_from_file(args->channel_fd);
 	if (!ch) {
-		gk20a_dbg_fn("no channel found for fd");
+		nvgpu_log_fn(g, "no channel found for fd");
 		return -EINVAL;
 	}
 
@@ -1802,7 +1814,7 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 	nvgpu_mutex_release(&dbg_s->ch_list_lock);
 
 	if (!channel_found) {
-		gk20a_dbg_fn("channel not bounded, fd=%d\n", args->channel_fd);
+		nvgpu_log_fn(g, "channel not bounded, fd=%d\n", args->channel_fd);
 		err = -EINVAL;
 		goto out;
 	}
@@ -1820,7 +1832,11 @@ out:
 
 int gk20a_dbg_gpu_dev_open(struct inode *inode, struct file *filp)
 {
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	struct nvgpu_os_linux *l = container_of(inode->i_cdev,
+				 struct nvgpu_os_linux, dbg.cdev);
+	struct gk20a *g = &l->g;
+
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 	return gk20a_dbg_gpu_do_dev_open(inode, filp, false /* not profiler */);
 }
 
@@ -1833,7 +1849,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
 	u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
 	int err = 0;
 
-	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
+	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, " ");
 
 	if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
 	    (_IOC_NR(cmd) == 0) ||
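The surrounding dispatch function screens every command before use, as the context lines above show: ioctl magic, command number, and argument size are validated, then _IOC_WRITE arguments are staged through the stack buffer buf. A generic sketch of that prologue follows; the constants are stand-ins for the NVGPU_DBG_GPU_IOCTL_* values, not the real ones.

	#include <linux/ioctl.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	#define DBG_IOCTL_MAGIC	'D'	/* stand-in for NVGPU_DBG_GPU_IOCTL_MAGIC */
	#define DBG_IOCTL_LAST	32	/* stand-in for the highest command nr */
	#define DBG_MAX_ARG	256	/* stand-in for ..._MAX_ARG_SIZE */

	static long dbg_ioctl_prologue(unsigned int cmd, unsigned long arg,
				       u8 *buf /* at least DBG_MAX_ARG bytes */)
	{
		/* reject commands that are not ours or carry oversized args */
		if ((_IOC_TYPE(cmd) != DBG_IOCTL_MAGIC) ||
		    (_IOC_NR(cmd) == 0) ||
		    (_IOC_NR(cmd) > DBG_IOCTL_LAST) ||
		    (_IOC_SIZE(cmd) > DBG_MAX_ARG))
			return -EINVAL;

		/* for _IOC_WRITE commands, snapshot the user argument first */
		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
				return -EFAULT;
		}
		return 0;
	}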
@@ -1979,7 +1995,7 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
 
 	nvgpu_mutex_release(&dbg_s->ioctl_lock);
 
-	gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
+	nvgpu_log(g, gpu_dbg_gpu_dbg, "ret=%d", err);
 
 	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
 		err = copy_to_user((void __user *)arg,