Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c	90
1 file changed, 45 insertions(+), 45 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index 1fefb659..2f1a08d8 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -154,7 +154,7 @@ static void gk20a_dbg_gpu_events_clear(struct dbg_session_gk20a *dbg_s)
 }
 
 static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
-		struct nvhost_dbg_gpu_events_ctrl_args *args)
+		struct nvgpu_dbg_gpu_events_ctrl_args *args)
 {
 	int ret = 0;
 
@@ -167,15 +167,15 @@ static int gk20a_dbg_gpu_events_ctrl(struct dbg_session_gk20a *dbg_s,
 	}
 
 	switch (args->cmd) {
-	case NVHOST_DBG_GPU_EVENTS_CTRL_CMD_ENABLE:
+	case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_ENABLE:
 		gk20a_dbg_gpu_events_enable(dbg_s);
 		break;
 
-	case NVHOST_DBG_GPU_EVENTS_CTRL_CMD_DISABLE:
+	case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_DISABLE:
 		gk20a_dbg_gpu_events_disable(dbg_s);
 		break;
 
-	case NVHOST_DBG_GPU_EVENTS_CTRL_CMD_CLEAR:
+	case NVGPU_DBG_GPU_EVENTS_CTRL_CMD_CLEAR:
 		gk20a_dbg_gpu_events_clear(dbg_s);
 		break;
 
@@ -278,7 +278,7 @@ static int dbg_unbind_channel_gk20a(struct dbg_session_gk20a *dbg_s)
 	 * which called powergate disable ioctl, to be killed without calling
 	 * powergate enable ioctl
 	 */
-	dbg_set_powergate(dbg_s, NVHOST_DBG_GPU_POWERGATE_MODE_ENABLE);
+	dbg_set_powergate(dbg_s, NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
 
 	dbg_s->ch = NULL;
 	fput(dbg_s->ch_f);
@@ -307,7 +307,7 @@ int gk20a_dbg_gpu_dev_release(struct inode *inode, struct file *filp)
 }
 
 static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
-		struct nvhost_dbg_gpu_bind_channel_args *args)
+		struct nvgpu_dbg_gpu_bind_channel_args *args)
 {
 	struct file *f;
 	struct gk20a *g;
@@ -350,31 +350,31 @@ static int dbg_bind_channel_gk20a(struct dbg_session_gk20a *dbg_s,
 	return 0;
 }
 
-static int nvhost_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
-		struct nvhost_dbg_gpu_exec_reg_ops_args *args);
+static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
+		struct nvgpu_dbg_gpu_exec_reg_ops_args *args);
 
-static int nvhost_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
-		struct nvhost_dbg_gpu_powergate_args *args);
+static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
+		struct nvgpu_dbg_gpu_powergate_args *args);
 
-static int nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
-		struct nvhost_dbg_gpu_smpc_ctxsw_mode_args *args);
+static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
+		struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args);
 
 long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
 		unsigned long arg)
 {
 	struct dbg_session_gk20a *dbg_s = filp->private_data;
 	struct gk20a *g = get_gk20a(dbg_s->pdev);
-	u8 buf[NVHOST_DBG_GPU_IOCTL_MAX_ARG_SIZE];
+	u8 buf[NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE];
 	int err = 0;
 
 	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "");
 
-	if ((_IOC_TYPE(cmd) != NVHOST_DBG_GPU_IOCTL_MAGIC) ||
+	if ((_IOC_TYPE(cmd) != NVGPU_DBG_GPU_IOCTL_MAGIC) ||
 	    (_IOC_NR(cmd) == 0) ||
-	    (_IOC_NR(cmd) > NVHOST_DBG_GPU_IOCTL_LAST))
+	    (_IOC_NR(cmd) > NVGPU_DBG_GPU_IOCTL_LAST))
 		return -EINVAL;
 
-	BUG_ON(_IOC_SIZE(cmd) > NVHOST_DBG_GPU_IOCTL_MAX_ARG_SIZE);
+	BUG_ON(_IOC_SIZE(cmd) > NVGPU_DBG_GPU_IOCTL_MAX_ARG_SIZE);
 
 	if (_IOC_DIR(cmd) & _IOC_WRITE) {
 		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
@@ -390,32 +390,32 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
 	}
 
 	switch (cmd) {
-	case NVHOST_DBG_GPU_IOCTL_BIND_CHANNEL:
+	case NVGPU_DBG_GPU_IOCTL_BIND_CHANNEL:
 		err = dbg_bind_channel_gk20a(dbg_s,
-			     (struct nvhost_dbg_gpu_bind_channel_args *)buf);
+			     (struct nvgpu_dbg_gpu_bind_channel_args *)buf);
 		gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
 		break;
 
-	case NVHOST_DBG_GPU_IOCTL_REG_OPS:
-		err = nvhost_ioctl_channel_reg_ops(dbg_s,
-			     (struct nvhost_dbg_gpu_exec_reg_ops_args *)buf);
+	case NVGPU_DBG_GPU_IOCTL_REG_OPS:
+		err = nvgpu_ioctl_channel_reg_ops(dbg_s,
+			     (struct nvgpu_dbg_gpu_exec_reg_ops_args *)buf);
 		gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
 		break;
 
-	case NVHOST_DBG_GPU_IOCTL_POWERGATE:
-		err = nvhost_ioctl_powergate_gk20a(dbg_s,
-			     (struct nvhost_dbg_gpu_powergate_args *)buf);
+	case NVGPU_DBG_GPU_IOCTL_POWERGATE:
+		err = nvgpu_ioctl_powergate_gk20a(dbg_s,
+			     (struct nvgpu_dbg_gpu_powergate_args *)buf);
 		gk20a_dbg(gpu_dbg_gpu_dbg, "ret=%d", err);
 		break;
 
-	case NVHOST_DBG_GPU_IOCTL_EVENTS_CTRL:
+	case NVGPU_DBG_GPU_IOCTL_EVENTS_CTRL:
 		err = gk20a_dbg_gpu_events_ctrl(dbg_s,
-			     (struct nvhost_dbg_gpu_events_ctrl_args *)buf);
+			     (struct nvgpu_dbg_gpu_events_ctrl_args *)buf);
 		break;
 
-	case NVHOST_DBG_GPU_IOCTL_SMPC_CTXSW_MODE:
-		err = nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(dbg_s,
-			     (struct nvhost_dbg_gpu_smpc_ctxsw_mode_args *)buf);
+	case NVGPU_DBG_GPU_IOCTL_SMPC_CTXSW_MODE:
+		err = nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(dbg_s,
+			     (struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *)buf);
 		break;
 
 	default:
@@ -456,15 +456,15 @@ static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
 
 }
 
-static int nvhost_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
-		struct nvhost_dbg_gpu_exec_reg_ops_args *args)
+static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
+		struct nvgpu_dbg_gpu_exec_reg_ops_args *args)
 {
 	int err = 0, powergate_err = 0;
 	bool is_pg_disabled = false;
 
 	struct device *dev = dbg_s->dev;
 	struct gk20a *g = get_gk20a(dbg_s->pdev);
-	struct nvhost_dbg_gpu_reg_op *ops;
+	struct nvgpu_dbg_gpu_reg_op *ops;
 	u64 ops_size = sizeof(ops[0]) * args->num_ops;
 
 	gk20a_dbg_fn("%d ops, total size %llu", args->num_ops, ops_size);
@@ -506,7 +506,7 @@ static int nvhost_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
 
 	if (!dbg_s->is_pg_disabled) {
 		powergate_err = dbg_set_powergate(dbg_s,
-				NVHOST_DBG_GPU_POWERGATE_MODE_DISABLE);
+				NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE);
 		is_pg_disabled = true;
 	}
 
@@ -515,7 +515,7 @@ static int nvhost_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
 		/* enable powergate, if previously disabled */
 		if (is_pg_disabled) {
 			powergate_err = dbg_set_powergate(dbg_s,
-					NVHOST_DBG_GPU_POWERGATE_MODE_ENABLE);
+					NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE);
 		}
 	}
 
@@ -554,7 +554,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
 		   dev_name(dbg_s->dev), powermode);
 
 	switch (powermode) {
-	case NVHOST_DBG_GPU_POWERGATE_MODE_DISABLE:
+	case NVGPU_DBG_GPU_POWERGATE_MODE_DISABLE:
 		/* save off current powergate, clk state.
 		 * set gpu module's can_powergate = 0.
 		 * set gpu module's clk to max.
@@ -595,7 +595,7 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
 		dbg_s->is_pg_disabled = true;
 		break;
 
-	case NVHOST_DBG_GPU_POWERGATE_MODE_ENABLE:
+	case NVGPU_DBG_GPU_POWERGATE_MODE_ENABLE:
 		/* restore (can) powergate, clk state */
 		/* release pending exceptions to fault/be handled as usual */
 		/*TBD: ordering of these? */
@@ -640,8 +640,8 @@ static int dbg_set_powergate(struct dbg_session_gk20a *dbg_s,
 	return err;
 }
 
-static int nvhost_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
-		struct nvhost_dbg_gpu_powergate_args *args)
+static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
+		struct nvgpu_dbg_gpu_powergate_args *args)
 {
 	int err;
 	struct gk20a *g = get_gk20a(dbg_s->pdev);
@@ -654,8 +654,8 @@ static int nvhost_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
 	return err;
 }
 
-static int nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
-		struct nvhost_dbg_gpu_smpc_ctxsw_mode_args *args)
+static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
+		struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args)
 {
 	int err;
 	struct gk20a *g = get_gk20a(dbg_s->pdev);
@@ -677,7 +677,7 @@ static int nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 	}
 
 	err = gr_gk20a_update_smpc_ctxsw_mode(g, ch_gk20a,
-			args->mode == NVHOST_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
+			args->mode == NVGPU_DBG_GPU_SMPC_CTXSW_MODE_CTXSW);
 	if (err) {
 		gk20a_err(dev_from_gk20a(dbg_s->g),
 			  "error (%d) during smpc ctxsw mode update\n", err);
@@ -688,12 +688,12 @@ static int nvhost_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 	 * it was already swapped out in/out once or not, etc.
 	 */
 	{
-		struct nvhost_dbg_gpu_reg_op ops[4];
+		struct nvgpu_dbg_gpu_reg_op ops[4];
 		int i;
 		for (i = 0; i < ARRAY_SIZE(ops); i++) {
-			ops[i].op = NVHOST_DBG_GPU_REG_OP_WRITE_32;
-			ops[i].type = NVHOST_DBG_GPU_REG_OP_TYPE_GR_CTX;
-			ops[i].status = NVHOST_DBG_GPU_REG_OP_STATUS_SUCCESS;
+			ops[i].op = NVGPU_DBG_GPU_REG_OP_WRITE_32;
+			ops[i].type = NVGPU_DBG_GPU_REG_OP_TYPE_GR_CTX;
+			ops[i].status = NVGPU_DBG_GPU_REG_OP_STATUS_SUCCESS;
 			ops[i].value_hi = 0;
 			ops[i].and_n_mask_lo = 0;
 			ops[i].and_n_mask_hi = 0;