summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/ioctl_dbg.c')
-rw-r--r--drivers/gpu/nvgpu/os/linux/ioctl_dbg.c33
1 files changed, 0 insertions, 33 deletions
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c
index 1989a5cb..eadf1f93 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c
@@ -100,9 +100,6 @@ static int alloc_session(struct gk20a *g, struct dbg_session_gk20a_linux **_dbg_
100 return 0; 100 return 0;
101} 101}
102 102
103static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
104 struct gr_gk20a *gr);
105
106static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset); 103static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset);
107 104
108static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s, 105static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
@@ -870,13 +867,6 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
870 return -EINVAL; 867 return -EINVAL;
871 } 868 }
872 869
873 /* be sure that ctx info is in place */
874 if (!g->is_virtual &&
875 !gr_context_info_available(dbg_s, &g->gr)) {
876 nvgpu_err(g, "gr context data not available");
877 return -ENODEV;
878 }
879
880 /* since exec_reg_ops sends methods to the ucode, it must take the 870 /* since exec_reg_ops sends methods to the ucode, it must take the
881 * global gpu lock to protect against mixing methods from debug sessions 871 * global gpu lock to protect against mixing methods from debug sessions
882 * on other channels */ 872 * on other channels */
@@ -1653,29 +1643,6 @@ static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s,
1653 args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE; 1643 args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE;
1654} 1644}
1655 1645
1656/* In order to perform a context relative op the context has
1657 * to be created already... which would imply that the
1658 * context switch mechanism has already been put in place.
1659 * So by the time we perform such an operation it should always
1660 * be possible to query for the appropriate context offsets, etc.
1661 *
1662 * But note: while the dbg_gpu bind requires a channel fd,
1663 * it doesn't require an allocated gr/compute obj at that point...
1664 */
1665static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
1666 struct gr_gk20a *gr)
1667{
1668 int err;
1669
1670 nvgpu_mutex_acquire(&gr->ctx_mutex);
1671 err = !gr->ctx_vars.golden_image_initialized;
1672 nvgpu_mutex_release(&gr->ctx_mutex);
1673 if (err)
1674 return false;
1675 return true;
1676
1677}
1678
1679static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset) 1646static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset)
1680{ 1647{
1681 struct mm_gk20a *mm = &g->mm; 1648 struct mm_gk20a *mm = &g->mm;