-rw-r--r-- drivers/gpu/nvgpu/gk20a/regops_gk20a.c | 34
-rw-r--r-- drivers/gpu/nvgpu/os/linux/ioctl_dbg.c | 33
2 files changed, 33 insertions(+), 34 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index a5595c15..232d01a7 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -53,6 +53,31 @@ static inline bool linear_search(u32 offset, const u32 *list, int size)
 	return false;
 }
 
+/*
+ * In order to perform a context relative op, the context has
+ * to be created already... which would imply that the
+ * context switch mechanism has already been put in place.
+ * So by the time we perform such an operation it should always
+ * be possible to query for the appropriate context offsets, etc.
+ *
+ * But note: while the dbg_gpu bind requires a channel fd,
+ * it doesn't require an allocated gr/compute obj at that point...
+ */
+static bool gr_context_info_available(struct gr_gk20a *gr)
+{
+	int err;
+
+	nvgpu_mutex_acquire(&gr->ctx_mutex);
+	err = !gr->ctx_vars.golden_image_initialized;
+	nvgpu_mutex_release(&gr->ctx_mutex);
+	if (err) {
+		return false;
+	}
+
+	return true;
+
+}
+
 static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
 			     u32 *ctx_rd_count, u32 *ctx_wr_count,
 			     struct nvgpu_dbg_reg_op *ops,
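
The helper added above is just a mutex-guarded read of a single readiness flag: take gr->ctx_mutex, sample ctx_vars.golden_image_initialized, drop the lock, and report the result. A minimal, self-contained sketch of the same pattern follows; the struct and pthread types are stand-ins for illustration only, not the driver's real nvgpu_mutex/gr_gk20a definitions.

#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

/* Stand-in for struct gr_gk20a: only the two members the pattern needs. */
struct mock_gr {
	pthread_mutex_t ctx_mutex;
	bool golden_image_initialized;
};

/* Mirrors gr_context_info_available(): sample the flag under the lock so a
 * concurrent golden-image init cannot be observed half-done, then report
 * whether context-relative reg ops can proceed. */
static bool mock_gr_context_info_available(struct mock_gr *gr)
{
	bool ready;

	pthread_mutex_lock(&gr->ctx_mutex);
	ready = gr->golden_image_initialized;
	pthread_mutex_unlock(&gr->ctx_mutex);

	return ready;
}

int main(void)
{
	struct mock_gr gr = { .golden_image_initialized = false };

	pthread_mutex_init(&gr.ctx_mutex, NULL);

	/* Not ready yet: a context-relative op would be rejected here. */
	printf("ready: %d\n", mock_gr_context_info_available(&gr));

	gr.golden_image_initialized = true;	/* as if golden image init completed */
	printf("ready: %d\n", mock_gr_context_info_available(&gr));

	pthread_mutex_destroy(&gr.ctx_mutex);
	return 0;
}
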
@@ -91,7 +116,6 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 	ok = validate_reg_ops(dbg_s,
 			      &ctx_rd_count, &ctx_wr_count,
 			      ops, num_ops);
-
 	if (!ok) {
 		nvgpu_err(g, "invalid op(s)");
 		err = -EINVAL;
@@ -99,6 +123,14 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 		goto clean_up;
 	}
 
+	/* be sure that ctx info is in place if there are ctx ops */
+	if (ctx_wr_count | ctx_rd_count) {
+		if (!gr_context_info_available(&g->gr)) {
+			nvgpu_err(g, "gr context data not available");
+			return -ENODEV;
+		}
+	}
+
 	for (i = 0; i < num_ops; i++) {
 		/* if it isn't global then it is done in the ctx ops... */
 		if (ops[i].type != REGOP(TYPE_GLOBAL))
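
A note on the new gate above: validate_reg_ops() fills in ctx_rd_count and ctx_wr_count, and (ctx_wr_count | ctx_rd_count) is nonzero exactly when at least one context-relative op was requested, so purely global op lists skip the golden-image check entirely. A tiny standalone demonstration of that gating expression (the counter values are made up for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* {ctx_rd_count, ctx_wr_count} pairs; values are arbitrary examples. */
	uint32_t cases[][2] = { {0, 0}, {3, 0}, {0, 2}, {5, 7} };
	size_t i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		uint32_t ctx_rd_count = cases[i][0];
		uint32_t ctx_wr_count = cases[i][1];

		/* Same expression as in the hunk above: the bitwise OR of the
		 * two counters is nonzero iff either counter is nonzero. */
		printf("rd=%u wr=%u -> gr context check %s\n",
		       (unsigned)ctx_rd_count, (unsigned)ctx_wr_count,
		       (ctx_wr_count | ctx_rd_count) ? "required" : "skipped");
	}
	return 0;
}
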
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c
index 1989a5cb..eadf1f93 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_dbg.c
@@ -100,9 +100,6 @@ static int alloc_session(struct gk20a *g, struct dbg_session_gk20a_linux **_dbg_
 	return 0;
 }
 
-static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
-		  struct gr_gk20a *gr);
-
 static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset);
 
 static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
@@ -870,13 +867,6 @@ static int nvgpu_ioctl_channel_reg_ops(struct dbg_session_gk20a *dbg_s,
 		return -EINVAL;
 	}
 
-	/* be sure that ctx info is in place */
-	if (!g->is_virtual &&
-		!gr_context_info_available(dbg_s, &g->gr)) {
-		nvgpu_err(g, "gr context data not available");
-		return -ENODEV;
-	}
-
 	/* since exec_reg_ops sends methods to the ucode, it must take the
 	 * global gpu lock to protect against mixing methods from debug sessions
 	 * on other channels */
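
The surviving comment above captures the locking rationale in the ioctl path: reg-op method submission to the ucode is serialized under one coarse, GPU-wide lock so ops from different debug sessions never interleave. A rough standalone sketch of that pattern, using a pthread mutex and placeholder submit/session names that are illustrative only, not the driver's API:

#include <stdio.h>
#include <pthread.h>

/* One coarse lock standing in for the global gpu lock taken around
 * exec_reg_ops in the ioctl path. */
static pthread_mutex_t gpu_lock = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder for a debug session submitting a batch of reg-op methods. */
static void submit_reg_ops(int session_id, int num_ops)
{
	pthread_mutex_lock(&gpu_lock);
	/* Everything between lock and unlock reaches the "ucode" as one
	 * uninterleaved batch, even with many sessions running. */
	printf("session %d: submitting %d ops\n", session_id, num_ops);
	pthread_mutex_unlock(&gpu_lock);
}

int main(void)
{
	submit_reg_ops(1, 4);
	submit_reg_ops(2, 2);
	return 0;
}
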
@@ -1653,29 +1643,6 @@ static void nvgpu_dbg_gpu_ioctl_get_timeout(struct dbg_session_gk20a *dbg_s,
 	args->enable = NVGPU_DBG_GPU_IOCTL_TIMEOUT_DISABLE;
 }
 
-/* In order to perform a context relative op the context has
- * to be created already... which would imply that the
- * context switch mechanism has already been put in place.
- * So by the time we perform such an opertation it should always
- * be possible to query for the appropriate context offsets, etc.
- *
- * But note: while the dbg_gpu bind requires the a channel fd,
- * it doesn't require an allocated gr/compute obj at that point...
- */
-static bool gr_context_info_available(struct dbg_session_gk20a *dbg_s,
-		  struct gr_gk20a *gr)
-{
-	int err;
-
-	nvgpu_mutex_acquire(&gr->ctx_mutex);
-	err = !gr->ctx_vars.golden_image_initialized;
-	nvgpu_mutex_release(&gr->ctx_mutex);
-	if (err)
-		return false;
-	return true;
-
-}
-
 static int gk20a_perfbuf_release_locked(struct gk20a *g, u64 offset)
 {
 	struct mm_gk20a *mm = &g->mm;