summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--drivers/gpu/nvgpu/gk20a/regops_gk20a.c34
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index a5595c15..232d01a7 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -53,6 +53,31 @@ static inline bool linear_search(u32 offset, const u32 *list, int size)
53 return false; 53 return false;
54} 54}
55 55
56/*
57 * In order to perform a context relative op the context has
58 * to be created already... which would imply that the
59 * context switch mechanism has already been put in place.
60 * So by the time we perform such an opertation it should always
61 * be possible to query for the appropriate context offsets, etc.
62 *
63 * But note: while the dbg_gpu bind requires the a channel fd,
64 * it doesn't require an allocated gr/compute obj at that point...
65 */
66static bool gr_context_info_available(struct gr_gk20a *gr)
67{
68 int err;
69
70 nvgpu_mutex_acquire(&gr->ctx_mutex);
71 err = !gr->ctx_vars.golden_image_initialized;
72 nvgpu_mutex_release(&gr->ctx_mutex);
73 if (err) {
74 return false;
75 }
76
77 return true;
78
79}
80
56static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s, 81static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
57 u32 *ctx_rd_count, u32 *ctx_wr_count, 82 u32 *ctx_rd_count, u32 *ctx_wr_count,
58 struct nvgpu_dbg_reg_op *ops, 83 struct nvgpu_dbg_reg_op *ops,
@@ -91,7 +116,6 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
91 ok = validate_reg_ops(dbg_s, 116 ok = validate_reg_ops(dbg_s,
92 &ctx_rd_count, &ctx_wr_count, 117 &ctx_rd_count, &ctx_wr_count,
93 ops, num_ops); 118 ops, num_ops);
94
95 if (!ok) { 119 if (!ok) {
96 nvgpu_err(g, "invalid op(s)"); 120 nvgpu_err(g, "invalid op(s)");
97 err = -EINVAL; 121 err = -EINVAL;
@@ -99,6 +123,14 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
99 goto clean_up; 123 goto clean_up;
100 } 124 }
101 125
126 /* be sure that ctx info is in place if there are ctx ops */
127 if (ctx_wr_count | ctx_rd_count) {
128 if (!gr_context_info_available(&g->gr)) {
129 nvgpu_err(g, "gr context data not available");
130 return -ENODEV;
131 }
132 }
133
102 for (i = 0; i < num_ops; i++) { 134 for (i = 0; i < num_ops; i++) {
103 /* if it isn't global then it is done in the ctx ops... */ 135 /* if it isn't global then it is done in the ctx ops... */
104 if (ops[i].type != REGOP(TYPE_GLOBAL)) 136 if (ops[i].type != REGOP(TYPE_GLOBAL))