path: root/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
author     Deepak Nibade <dnibade@nvidia.com>                   2018-07-31 03:39:44 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2018-08-02 16:56:40 -0400
commit     7216f3dd71cc023ec4d8ef0c9a90a554c0f09362 (patch)
tree       4ddcb2517e4094d930f8dd9e5604b8e212e86fa2  /drivers/gpu/nvgpu/gk20a/regops_gk20a.c
parent     63e6e8ee3ee9ce01b1f7d4c0014bb589df105d71 (diff)
gpu: nvgpu: allow global regops before ctx is created
In nvgpu_ioctl_channel_reg_ops(), we currently check first whether a
context has been allocated, and fail the regops operation if it has not.
But it is possible that the operation includes only global regops, which
do not need a gr context to be allocated.

So move this context check from nvgpu_ioctl_channel_reg_ops() to
exec_regops_gk20a(), and apply it only if context ops are included in
the regops batch.

Bug 200431958

Change-Id: Iaa4953235d95b2106d5f81a456141d3a57603fb9
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1789262
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
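[Editor's illustration] To make the reordering concrete, here is a minimal,
self-contained C sketch of the new flow. The names exec_regops_sketch(),
ctx_available(), and struct reg_op are hypothetical stand-ins chosen for the
example, not the driver's actual API.

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>

    enum regop_type { TYPE_GLOBAL, TYPE_GR_CTX };

    struct reg_op {
            enum regop_type type;
            unsigned int offset;
    };

    /* hypothetical stand-in for gr_context_info_available() */
    static bool ctx_available(void)
    {
            return false;   /* e.g. golden image not yet initialized */
    }

    static int exec_regops_sketch(const struct reg_op *ops, size_t num_ops)
    {
            size_t ctx_count = 0;
            size_t i;

            /* count ops that are not global, i.e. that need a gr context */
            for (i = 0; i < num_ops; i++) {
                    if (ops[i].type != TYPE_GLOBAL)
                            ctx_count++;
            }

            /*
             * Old behavior: the ioctl path rejected every batch when no
             * context existed. New behavior: only batches that actually
             * contain context ops require the context to be in place.
             */
            if (ctx_count != 0 && !ctx_available())
                    return -ENODEV;

            /* ... perform the global (and any context) reads/writes ... */
            return 0;
    }

    int main(void)
    {
            const struct reg_op global_only[] = {
                    { TYPE_GLOBAL, 0x100 },
                    { TYPE_GLOBAL, 0x104 },
            };

            /* succeeds even though ctx_available() reports false */
            return exec_regops_sketch(global_only, 2) == 0 ? 0 : 1;
    }

The design point is that the per-type counts (ctx_count here;
ctx_rd_count/ctx_wr_count in the driver) are already computed while
validating the batch, so the context requirement can be derived from the
batch itself instead of being imposed on every caller up front.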
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/regops_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/regops_gk20a.c  34
1 file changed, 33 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
index a5595c15..232d01a7 100644
--- a/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/regops_gk20a.c
@@ -53,6 +53,30 @@ static inline bool linear_search(u32 offset, const u32 *list, int size)
 	return false;
 }
 
+/*
+ * In order to perform a context relative op the context has
+ * to be created already... which would imply that the
+ * context switch mechanism has already been put in place.
+ * So by the time we perform such an operation it should always
+ * be possible to query for the appropriate context offsets, etc.
+ *
+ * But note: while the dbg_gpu bind requires a channel fd,
+ * it doesn't require an allocated gr/compute obj at that point...
+ */
+static bool gr_context_info_available(struct gr_gk20a *gr)
+{
+	int err;
+
+	nvgpu_mutex_acquire(&gr->ctx_mutex);
+	err = !gr->ctx_vars.golden_image_initialized;
+	nvgpu_mutex_release(&gr->ctx_mutex);
+	if (err) {
+		return false;
+	}
+
+	return true;
+}
+
 static bool validate_reg_ops(struct dbg_session_gk20a *dbg_s,
 			     u32 *ctx_rd_count, u32 *ctx_wr_count,
 			     struct nvgpu_dbg_reg_op *ops,
@@ -91,7 +116,6 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 	ok = validate_reg_ops(dbg_s,
 			      &ctx_rd_count, &ctx_wr_count,
 			      ops, num_ops);
-
 	if (!ok) {
 		nvgpu_err(g, "invalid op(s)");
 		err = -EINVAL;
@@ -99,6 +123,14 @@ int exec_regops_gk20a(struct dbg_session_gk20a *dbg_s,
 		goto clean_up;
 	}
 
+	/* be sure that ctx info is in place if there are ctx ops */
+	if (ctx_wr_count | ctx_rd_count) {
+		if (!gr_context_info_available(&g->gr)) {
+			nvgpu_err(g, "gr context data not available");
+			return -ENODEV;
+		}
+	}
+
 	for (i = 0; i < num_ops; i++) {
 		/* if it isn't global then it is done in the ctx ops... */
 		if (ops[i].type != REGOP(TYPE_GLOBAL))
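[Editor's note] A side note on the added guard: it uses a bitwise OR,
ctx_wr_count | ctx_rd_count, rather than a logical OR. For unsigned counters
the bitwise OR is nonzero exactly when at least one counter is nonzero, so
the test is equivalent to (ctx_wr_count || ctx_rd_count). A trivial
standalone illustration (not driver code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int ctx_rd_count = 0;
            unsigned int ctx_wr_count = 3;

            /* nonzero iff at least one counter is nonzero, matching
             * the semantics of (ctx_wr_count || ctx_rd_count) */
            if (ctx_wr_count | ctx_rd_count)
                    printf("batch contains ctx ops: gr context required\n");
            else
                    printf("global ops only: no gr context needed\n");

            return 0;
    }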