diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2017-12-15 12:04:15 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-01-17 15:29:09 -0500 |
commit | 2f6698b863c9cc1db6455637b7c72e812b470b93 (patch) | |
tree | d0c8abf32d6994b9f54bf5eddafd8316e038c829 /drivers/gpu/nvgpu/gv11b/subctx_gv11b.c | |
parent | 6a73114788ffafe4c53771c707ecbd9c9ea0a117 (diff) |
gpu: nvgpu: Make graphics context property of TSG
Move graphics context ownership to TSG instead of channel. Combine
channel_ctx_gk20a and gr_ctx_desc to one structure, because the split
between them was arbitrary. Move context header to be property of
channel.
Bug 1842197
Change-Id: I410e3262f80b318d8528bcbec270b63a2d8d2ff9
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1639532
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Tested-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/subctx_gv11b.c')
-rw-r--r-- | drivers/gpu/nvgpu/gv11b/subctx_gv11b.c | 42 |
1 file changed, 36 insertions, 6 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c index fe1aa8a5..607fff91 100644 --- a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c +++ b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c | |||
@@ -43,7 +43,7 @@ static void gv11b_subctx_commit_pdb(struct channel_gk20a *c, | |||
43 | 43 | ||
44 | void gv11b_free_subctx_header(struct channel_gk20a *c) | 44 | void gv11b_free_subctx_header(struct channel_gk20a *c) |
45 | { | 45 | { |
46 | struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header; | 46 | struct ctx_header_desc *ctx = &c->ctx_header; |
47 | struct gk20a *g = c->g; | 47 | struct gk20a *g = c->g; |
48 | 48 | ||
49 | nvgpu_log(g, gpu_dbg_fn, "gv11b_free_subctx_header"); | 49 | nvgpu_log(g, gpu_dbg_fn, "gv11b_free_subctx_header"); |
@@ -57,13 +57,13 @@ void gv11b_free_subctx_header(struct channel_gk20a *c) | |||
57 | 57 | ||
58 | int gv11b_alloc_subctx_header(struct channel_gk20a *c) | 58 | int gv11b_alloc_subctx_header(struct channel_gk20a *c) |
59 | { | 59 | { |
60 | struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header; | 60 | struct ctx_header_desc *ctx = &c->ctx_header; |
61 | struct gk20a *g = c->g; | 61 | struct gk20a *g = c->g; |
62 | int ret = 0; | 62 | int ret = 0; |
63 | 63 | ||
64 | nvgpu_log(g, gpu_dbg_fn, "gv11b_alloc_subctx_header"); | 64 | nvgpu_log(g, gpu_dbg_fn, "gv11b_alloc_subctx_header"); |
65 | 65 | ||
66 | if (ctx->mem.gpu_va == 0) { | 66 | if (!nvgpu_mem_is_valid(&ctx->mem)) { |
67 | ret = nvgpu_dma_alloc_flags_sys(g, | 67 | ret = nvgpu_dma_alloc_flags_sys(g, |
68 | 0, /* No Special flags */ | 68 | 0, /* No Special flags */ |
69 | ctxsw_prog_fecs_header_v(), | 69 | ctxsw_prog_fecs_header_v(), |
@@ -111,20 +111,50 @@ static void gv11b_init_subcontext_pdb(struct channel_gk20a *c, | |||
111 | 111 | ||
112 | int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va) | 112 | int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va) |
113 | { | 113 | { |
114 | struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header; | 114 | struct ctx_header_desc *ctx = &c->ctx_header; |
115 | struct nvgpu_mem *gr_mem; | 115 | struct nvgpu_mem *gr_mem; |
116 | struct gk20a *g = c->g; | 116 | struct gk20a *g = c->g; |
117 | int ret = 0; | 117 | int ret = 0; |
118 | u32 addr_lo, addr_hi; | 118 | u32 addr_lo, addr_hi; |
119 | struct tsg_gk20a *tsg; | ||
120 | struct nvgpu_gr_ctx *gr_ctx; | ||
119 | 121 | ||
120 | addr_lo = u64_lo32(gpu_va); | 122 | tsg = tsg_gk20a_from_ch(c); |
121 | addr_hi = u64_hi32(gpu_va); | 123 | if (!tsg) |
124 | return -EINVAL; | ||
125 | |||
126 | gr_ctx = &tsg->gr_ctx; | ||
122 | 127 | ||
123 | gr_mem = &ctx->mem; | 128 | gr_mem = &ctx->mem; |
124 | g->ops.mm.l2_flush(g, true); | 129 | g->ops.mm.l2_flush(g, true); |
125 | if (nvgpu_mem_begin(g, gr_mem)) | 130 | if (nvgpu_mem_begin(g, gr_mem)) |
126 | return -ENOMEM; | 131 | return -ENOMEM; |
127 | 132 | ||
133 | /* set priv access map */ | ||
134 | addr_lo = u64_lo32(gr_ctx->global_ctx_buffer_va[PRIV_ACCESS_MAP_VA]); | ||
135 | addr_hi = u64_hi32(gr_ctx->global_ctx_buffer_va[PRIV_ACCESS_MAP_VA]); | ||
136 | nvgpu_mem_wr(g, gr_mem, | ||
137 | ctxsw_prog_main_image_priv_access_map_addr_lo_o(), | ||
138 | addr_lo); | ||
139 | nvgpu_mem_wr(g, gr_mem, | ||
140 | ctxsw_prog_main_image_priv_access_map_addr_hi_o(), | ||
141 | addr_hi); | ||
142 | |||
143 | addr_lo = u64_lo32(gr_ctx->patch_ctx.mem.gpu_va); | ||
144 | addr_hi = u64_hi32(gr_ctx->patch_ctx.mem.gpu_va); | ||
145 | nvgpu_mem_wr(g, gr_mem, | ||
146 | ctxsw_prog_main_image_patch_adr_lo_o(), | ||
147 | addr_lo); | ||
148 | nvgpu_mem_wr(g, gr_mem, | ||
149 | ctxsw_prog_main_image_patch_adr_hi_o(), | ||
150 | addr_hi); | ||
151 | |||
152 | g->ops.gr.write_pm_ptr(g, gr_mem, gr_ctx->pm_ctx.mem.gpu_va); | ||
153 | g->ops.gr.write_zcull_ptr(g, gr_mem, gr_ctx->zcull_ctx.gpu_va); | ||
154 | |||
155 | addr_lo = u64_lo32(gpu_va); | ||
156 | addr_hi = u64_hi32(gpu_va); | ||
157 | |||
128 | nvgpu_mem_wr(g, gr_mem, | 158 | nvgpu_mem_wr(g, gr_mem, |
129 | ctxsw_prog_main_image_context_buffer_ptr_hi_o(), addr_hi); | 159 | ctxsw_prog_main_image_context_buffer_ptr_hi_o(), addr_hi); |
130 | nvgpu_mem_wr(g, gr_mem, | 160 | nvgpu_mem_wr(g, gr_mem, |