diff options
author | Terje Bergstrom <tbergstrom@nvidia.com> | 2017-12-15 12:04:15 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-01-17 15:29:09 -0500 |
commit | 2f6698b863c9cc1db6455637b7c72e812b470b93 (patch) | |
tree | d0c8abf32d6994b9f54bf5eddafd8316e038c829 /drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c | |
parent | 6a73114788ffafe4c53771c707ecbd9c9ea0a117 (diff) |
gpu: nvgpu: Make graphics context property of TSG
Move graphics context ownership to TSG instead of channel. Combine
channel_ctx_gk20a and gr_ctx_desc to one structure, because the split
between them was arbitrary. Move context header to be property of
channel.
Bug 1842197
Change-Id: I410e3262f80b318d8528bcbec270b63a2d8d2ff9
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1639532
Reviewed-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Tested-by: Seshendra Gadagottu <sgadagottu@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c | 25 |
1 file changed, 11 insertions, 14 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c index ed61f16b..9adf20d1 100644 --- a/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c +++ b/drivers/gpu/nvgpu/common/linux/vgpu/gp10b/vgpu_gr_gp10b.c | |||
@@ -27,12 +27,11 @@ | |||
27 | #include <nvgpu/hw/gp10b/hw_gr_gp10b.h> | 27 | #include <nvgpu/hw/gp10b/hw_gr_gp10b.h> |
28 | 28 | ||
29 | int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g, | 29 | int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g, |
30 | struct gr_ctx_desc **__gr_ctx, | 30 | struct nvgpu_gr_ctx *gr_ctx, |
31 | struct vm_gk20a *vm, | 31 | struct vm_gk20a *vm, |
32 | u32 class, | 32 | u32 class, |
33 | u32 flags) | 33 | u32 flags) |
34 | { | 34 | { |
35 | struct gr_ctx_desc *gr_ctx; | ||
36 | u32 graphics_preempt_mode = 0; | 35 | u32 graphics_preempt_mode = 0; |
37 | u32 compute_preempt_mode = 0; | 36 | u32 compute_preempt_mode = 0; |
38 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); | 37 | struct vgpu_priv_data *priv = vgpu_get_priv_data(g); |
@@ -40,12 +39,10 @@ int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g, | |||
40 | 39 | ||
41 | gk20a_dbg_fn(""); | 40 | gk20a_dbg_fn(""); |
42 | 41 | ||
43 | err = vgpu_gr_alloc_gr_ctx(g, __gr_ctx, vm, class, flags); | 42 | err = vgpu_gr_alloc_gr_ctx(g, gr_ctx, vm, class, flags); |
44 | if (err) | 43 | if (err) |
45 | return err; | 44 | return err; |
46 | 45 | ||
47 | gr_ctx = *__gr_ctx; | ||
48 | |||
49 | if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP) | 46 | if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_GFXP) |
50 | graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP; | 47 | graphics_preempt_mode = NVGPU_PREEMPTION_MODE_GRAPHICS_GFXP; |
51 | if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP) | 48 | if (flags & NVGPU_OBJ_CTX_FLAGS_SUPPORT_CILP) |
@@ -84,7 +81,7 @@ fail: | |||
84 | } | 81 | } |
85 | 82 | ||
86 | int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g, | 83 | int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g, |
87 | struct gr_ctx_desc *gr_ctx, | 84 | struct nvgpu_gr_ctx *gr_ctx, |
88 | struct vm_gk20a *vm, u32 class, | 85 | struct vm_gk20a *vm, u32 class, |
89 | u32 graphics_preempt_mode, | 86 | u32 graphics_preempt_mode, |
90 | u32 compute_preempt_mode) | 87 | u32 compute_preempt_mode) |
@@ -240,7 +237,7 @@ int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch, | |||
240 | u32 graphics_preempt_mode, | 237 | u32 graphics_preempt_mode, |
241 | u32 compute_preempt_mode) | 238 | u32 compute_preempt_mode) |
242 | { | 239 | { |
243 | struct gr_ctx_desc *gr_ctx = ch->ch_ctx.gr_ctx; | 240 | struct nvgpu_gr_ctx *gr_ctx; |
244 | struct gk20a *g = ch->g; | 241 | struct gk20a *g = ch->g; |
245 | struct tsg_gk20a *tsg; | 242 | struct tsg_gk20a *tsg; |
246 | struct vm_gk20a *vm; | 243 | struct vm_gk20a *vm; |
@@ -251,6 +248,13 @@ int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch, | |||
251 | if (!class) | 248 | if (!class) |
252 | return -EINVAL; | 249 | return -EINVAL; |
253 | 250 | ||
251 | tsg = tsg_gk20a_from_ch(ch); | ||
252 | if (!tsg) | ||
253 | return -EINVAL; | ||
254 | |||
255 | vm = tsg->vm; | ||
256 | gr_ctx = &tsg->gr_ctx; | ||
257 | |||
254 | /* skip setting anything if both modes are already set */ | 258 | /* skip setting anything if both modes are already set */ |
255 | if (graphics_preempt_mode && | 259 | if (graphics_preempt_mode && |
256 | (graphics_preempt_mode == gr_ctx->graphics_preempt_mode)) | 260 | (graphics_preempt_mode == gr_ctx->graphics_preempt_mode)) |
@@ -263,13 +267,6 @@ int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch, | |||
263 | if (graphics_preempt_mode == 0 && compute_preempt_mode == 0) | 267 | if (graphics_preempt_mode == 0 && compute_preempt_mode == 0) |
264 | return 0; | 268 | return 0; |
265 | 269 | ||
266 | if (gk20a_is_channel_marked_as_tsg(ch)) { | ||
267 | tsg = &g->fifo.tsg[ch->tsgid]; | ||
268 | vm = tsg->vm; | ||
269 | } else { | ||
270 | vm = ch->vm; | ||
271 | } | ||
272 | |||
273 | if (g->ops.gr.set_ctxsw_preemption_mode) { | 270 | if (g->ops.gr.set_ctxsw_preemption_mode) { |
274 | err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class, | 271 | err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class, |
275 | graphics_preempt_mode, | 272 | graphics_preempt_mode, |