summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorRichard Zhao <rizhao@nvidia.com>2016-10-10 14:32:05 -0400
committerDeepak Nibade <dnibade@nvidia.com>2016-12-27 04:56:50 -0500
commitcb78f5aa749fcea198851ae4adf6e3acd47b37ac (patch)
tree02353aa190a39b868149b505e999cda3c1dfc1d1 /drivers
parent9b11fb9b8d5f8d98ae8479d0da455e66a692e6c8 (diff)
gpu: nvgpu: vgpu: add set_preemption_mode
Implement HAL callback set_preemption_mode Bug 200238497 JIRA VFND-2683 Change-Id: I8fca8e1ba112d8782ce18f0899eca38a1d12b512 Signed-off-by: Richard Zhao <rizhao@nvidia.com> Reviewed-on: http://git-master/r/1236976 GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c149
1 file changed, 132 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
index 78205afb..4746f04b 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_gr_gp10b.c
@@ -51,31 +51,80 @@ static int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
51 u32 class, 51 u32 class,
52 u32 flags) 52 u32 flags)
53{ 53{
54 struct tegra_vgpu_cmd_msg msg = {0};
55 struct tegra_vgpu_gr_bind_ctxsw_buffers_params *p =
56 &msg.params.gr_bind_ctxsw_buffers;
57 struct gr_ctx_desc *gr_ctx; 54 struct gr_ctx_desc *gr_ctx;
55 u32 graphics_preempt_mode = 0;
56 u32 compute_preempt_mode = 0;
58 int err; 57 int err;
59 58
60 gk20a_dbg_fn(""); 59 gk20a_dbg_fn("");
61 60
62 WARN_ON(TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAX !=
63 TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_LAST);
64
65 err = vgpu_gr_alloc_gr_ctx(g, __gr_ctx, vm, class, flags); 61 err = vgpu_gr_alloc_gr_ctx(g, __gr_ctx, vm, class, flags);
66 if (err) 62 if (err)
67 return err; 63 return err;
68 64
69 gr_ctx = *__gr_ctx; 65 gr_ctx = *__gr_ctx;
70 66
67 if (flags & NVGPU_ALLOC_OBJ_FLAGS_GFXP)
68 graphics_preempt_mode = NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP;
69 if (flags & NVGPU_ALLOC_OBJ_FLAGS_CILP)
70 compute_preempt_mode = NVGPU_COMPUTE_PREEMPTION_MODE_CILP;
71
72 if (graphics_preempt_mode || compute_preempt_mode) {
73 if (g->ops.gr.set_ctxsw_preemption_mode) {
74 err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm,
75 class, graphics_preempt_mode, compute_preempt_mode);
76 if (err) {
77 gk20a_err(dev_from_gk20a(g),
78 "set_ctxsw_preemption_mode failed");
79 goto fail;
80 }
81 } else {
82 err = -ENOSYS;
83 goto fail;
84 }
85 }
86
87 gk20a_dbg_fn("done");
88 return err;
89
90fail:
91 vgpu_gr_gp10b_free_gr_ctx(g, vm, gr_ctx);
92 return err;
93}
94
95static int vgpu_gr_gp10b_set_ctxsw_preemption_mode(struct gk20a *g,
96 struct gr_ctx_desc *gr_ctx,
97 struct vm_gk20a *vm, u32 class,
98 u32 graphics_preempt_mode,
99 u32 compute_preempt_mode)
100{
101 struct tegra_vgpu_cmd_msg msg = {};
102 struct tegra_vgpu_gr_bind_ctxsw_buffers_params *p =
103 &msg.params.gr_bind_ctxsw_buffers;
104 int err = 0;
105
106 WARN_ON(TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_MAX !=
107 TEGRA_VGPU_GR_BIND_CTXSW_BUFFER_LAST);
108
71 if (class == PASCAL_A && g->gr.t18x.ctx_vars.force_preemption_gfxp) 109 if (class == PASCAL_A && g->gr.t18x.ctx_vars.force_preemption_gfxp)
72 flags |= NVGPU_ALLOC_OBJ_FLAGS_GFXP; 110 graphics_preempt_mode = NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP;
73 111
74 if (class == PASCAL_COMPUTE_A && 112 if (class == PASCAL_COMPUTE_A &&
75 g->gr.t18x.ctx_vars.force_preemption_cilp) 113 g->gr.t18x.ctx_vars.force_preemption_cilp)
76 flags |= NVGPU_ALLOC_OBJ_FLAGS_CILP; 114 compute_preempt_mode = NVGPU_COMPUTE_PREEMPTION_MODE_CILP;
115
116 /* check for invalid combinations */
117 if ((graphics_preempt_mode == 0) && (compute_preempt_mode == 0))
118 return -EINVAL;
119
120 if ((graphics_preempt_mode == NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP) &&
121 (compute_preempt_mode == NVGPU_COMPUTE_PREEMPTION_MODE_CILP))
122 return -EINVAL;
77 123
78 if (flags & NVGPU_ALLOC_OBJ_FLAGS_GFXP) { 124 /* set preemption modes */
125 switch (graphics_preempt_mode) {
126 case NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP:
127 {
79 u32 spill_size = 128 u32 spill_size =
80 gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() * 129 gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v() *
81 gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v(); 130 gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v();
@@ -146,15 +195,37 @@ static int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
146 195
147 gr_ctx->graphics_preempt_mode = NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP; 196 gr_ctx->graphics_preempt_mode = NVGPU_GRAPHICS_PREEMPTION_MODE_GFXP;
148 p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_GFX_GFXP; 197 p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_GFX_GFXP;
198 break;
199 }
200 case NVGPU_GRAPHICS_PREEMPTION_MODE_WFI:
201 gr_ctx->graphics_preempt_mode = graphics_preempt_mode;
202 break;
203
204 default:
205 break;
149 } 206 }
150 207
151 if (class == PASCAL_COMPUTE_A) { 208 if (class == PASCAL_COMPUTE_A) {
152 if (flags & NVGPU_ALLOC_OBJ_FLAGS_CILP) { 209 switch (compute_preempt_mode) {
153 gr_ctx->compute_preempt_mode = NVGPU_COMPUTE_PREEMPTION_MODE_CILP; 210 case NVGPU_COMPUTE_PREEMPTION_MODE_WFI:
154 p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CILP; 211 gr_ctx->compute_preempt_mode =
155 } else { 212 NVGPU_COMPUTE_PREEMPTION_MODE_WFI;
156 gr_ctx->compute_preempt_mode = NVGPU_COMPUTE_PREEMPTION_MODE_CTA; 213 p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_WFI;
157 p->mode = TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CTA; 214 break;
215 case NVGPU_COMPUTE_PREEMPTION_MODE_CTA:
216 gr_ctx->compute_preempt_mode =
217 NVGPU_COMPUTE_PREEMPTION_MODE_CTA;
218 p->mode =
219 TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CTA;
220 break;
221 case NVGPU_COMPUTE_PREEMPTION_MODE_CILP:
222 gr_ctx->compute_preempt_mode =
223 NVGPU_COMPUTE_PREEMPTION_MODE_CILP;
224 p->mode =
225 TEGRA_VGPU_GR_CTXSW_PREEMPTION_MODE_COMPUTE_CILP;
226 break;
227 default:
228 break;
158 } 229 }
159 } 230 }
160 231
@@ -169,11 +240,52 @@ static int vgpu_gr_gp10b_alloc_gr_ctx(struct gk20a *g,
169 } 240 }
170 } 241 }
171 242
172 gk20a_dbg_fn("done");
173 return err; 243 return err;
174 244
175fail: 245fail:
176 vgpu_gr_gp10b_free_gr_ctx(g, vm, gr_ctx); 246 gk20a_err(dev_from_gk20a(g), "%s failed %d", __func__, err);
247 return err;
248}
249
250static int vgpu_gr_gp10b_set_preemption_mode(struct channel_gk20a *ch,
251 u32 graphics_preempt_mode,
252 u32 compute_preempt_mode)
253{
254 struct gr_ctx_desc *gr_ctx = ch->ch_ctx.gr_ctx;
255 struct gk20a *g = ch->g;
256 struct tsg_gk20a *tsg;
257 struct vm_gk20a *vm;
258 u32 class;
259 int err;
260
261 class = ch->obj_class;
262 if (!class)
263 return -EINVAL;
264
265 /* preemption already set ? */
266 if (gr_ctx->graphics_preempt_mode || gr_ctx->compute_preempt_mode)
267 return -EINVAL;
268
269 if (gk20a_is_channel_marked_as_tsg(ch)) {
270 tsg = &g->fifo.tsg[ch->tsgid];
271 vm = tsg->vm;
272 } else {
273 vm = ch->vm;
274 }
275
276 if (g->ops.gr.set_ctxsw_preemption_mode) {
277 err = g->ops.gr.set_ctxsw_preemption_mode(g, gr_ctx, vm, class,
278 graphics_preempt_mode,
279 compute_preempt_mode);
280 if (err) {
281 gk20a_err(dev_from_gk20a(g),
282 "set_ctxsw_preemption_mode failed");
283 return err;
284 }
285 } else {
286 err = -ENOSYS;
287 }
288
177 return err; 289 return err;
178} 290}
179 291
@@ -202,4 +314,7 @@ void vgpu_gp10b_init_gr_ops(struct gpu_ops *gops)
202 gops->gr.alloc_gr_ctx = vgpu_gr_gp10b_alloc_gr_ctx; 314 gops->gr.alloc_gr_ctx = vgpu_gr_gp10b_alloc_gr_ctx;
203 gops->gr.free_gr_ctx = vgpu_gr_gp10b_free_gr_ctx; 315 gops->gr.free_gr_ctx = vgpu_gr_gp10b_free_gr_ctx;
204 gops->gr.init_ctx_state = vgpu_gr_gp10b_init_ctx_state; 316 gops->gr.init_ctx_state = vgpu_gr_gp10b_init_ctx_state;
317 gops->gr.set_preemption_mode = vgpu_gr_gp10b_set_preemption_mode;
318 gops->gr.set_ctxsw_preemption_mode =
319 vgpu_gr_gp10b_set_ctxsw_preemption_mode;
205} 320}