Diffstat (limited to 'drivers/gpu/nvgpu/common/fifo/tsg.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/fifo/tsg.c | 441
1 files changed, 441 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/fifo/tsg.c b/drivers/gpu/nvgpu/common/fifo/tsg.c
new file mode 100644
index 00000000..0892e8bf
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fifo/tsg.c
@@ -0,0 +1,441 @@
/*
 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/os_sched.h>
#include <nvgpu/channel.h>
#include <nvgpu/tsg.h>
#include <nvgpu/gk20a.h>

bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch)
{
	return ch->tsgid != NVGPU_INVALID_TSG_ID;
}

int gk20a_enable_tsg(struct tsg_gk20a *tsg)
{
	struct gk20a *g = tsg->g;
	struct channel_gk20a *ch;
	bool is_next, is_ctx_reload;

	gk20a_fifo_disable_tsg_sched(g, tsg);

	/*
	 * Due to a h/w bug present in Maxwell and Pascal, all channels with
	 * NEXT or CTX_RELOAD set must be enabled first; the rest of the
	 * channels are enabled afterwards.
	 */
	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
		is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
		is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);

		if (is_next || is_ctx_reload) {
			g->ops.fifo.enable_channel(ch);
		}
	}

	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
		is_next = gk20a_fifo_channel_status_is_next(g, ch->chid);
		is_ctx_reload = gk20a_fifo_channel_status_is_ctx_reload(g, ch->chid);

		if (is_next || is_ctx_reload) {
			continue;
		}

		g->ops.fifo.enable_channel(ch);
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);

	gk20a_fifo_enable_tsg_sched(g, tsg);

	return 0;
}

int gk20a_disable_tsg(struct tsg_gk20a *tsg)
{
	struct gk20a *g = tsg->g;
	struct channel_gk20a *ch;

	nvgpu_rwsem_down_read(&tsg->ch_list_lock);
	nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) {
		g->ops.fifo.disable_channel(ch);
	}
	nvgpu_rwsem_up_read(&tsg->ch_list_lock);

	return 0;
}

static bool gk20a_is_channel_active(struct gk20a *g, struct channel_gk20a *ch)
{
	struct fifo_gk20a *f = &g->fifo;
	struct fifo_runlist_info_gk20a *runlist;
	unsigned int i;

	for (i = 0; i < f->max_runlists; ++i) {
		runlist = &f->runlist_info[i];
		if (test_bit(ch->chid, runlist->active_channels)) {
			return true;
		}
	}

	return false;
}

/*
 * API to mark a channel as part of a TSG.
 *
 * Note that the channel must not be runnable when it is bound to the TSG.
 */
int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg,
			struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;

	nvgpu_log_fn(g, " ");

	/* check if channel is already bound to some TSG */
	if (gk20a_is_channel_marked_as_tsg(ch)) {
		return -EINVAL;
	}

	/* channel cannot be bound to TSG if it is already active */
	if (gk20a_is_channel_active(tsg->g, ch)) {
		return -EINVAL;
	}

	ch->tsgid = tsg->tsgid;

	/* all channels bound to a TSG must use the same runlist_id */
	if (tsg->runlist_id == FIFO_INVAL_TSG_ID) {
		tsg->runlist_id = ch->runlist_id;
	} else if (tsg->runlist_id != ch->runlist_id) {
		nvgpu_err(tsg->g,
			"Error: channels in a TSG must share the same runlist: ch[%d] tsg[%d]",
			ch->runlist_id, tsg->runlist_id);
		return -EINVAL;
	}

	nvgpu_rwsem_down_write(&tsg->ch_list_lock);
	nvgpu_list_add_tail(&ch->ch_entry, &tsg->ch_list);
	nvgpu_rwsem_up_write(&tsg->ch_list_lock);

	nvgpu_ref_get(&tsg->refcount);

	nvgpu_log(g, gpu_dbg_fn, "BIND tsg:%d channel:%d\n",
			tsg->tsgid, ch->chid);

	nvgpu_log_fn(g, "done");
	return 0;
}

int gk20a_tsg_unbind_channel(struct channel_gk20a *ch)
{
	struct gk20a *g = ch->g;
	struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid];
	int err;

	err = g->ops.fifo.tsg_unbind_channel(ch);
	if (err) {
		nvgpu_err(g, "Channel %d unbind failed, tearing down TSG %d",
			ch->chid, tsg->tsgid);

		gk20a_fifo_abort_tsg(ch->g, ch->tsgid, true);
		/* If channel unbind fails, channel is still part of runlist */
		channel_gk20a_update_runlist(ch, false);

		nvgpu_rwsem_down_write(&tsg->ch_list_lock);
		nvgpu_list_del(&ch->ch_entry);
		nvgpu_rwsem_up_write(&tsg->ch_list_lock);
	}

	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
	ch->tsgid = NVGPU_INVALID_TSG_ID;

	nvgpu_log(g, gpu_dbg_fn, "UNBIND tsg:%d channel:%d\n",
			tsg->tsgid, ch->chid);

	return 0;
}

int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid)
{
	struct tsg_gk20a *tsg = NULL;
	int err;

	if (tsgid >= g->fifo.num_channels) {
		return -EINVAL;
	}

	tsg = &g->fifo.tsg[tsgid];

	tsg->in_use = false;
	tsg->tsgid = tsgid;

	nvgpu_init_list_node(&tsg->ch_list);
	nvgpu_rwsem_init(&tsg->ch_list_lock);

	nvgpu_init_list_node(&tsg->event_id_list);
	err = nvgpu_mutex_init(&tsg->event_id_list_lock);
	if (err) {
		tsg->in_use = true; /* make this TSG unusable */
		return err;
	}

	return 0;
}

int gk20a_tsg_set_runlist_interleave(struct tsg_gk20a *tsg, u32 level)
{
	struct gk20a *g = tsg->g;
	int ret;

	nvgpu_log(g, gpu_dbg_sched, "tsgid=%u interleave=%u", tsg->tsgid, level);

	switch (level) {
	case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW:
	case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_MEDIUM:
	case NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_HIGH:
		ret = g->ops.fifo.set_runlist_interleave(g, tsg->tsgid,
							0, level);
		if (!ret) {
			tsg->interleave_level = level;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret ? ret : g->ops.fifo.update_runlist(g, tsg->runlist_id,
						~0, true, true);
}

int gk20a_tsg_set_timeslice(struct tsg_gk20a *tsg, u32 timeslice)
{
	struct gk20a *g = tsg->g;

	nvgpu_log(g, gpu_dbg_sched, "tsgid=%u timeslice=%u us",
			tsg->tsgid, timeslice);

	return g->ops.fifo.tsg_set_timeslice(tsg, timeslice);
}

u32 gk20a_tsg_get_timeslice(struct tsg_gk20a *tsg)
{
	struct gk20a *g = tsg->g;

	if (!tsg->timeslice_us) {
		return g->ops.fifo.default_timeslice_us(g);
	}

	return tsg->timeslice_us;
}

static void release_used_tsg(struct fifo_gk20a *f, struct tsg_gk20a *tsg)
{
	nvgpu_mutex_acquire(&f->tsg_inuse_mutex);
	f->tsg[tsg->tsgid].in_use = false;
	nvgpu_mutex_release(&f->tsg_inuse_mutex);
}

static struct tsg_gk20a *gk20a_tsg_acquire_unused_tsg(struct fifo_gk20a *f)
{
	struct tsg_gk20a *tsg = NULL;
	unsigned int tsgid;

	nvgpu_mutex_acquire(&f->tsg_inuse_mutex);
	for (tsgid = 0; tsgid < f->num_channels; tsgid++) {
		if (!f->tsg[tsgid].in_use) {
			f->tsg[tsgid].in_use = true;
			tsg = &f->tsg[tsgid];
			break;
		}
	}
	nvgpu_mutex_release(&f->tsg_inuse_mutex);

	return tsg;
}

struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
{
	struct tsg_gk20a *tsg;
	int err;

	tsg = gk20a_tsg_acquire_unused_tsg(&g->fifo);
	if (tsg == NULL) {
		return NULL;
	}

	/*
	 * The SM error state buffer must be allocated after
	 * g->ops.gr.init_fs_state(), since gr->no_of_sm is initialized there.
	 */
	if (g->gr.no_of_sm == 0U) {
		nvgpu_err(g, "no_of_sm %d not set, failed allocation",
			g->gr.no_of_sm);
		release_used_tsg(&g->fifo, tsg); /* return the slot on failure */
		return NULL;
	}

	err = gk20a_tsg_alloc_sm_error_states_mem(g, tsg, g->gr.no_of_sm);
	if (err != 0) {
		release_used_tsg(&g->fifo, tsg);
		return NULL;
	}

	tsg->g = g;
	tsg->num_active_channels = 0;
	nvgpu_ref_init(&tsg->refcount);

	tsg->vm = NULL;
	tsg->interleave_level = NVGPU_FIFO_RUNLIST_INTERLEAVE_LEVEL_LOW;
	tsg->timeslice_us = 0;
	tsg->timeslice_timeout = 0;
	tsg->timeslice_scale = 0;
	tsg->runlist_id = ~0;
	tsg->tgid = pid;
	tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE;

	if (g->ops.fifo.init_eng_method_buffers) {
		g->ops.fifo.init_eng_method_buffers(g, tsg);
	}

	if (g->ops.fifo.tsg_open) {
		err = g->ops.fifo.tsg_open(tsg);
		if (err != 0) {
			nvgpu_err(g, "tsg %d fifo open failed %d",
				tsg->tsgid, err);
			goto clean_up;
		}
	}

	nvgpu_log(g, gpu_dbg_fn, "tsg opened %d\n", tsg->tsgid);

	return tsg;

clean_up:
	if (tsg->sm_error_states != NULL) {
		nvgpu_kfree(g, tsg->sm_error_states);
		tsg->sm_error_states = NULL;
	}

	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
	return NULL;
}

void gk20a_tsg_release(struct nvgpu_ref *ref)
{
	struct tsg_gk20a *tsg = container_of(ref, struct tsg_gk20a, refcount);
	struct gk20a *g = tsg->g;
	struct gk20a_event_id_data *event_id_data, *event_id_data_temp;

	if (g->ops.fifo.tsg_release != NULL) {
		g->ops.fifo.tsg_release(tsg);
	}

	if (nvgpu_mem_is_valid(&tsg->gr_ctx.mem)) {
		gr_gk20a_free_tsg_gr_ctx(tsg);
	}

	if (g->ops.fifo.deinit_eng_method_buffers != NULL) {
		g->ops.fifo.deinit_eng_method_buffers(g, tsg);
	}

	if (tsg->vm != NULL) {
		nvgpu_vm_put(tsg->vm);
		tsg->vm = NULL;
	}

	if (tsg->sm_error_states != NULL) {
		nvgpu_kfree(g, tsg->sm_error_states);
		tsg->sm_error_states = NULL;
	}

	/* unhook all events created on this TSG */
	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
	nvgpu_list_for_each_entry_safe(event_id_data, event_id_data_temp,
			&tsg->event_id_list,
			gk20a_event_id_data,
			event_id_node) {
		nvgpu_list_del(&event_id_data->event_id_node);
	}
	nvgpu_mutex_release(&tsg->event_id_list_lock);

	release_used_tsg(&g->fifo, tsg);

	tsg->runlist_id = ~0;
	tsg->sm_exception_mask_type = NVGPU_SM_EXCEPTION_TYPE_MASK_NONE;

	nvgpu_log(g, gpu_dbg_fn, "tsg released %d\n", tsg->tsgid);
}

struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch)
{
	struct tsg_gk20a *tsg = NULL;

	if (gk20a_is_channel_marked_as_tsg(ch)) {
		struct gk20a *g = ch->g;
		struct fifo_gk20a *f = &g->fifo;

		tsg = &f->tsg[ch->tsgid];
	}

	return tsg;
}

int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g,
					struct tsg_gk20a *tsg,
					u32 num_sm)
{
	int err = 0;

	if (tsg->sm_error_states != NULL) {
		return err;
	}

	tsg->sm_error_states = nvgpu_kzalloc(g,
			sizeof(struct nvgpu_tsg_sm_error_state)
			* num_sm);
	if (tsg->sm_error_states == NULL) {
		nvgpu_err(g, "sm_error_states mem allocation failed");
		err = -ENOMEM;
	}

	return err;
}

void gk20a_tsg_update_sm_error_state_locked(struct tsg_gk20a *tsg,
			u32 sm_id,
			struct nvgpu_tsg_sm_error_state *sm_error_state)
{
	struct nvgpu_tsg_sm_error_state *tsg_sm_error_states;

	tsg_sm_error_states = tsg->sm_error_states + sm_id;

	tsg_sm_error_states->hww_global_esr =
		sm_error_state->hww_global_esr;
	tsg_sm_error_states->hww_warp_esr =
		sm_error_state->hww_warp_esr;
	tsg_sm_error_states->hww_warp_esr_pc =
		sm_error_state->hww_warp_esr_pc;
	tsg_sm_error_states->hww_global_esr_report_mask =
		sm_error_state->hww_global_esr_report_mask;
	tsg_sm_error_states->hww_warp_esr_report_mask =
		sm_error_state->hww_warp_esr_report_mask;
}
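
A minimal sketch of how these entry points compose from a caller's point of view, assuming a valid struct gk20a *g, a process id pid, and an unbound, inactive channel ch; the real callers (e.g. the ioctl layer) live outside this file:

	/* Hypothetical caller; error handling elided. */
	struct tsg_gk20a *tsg = gk20a_tsg_open(g, pid);	/* claim a free slot, refcount = 1 */
	gk20a_tsg_bind_channel(tsg, ch);	/* takes an extra TSG reference */
	gk20a_enable_tsg(tsg);			/* make the bound channels runnable */
	/* ... submit and run work ... */
	gk20a_disable_tsg(tsg);
	gk20a_tsg_unbind_channel(ch);		/* drops the bind reference */
	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);	/* drop the open reference */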