Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/tsg_gk20a.c')

 drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | 82 ++++++++++++++++++++++++++++++-----
 1 file changed, 76 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 62763da3..624ee1d7 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -275,8 +275,23 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
 	int err;
 
 	tsg = gk20a_tsg_acquire_unused_tsg(&g->fifo);
-	if (!tsg)
+	if (tsg == NULL) {
 		return NULL;
+	}
+
+	/* we need to allocate this after g->ops.gr.init_fs_state() since
+	 * we initialize gr->no_of_sm in this function
+	 */
+	if (g->gr.no_of_sm == 0U) {
+		nvgpu_err(g, "no_of_sm %d not set, failed allocation",
+			g->gr.no_of_sm);
+		return NULL;
+	}
+
+	err = gk20a_tsg_alloc_sm_error_states_mem(g, tsg, g->gr.no_of_sm);
+	if (err != 0) {
+		return NULL;
+	}
 
 	tsg->g = g;
 	tsg->num_active_channels = 0;
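Note: the array allocated above is sized by g->gr.no_of_sm entries of struct nvgpu_tsg_sm_error_state, the per-SM record whose five fields are copied in gk20a_tsg_update_sm_error_state_locked() at the end of this patch. A minimal sketch of the assumed layout follows; the field widths are assumptions, the real definition lives in the nvgpu TSG headers:

	/* Sketch of the assumed per-SM error record; widths are
	 * assumptions for illustration, not taken from the patch.
	 */
	struct nvgpu_tsg_sm_error_state {
		u32 hww_global_esr;
		u32 hww_warp_esr;
		u64 hww_warp_esr_pc;
		u32 hww_global_esr_report_mask;
		u32 hww_warp_esr_report_mask;
	};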
@@ -295,7 +310,7 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
 
 	if (g->ops.fifo.tsg_open) {
 		err = g->ops.fifo.tsg_open(tsg);
-		if (err) {
+		if (err != 0) {
 			nvgpu_err(g, "tsg %d fifo open failed %d",
 				tsg->tsgid, err);
 			goto clean_up;
@@ -307,6 +322,12 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g, pid_t pid)
 	return tsg;
 
 clean_up:
+
+	if (tsg->sm_error_states != NULL) {
+		nvgpu_kfree(g, tsg->sm_error_states);
+		tsg->sm_error_states = NULL;
+	}
+
 	nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release);
 	return NULL;
 }
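This free-and-NULL block is duplicated verbatim in gk20a_tsg_release() in the next hunk. Because the pointer is cleared after the free, running both paths is harmless; if the duplication grew, it could be factored into a small helper. A sketch, with a hypothetical name that is not part of the patch:

	/* Hypothetical helper (not in the patch): frees the per-TSG SM
	 * error state array and clears the pointer so it is safe to call
	 * from both the clean_up path and gk20a_tsg_release().
	 */
	static void gk20a_tsg_free_sm_error_states(struct gk20a *g,
						   struct tsg_gk20a *tsg)
	{
		if (tsg->sm_error_states != NULL) {
			nvgpu_kfree(g, tsg->sm_error_states);
			tsg->sm_error_states = NULL;
		}
	}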
@@ -317,20 +338,28 @@ void gk20a_tsg_release(struct nvgpu_ref *ref)
 	struct gk20a *g = tsg->g;
 	struct gk20a_event_id_data *event_id_data, *event_id_data_temp;
 
-	if (g->ops.fifo.tsg_release)
+	if (g->ops.fifo.tsg_release != NULL) {
 		g->ops.fifo.tsg_release(tsg);
+	}
 
-	if (nvgpu_mem_is_valid(&tsg->gr_ctx.mem))
+	if (nvgpu_mem_is_valid(&tsg->gr_ctx.mem)) {
 		gr_gk20a_free_tsg_gr_ctx(tsg);
+	}
 
-	if (g->ops.fifo.deinit_eng_method_buffers)
+	if (g->ops.fifo.deinit_eng_method_buffers != NULL) {
 		g->ops.fifo.deinit_eng_method_buffers(g, tsg);
+	}
 
-	if (tsg->vm) {
+	if (tsg->vm != NULL) {
 		nvgpu_vm_put(tsg->vm);
 		tsg->vm = NULL;
 	}
 
+	if (tsg->sm_error_states != NULL) {
+		nvgpu_kfree(g, tsg->sm_error_states);
+		tsg->sm_error_states = NULL;
+	}
+
 	/* unhook all events created on this TSG */
 	nvgpu_mutex_acquire(&tsg->event_id_list_lock);
 	nvgpu_list_for_each_entry_safe(event_id_data, event_id_data_temp,
@@ -360,3 +389,44 @@ struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch)
 
 	return tsg;
 }
+
+int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g,
+					struct tsg_gk20a *tsg,
+					u32 num_sm)
+{
+	int err = 0;
+
+	if (tsg->sm_error_states != NULL) {
+		return err;
+	}
+
+	tsg->sm_error_states = nvgpu_kzalloc(g,
+			sizeof(struct nvgpu_tsg_sm_error_state)
+			* num_sm);
+	if (tsg->sm_error_states == NULL) {
+		nvgpu_err(g, "sm_error_states mem allocation failed");
+		err = -ENOMEM;
+	}
+
+	return err;
+}
+
+void gk20a_tsg_update_sm_error_state_locked(struct tsg_gk20a *tsg,
+			u32 sm_id,
+			struct nvgpu_tsg_sm_error_state *sm_error_state)
+{
+	struct nvgpu_tsg_sm_error_state *tsg_sm_error_states;
+
+	tsg_sm_error_states = tsg->sm_error_states + sm_id;
+
+	tsg_sm_error_states->hww_global_esr =
+		sm_error_state->hww_global_esr;
+	tsg_sm_error_states->hww_warp_esr =
+		sm_error_state->hww_warp_esr;
+	tsg_sm_error_states->hww_warp_esr_pc =
+		sm_error_state->hww_warp_esr_pc;
+	tsg_sm_error_states->hww_global_esr_report_mask =
+		sm_error_state->hww_global_esr_report_mask;
+	tsg_sm_error_states->hww_warp_esr_report_mask =
+		sm_error_state->hww_warp_esr_report_mask;
+}
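Two usage notes on the new functions. gk20a_tsg_alloc_sm_error_states_mem() is idempotent: if the array already exists it returns 0 without reallocating, so the open path cannot leak on repeated calls. And the _locked suffix on gk20a_tsg_update_sm_error_state_locked() signals that the caller must hold whatever lock serializes SM error state updates. A minimal caller sketch, assuming the serializing lock is g->dbg_sessions_lock; the handler name, lock choice, and bounds check are assumptions for illustration, not part of the patch:

	/* Hypothetical caller: records one SM's error state into its TSG.
	 * Lock choice (g->dbg_sessions_lock) is an assumption.
	 */
	static void record_sm_error_to_tsg(struct gk20a *g,
			struct tsg_gk20a *tsg, u32 sm_id,
			struct nvgpu_tsg_sm_error_state *state)
	{
		if (sm_id >= g->gr.no_of_sm) {
			return;
		}

		nvgpu_mutex_acquire(&g->dbg_sessions_lock);
		gk20a_tsg_update_sm_error_state_locked(tsg, sm_id, state);
		nvgpu_mutex_release(&g->dbg_sessions_lock);
	}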