From c5810a670d367ae1dc405fcc3108e11265df34bb Mon Sep 17 00:00:00 2001
From: aalex
Date: Fri, 7 Sep 2018 22:08:05 +0530
Subject: gpu: nvgpu: refactor SET_SM_EXCEPTION_MASK ioctl

added hal layer for SM exception mask handling
for taking care of the virtualization case.

Jira VQRM-4806
Bug 200447406
Bug 2331747

Change-Id: Ia44778a2e41c1a508c48026b8dee285966f1a544
Signed-off-by: aalex
Reviewed-on: https://git-master.nvidia.com/r/1816284
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | 24 ++++++++++++++++++++++++
 drivers/gpu/nvgpu/gk20a/tsg_gk20a.h |  3 +++
 2 files changed, 27 insertions(+)

(limited to 'drivers/gpu/nvgpu/gk20a')

diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
index 885ce172..43ee8d7c 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c
@@ -367,6 +367,7 @@ void gk20a_tsg_release(struct nvgpu_ref *ref)
 	if(tsg->sm_error_states != NULL) {
 		nvgpu_kfree(g, tsg->sm_error_states);
 		tsg->sm_error_states = NULL;
+		nvgpu_mutex_destroy(&tsg->sm_exception_mask_lock);
 	}
 
 	/* unhook all events created on this TSG */
@@ -407,6 +408,11 @@ int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g,
 	int err = 0;
 
 	if (tsg->sm_error_states != NULL) {
+		return -EINVAL;
+	}
+
+	err = nvgpu_mutex_init(&tsg->sm_exception_mask_lock);
+	if (err) {
 		return err;
 	}
 
@@ -415,6 +421,7 @@ int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g,
 		* num_sm);
 	if (tsg->sm_error_states == NULL) {
 		nvgpu_err(g, "sm_error_states mem allocation failed");
+		nvgpu_mutex_destroy(&tsg->sm_exception_mask_lock);
 		err = -ENOMEM;
 	}
 
@@ -440,3 +447,20 @@ void gk20a_tsg_update_sm_error_state_locked(struct tsg_gk20a *tsg,
 	tsg_sm_error_states->hww_warp_esr_report_mask =
 		sm_error_state->hww_warp_esr_report_mask;
 }
+
+int gk20a_tsg_set_sm_exception_type_mask(struct channel_gk20a *ch,
+		u32 exception_mask)
+{
+	struct tsg_gk20a *tsg;
+
+	tsg = tsg_gk20a_from_ch(ch);
+	if (!tsg) {
+		return -EINVAL;
+	}
+
+	nvgpu_mutex_acquire(&tsg->sm_exception_mask_lock);
+	tsg->sm_exception_mask_type = exception_mask;
+	nvgpu_mutex_release(&tsg->sm_exception_mask_lock);
+
+	return 0;
+}
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
index 1e3be553..d13cd388 100644
--- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h
@@ -82,6 +82,7 @@ struct tsg_gk20a {
 #define NVGPU_SM_EXCEPTION_TYPE_MASK_NONE	(0x0U)
 #define NVGPU_SM_EXCEPTION_TYPE_MASK_FATAL	(0x1U << 0)
 	u32 sm_exception_mask_type;
+	struct nvgpu_mutex sm_exception_mask_lock;
 };
 
 int gk20a_enable_tsg(struct tsg_gk20a *tsg);
@@ -103,6 +104,8 @@ int gk20a_tsg_alloc_sm_error_states_mem(struct gk20a *g,
 void gk20a_tsg_update_sm_error_state_locked(struct tsg_gk20a *tsg,
 		u32 sm_id,
 		struct nvgpu_tsg_sm_error_state *sm_error_state);
+int gk20a_tsg_set_sm_exception_type_mask(struct channel_gk20a *ch,
+		u32 exception_mask);
 
 struct gk20a_event_id_data {
 	struct gk20a *g;
-- 
cgit v1.2.2