diff options
author | Seema Khowala <seemaj@nvidia.com> | 2018-01-23 15:16:40 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-01-31 16:23:30 -0500 |
commit | 791ce6bd5480a8393c12be55e8afa459cb4dd1ff (patch) | |
tree | c34ed1f076bec31bfc5b87a7fa490eb28a2789d6 /drivers/gpu/nvgpu/gk20a | |
parent | 9beefc45516097db2eabf2887ff66d3334ff9fde (diff) |
gpu: nvgpu: gv11b: enable more gr exceptions
-pd, scc, ds, ssync, mme and sked exceptions are
now enabled. This will be useful for debugging.
-Handle the newly enabled interrupts
-Add a gr op to handle ssync hww. For legacy
chips, the ssync hww_esr register is gpcs_ppcs_ssync_hww_esr.
Since ssync hww is not enabled on legacy chips, ssync
hww exception handling is added for Volta only.
Change-Id: I63ba2eb51fa82e74832df26ee4cf3546458e5669
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1644751
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.h | 1 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gr_gk20a.c | 78 |
2 files changed, 61 insertions, 18 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index 3bc10109..5e46344a 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h | |||
@@ -442,6 +442,7 @@ struct gpu_ops { | |||
442 | void (*dump_ctxsw_stats)(struct gk20a *g, struct vm_gk20a *vm, | 442 | void (*dump_ctxsw_stats)(struct gk20a *g, struct vm_gk20a *vm, |
443 | struct nvgpu_gr_ctx *gr_ctx); | 443 | struct nvgpu_gr_ctx *gr_ctx); |
444 | void (*fecs_host_int_enable)(struct gk20a *g); | 444 | void (*fecs_host_int_enable)(struct gk20a *g); |
445 | int (*handle_ssync_hww)(struct gk20a *g); | ||
445 | } gr; | 446 | } gr; |
446 | struct { | 447 | struct { |
447 | void (*init_hw)(struct gk20a *g); | 448 | void (*init_hw)(struct gk20a *g); |
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index c7b00500..fb02bb81 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c | |||
@@ -5895,7 +5895,10 @@ int gk20a_gr_isr(struct gk20a *g) | |||
5895 | 5895 | ||
5896 | if (exception & gr_exception_fe_m()) { | 5896 | if (exception & gr_exception_fe_m()) { |
5897 | u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); | 5897 | u32 fe = gk20a_readl(g, gr_fe_hww_esr_r()); |
5898 | nvgpu_err(g, "fe warning %08x", fe); | 5898 | u32 info = gk20a_readl(g, gr_fe_hww_esr_info_r()); |
5899 | |||
5900 | nvgpu_err(g, "fe exception: esr 0x%08x, info 0x%08x", | ||
5901 | fe, info); | ||
5899 | gk20a_writel(g, gr_fe_hww_esr_r(), | 5902 | gk20a_writel(g, gr_fe_hww_esr_r(), |
5900 | gr_fe_hww_esr_reset_active_f()); | 5903 | gr_fe_hww_esr_reset_active_f()); |
5901 | need_reset |= -EFAULT; | 5904 | need_reset |= -EFAULT; |
@@ -5903,12 +5906,67 @@ int gk20a_gr_isr(struct gk20a *g) | |||
5903 | 5906 | ||
5904 | if (exception & gr_exception_memfmt_m()) { | 5907 | if (exception & gr_exception_memfmt_m()) { |
5905 | u32 memfmt = gk20a_readl(g, gr_memfmt_hww_esr_r()); | 5908 | u32 memfmt = gk20a_readl(g, gr_memfmt_hww_esr_r()); |
5906 | nvgpu_err(g, "memfmt exception %08x", memfmt); | 5909 | |
5910 | nvgpu_err(g, "memfmt exception: esr %08x", memfmt); | ||
5907 | gk20a_writel(g, gr_memfmt_hww_esr_r(), | 5911 | gk20a_writel(g, gr_memfmt_hww_esr_r(), |
5908 | gr_memfmt_hww_esr_reset_active_f()); | 5912 | gr_memfmt_hww_esr_reset_active_f()); |
5909 | need_reset |= -EFAULT; | 5913 | need_reset |= -EFAULT; |
5910 | } | 5914 | } |
5911 | 5915 | ||
5916 | if (exception & gr_exception_pd_m()) { | ||
5917 | u32 pd = gk20a_readl(g, gr_pd_hww_esr_r()); | ||
5918 | |||
5919 | nvgpu_err(g, "pd exception: esr 0x%08x", pd); | ||
5920 | gk20a_writel(g, gr_pd_hww_esr_r(), | ||
5921 | gr_pd_hww_esr_reset_active_f()); | ||
5922 | need_reset |= -EFAULT; | ||
5923 | } | ||
5924 | |||
5925 | if (exception & gr_exception_scc_m()) { | ||
5926 | u32 scc = gk20a_readl(g, gr_scc_hww_esr_r()); | ||
5927 | |||
5928 | nvgpu_err(g, "scc exception: esr 0x%08x", scc); | ||
5929 | gk20a_writel(g, gr_scc_hww_esr_r(), | ||
5930 | gr_scc_hww_esr_reset_active_f()); | ||
5931 | need_reset |= -EFAULT; | ||
5932 | } | ||
5933 | |||
5934 | if (exception & gr_exception_ds_m()) { | ||
5935 | u32 ds = gk20a_readl(g, gr_ds_hww_esr_r()); | ||
5936 | |||
5937 | nvgpu_err(g, "ds exception: esr: 0x%08x", ds); | ||
5938 | gk20a_writel(g, gr_ds_hww_esr_r(), | ||
5939 | gr_ds_hww_esr_reset_task_f()); | ||
5940 | need_reset |= -EFAULT; | ||
5941 | } | ||
5942 | |||
5943 | if (exception & gr_exception_ssync_m()) { | ||
5944 | if (g->ops.gr.handle_ssync_hww) | ||
5945 | need_reset |= g->ops.gr.handle_ssync_hww(g); | ||
5946 | else | ||
5947 | nvgpu_err(g, "unhandled ssync exception"); | ||
5948 | } | ||
5949 | |||
5950 | if (exception & gr_exception_mme_m()) { | ||
5951 | u32 mme = gk20a_readl(g, gr_mme_hww_esr_r()); | ||
5952 | u32 info = gk20a_readl(g, gr_mme_hww_esr_info_r()); | ||
5953 | |||
5954 | nvgpu_err(g, "mme exception: esr 0x%08x info:0x%08x", | ||
5955 | mme, info); | ||
5956 | gk20a_writel(g, gr_mme_hww_esr_r(), | ||
5957 | gr_mme_hww_esr_reset_active_f()); | ||
5958 | need_reset |= -EFAULT; | ||
5959 | } | ||
5960 | |||
5961 | if (exception & gr_exception_sked_m()) { | ||
5962 | u32 sked = gk20a_readl(g, gr_sked_hww_esr_r()); | ||
5963 | |||
5964 | nvgpu_err(g, "sked exception: esr 0x%08x", sked); | ||
5965 | gk20a_writel(g, gr_sked_hww_esr_r(), | ||
5966 | gr_sked_hww_esr_reset_active_f()); | ||
5967 | need_reset |= -EFAULT; | ||
5968 | } | ||
5969 | |||
5912 | /* check if a gpc exception has occurred */ | 5970 | /* check if a gpc exception has occurred */ |
5913 | if (exception & gr_exception_gpc_m() && need_reset == 0) { | 5971 | if (exception & gr_exception_gpc_m() && need_reset == 0) { |
5914 | bool post_event = false; | 5972 | bool post_event = false; |
@@ -5931,22 +5989,6 @@ int gk20a_gr_isr(struct gk20a *g) | |||
5931 | } | 5989 | } |
5932 | } | 5990 | } |
5933 | 5991 | ||
5934 | if (exception & gr_exception_ds_m()) { | ||
5935 | u32 ds = gk20a_readl(g, gr_ds_hww_esr_r()); | ||
5936 | nvgpu_err(g, "ds exception %08x", ds); | ||
5937 | gk20a_writel(g, gr_ds_hww_esr_r(), | ||
5938 | gr_ds_hww_esr_reset_task_f()); | ||
5939 | need_reset |= -EFAULT; | ||
5940 | } | ||
5941 | |||
5942 | if (exception & gr_exception_sked_m()) { | ||
5943 | u32 sked = gk20a_readl(g, gr_sked_hww_esr_r()); | ||
5944 | |||
5945 | nvgpu_err(g, "sked exception %08x", sked); | ||
5946 | gk20a_writel(g, gr_sked_hww_esr_r(), | ||
5947 | gr_sked_hww_esr_reset_active_f()); | ||
5948 | } | ||
5949 | |||
5950 | gk20a_writel(g, gr_intr_r(), gr_intr_exception_reset_f()); | 5992 | gk20a_writel(g, gr_intr_r(), gr_intr_exception_reset_f()); |
5951 | gr_intr &= ~gr_intr_exception_pending_f(); | 5993 | gr_intr &= ~gr_intr_exception_pending_f(); |
5952 | 5994 | ||