author	sujeet baranwal <sbaranwal@nvidia.com>	2014-09-30 13:54:57 -0400
committer	Dan Willemsen <dwillemsen@nvidia.com>	2015-03-18 15:11:57 -0400
commit	5febd08ae76cbd4042e53ad70f062cd491b7e8b6 (patch)
tree	3815d03eaab78e1d249a8fafeec32a312ddc9de7 /drivers/gpu/nvgpu
parent	3d313d06570dcb28bba73247a2c0fc52bec56af0 (diff)
gpu: kernel support for suspending/resuming SMs
Kernel support for allowing a GPU debugger to suspend and resume SMs.
Invocation of "suspend" on a given channel will suspend all SMs if the
channel is resident, else remove the channel from the runlist. Similarly,
"resume" will either resume all SMs if the channel was resident, or
re-enable the channel in the runlist.

Change-Id: I3b4ae21dc1b91c1059c828ec6db8125f8a0ce194
Signed-off-by: sujeet baranwal <sbaranwal@nvidia.com>
Signed-off-by: Mayank Kaushik <mkaushik@nvidia.com>
Reviewed-on: http://git-master/r/552115
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
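For context, the diff below wires the new behavior into the dbg-gpu ioctl path. A minimal userspace sketch of how a debugger might drive it is shown here; only the ioctl name, the args struct, and the SUSPEND/RESUME mode values appear in this change, while the uapi header path and the way the dbg session fd is obtained are assumptions for illustration.

/* Hypothetical usage sketch, not part of the patch. Assumes the nvgpu dbg-gpu
 * uapi definitions are visible via <linux/nvgpu.h> and that "fd" is an
 * already-open dbg-gpu session fd with a channel bound to it. */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/nvgpu.h>

static int suspend_resume_sms(int fd, int suspend)
{
	struct nvgpu_dbg_gpu_suspend_resume_all_sms_args args = {
		.mode = suspend ? NVGPU_DBG_GPU_SUSPEND_ALL_SMS
				: NVGPU_DBG_GPU_RESUME_ALL_SMS,
	};

	/* If the bound channel is resident, the kernel suspends/resumes all
	 * SMs; otherwise it removes/re-enables the channel in the runlist. */
	if (ioctl(fd, NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS, &args) < 0) {
		perror("NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS");
		return -1;
	}
	return 0;
}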
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.c	8
-rw-r--r--	drivers/gpu/nvgpu/gk20a/channel_gk20a.h	2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c	73
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c	187
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.h	9
-rw-r--r--	drivers/gpu/nvgpu/gk20a/hw_gr_gk20a.h	62
6 files changed, 284 insertions(+), 57 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 9f2e0017..f554cf77 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -396,6 +396,14 @@ static int channel_gk20a_update_runlist(struct channel_gk20a *c, bool add)
 	return c->g->ops.fifo.update_runlist(c->g, 0, c->hw_chid, add, true);
 }
 
+void channel_gk20a_enable(struct channel_gk20a *ch)
+{
+	/* enable channel */
+	gk20a_writel(ch->g, ccsr_channel_r(ch->hw_chid),
+		gk20a_readl(ch->g, ccsr_channel_r(ch->hw_chid)) |
+		ccsr_channel_enable_set_true_f());
+}
+
 void channel_gk20a_disable(struct channel_gk20a *ch)
 {
 	/* disable channel */
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index aa87464b..a028b6f3 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -223,5 +223,5 @@ int channel_gk20a_alloc_inst(struct gk20a *g, struct channel_gk20a *ch);
 void channel_gk20a_free_inst(struct gk20a *g, struct channel_gk20a *ch);
 int channel_gk20a_setup_ramfc(struct channel_gk20a *c,
 		u64 gpfifo_base, u32 gpfifo_entries);
-
+void channel_gk20a_enable(struct channel_gk20a *ch);
 #endif /* CHANNEL_GK20A_H */
diff --git a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
index 94486064..39941aae 100644
--- a/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/dbg_gpu_gk20a.c
@@ -28,6 +28,7 @@
 #include "dbg_gpu_gk20a.h"
 #include "regops_gk20a.h"
 #include "hw_therm_gk20a.h"
+#include "hw_gr_gk20a.h"
 
 struct dbg_gpu_session_ops dbg_gpu_session_ops_gk20a = {
 	.exec_reg_ops = exec_regops_gk20a,
@@ -359,6 +360,11 @@ static int nvgpu_ioctl_powergate_gk20a(struct dbg_session_gk20a *dbg_s,
 static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 		struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *args);
 
+static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
+		struct dbg_session_gk20a *dbg_s,
+		struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args);
+
+
 long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
 		unsigned long arg)
 {
@@ -418,8 +424,13 @@ long gk20a_dbg_gpu_dev_ioctl(struct file *filp, unsigned int cmd,
 			(struct nvgpu_dbg_gpu_smpc_ctxsw_mode_args *)buf);
 		break;
 
+	case NVGPU_DBG_GPU_IOCTL_SUSPEND_RESUME_ALL_SMS:
+		err = nvgpu_dbg_gpu_ioctl_suspend_resume_sm(dbg_s,
+			(struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *)buf);
+		break;
+
 	default:
-		dev_dbg(dev_from_gk20a(g),
+		gk20a_err(dev_from_gk20a(g),
 			"unrecognized dbg gpu ioctl cmd: 0x%x",
 			cmd);
 		err = -ENOTTY;
@@ -693,3 +704,63 @@ static int nvgpu_dbg_gpu_ioctl_smpc_ctxsw_mode(struct dbg_session_gk20a *dbg_s,
 	mutex_unlock(&g->dbg_sessions_lock);
 	return err;
 }
+
+static int nvgpu_dbg_gpu_ioctl_suspend_resume_sm(
+		struct dbg_session_gk20a *dbg_s,
+		struct nvgpu_dbg_gpu_suspend_resume_all_sms_args *args)
+{
+	struct gk20a *g = get_gk20a(dbg_s->pdev);
+	struct channel_gk20a *ch = dbg_s->ch;
+	bool ch_is_curr_ctx;
+	int err = 0, action = args->mode;
+
+	mutex_lock(&g->dbg_sessions_lock);
+
+	/* Suspend GPU context switching */
+	/* Disable channel switching.
+	 * at that point the hardware state can be inspected to
+	 * determine if the context we're interested in is current.
+	 */
+	err = gr_gk20a_disable_ctxsw(g);
+	if (err) {
+		gk20a_err(dev_from_gk20a(g), "unable to stop gr ctxsw");
+		/* this should probably be ctx-fatal... */
+		goto clean_up;
+	}
+
+	/* find out whether the current channel is resident */
+	ch_is_curr_ctx = gk20a_is_channel_ctx_resident(ch);
+
+	if (ch_is_curr_ctx) {
+		switch (action) {
+		case NVGPU_DBG_GPU_SUSPEND_ALL_SMS:
+			gk20a_suspend_all_sms(g);
+			break;
+
+		case NVGPU_DBG_GPU_RESUME_ALL_SMS:
+			gk20a_resume_all_sms(g);
+			break;
+		}
+	} else {
+		switch (action) {
+		case NVGPU_DBG_GPU_SUSPEND_ALL_SMS:
+			/* Disable the channel */
+			channel_gk20a_disable(ch);
+			break;
+
+		case NVGPU_DBG_GPU_RESUME_ALL_SMS:
+			/* Enable the channel */
+			channel_gk20a_enable(ch);
+			break;
+		}
+	}
+
+	/* Resume GPU context switching */
+	err = gr_gk20a_enable_ctxsw(g);
+	if (err)
+		gk20a_err(dev_from_gk20a(g), "unable to restart ctxsw!\n");
+
+clean_up:
+	mutex_unlock(&g->dbg_sessions_lock);
+	return err;
+}
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 452560d8..b3fc8ae1 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -79,6 +79,10 @@ static void gr_gk20a_free_channel_patch_ctx(struct channel_gk20a *c);
 static int gr_gk20a_init_golden_ctx_image(struct gk20a *g,
 					  struct channel_gk20a *c);
 
+/* sm lock down */
+static int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
+		u32 global_esr_mask, bool check_errors);
+
 void gk20a_fecs_dump_falcon_stats(struct gk20a *g)
 {
 	int i;
@@ -5365,13 +5369,9 @@ unlock:
 	return chid;
 }
 
-static int gk20a_gr_lock_down_sm(struct gk20a *g,
+int gk20a_gr_lock_down_sm(struct gk20a *g,
 		u32 gpc, u32 tpc, u32 global_esr_mask)
 {
-	unsigned long end_jiffies = jiffies +
-		msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
-	u32 delay = GR_IDLE_CHECK_DEFAULT;
-	bool mmu_debug_mode_enabled = g->ops.mm.is_debug_mode_enabled(g);
 	u32 offset =
 		proj_gpc_stride_v() * gpc + proj_tpc_in_gpc_stride_v() * tpc;
 	u32 dbgr_control0;
@@ -5386,55 +5386,8 @@ static int gk20a_gr_lock_down_sm(struct gk20a *g,
 	gk20a_writel(g,
 		gr_gpc0_tpc0_sm_dbgr_control0_r() + offset, dbgr_control0);
 
-	/* wait for the sm to lock down */
-	do {
-		u32 global_esr = gk20a_readl(g,
-				gr_gpc0_tpc0_sm_hww_global_esr_r() + offset);
-		u32 warp_esr = gk20a_readl(g,
-				gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset);
-		u32 dbgr_status0 = gk20a_readl(g,
-				gr_gpc0_tpc0_sm_dbgr_status0_r() + offset);
-		bool locked_down =
-		    (gr_gpc0_tpc0_sm_dbgr_status0_locked_down_v(dbgr_status0) ==
-		     gr_gpc0_tpc0_sm_dbgr_status0_locked_down_true_v());
-		bool error_pending =
-			(gr_gpc0_tpc0_sm_hww_warp_esr_error_v(warp_esr) !=
-			 gr_gpc0_tpc0_sm_hww_warp_esr_error_none_v()) ||
-			((global_esr & ~global_esr_mask) != 0);
-
-		if (locked_down || !error_pending) {
-			gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
-					"GPC%d TPC%d: locked down SM", gpc, tpc);
-
-			/* de-assert stop trigger */
-			dbgr_control0 &= ~gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_enable_f();
-			gk20a_writel(g,
-				gr_gpc0_tpc0_sm_dbgr_control0_r() + offset,
-				dbgr_control0);
-
-			return 0;
-		}
-
-		/* if an mmu fault is pending and mmu debug mode is not
-		 * enabled, the sm will never lock down. */
-		if (!mmu_debug_mode_enabled && gk20a_fifo_mmu_fault_pending(g)) {
-			gk20a_err(dev_from_gk20a(g),
-					"GPC%d TPC%d: mmu fault pending,"
-					" sm will never lock down!", gpc, tpc);
-			return -EFAULT;
-		}
-
-		usleep_range(delay, delay * 2);
-		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
-
-	} while (time_before(jiffies, end_jiffies)
-			|| !tegra_platform_is_silicon());
-
-	gk20a_err(dev_from_gk20a(g),
-		   "GPC%d TPC%d: timed out while trying to lock down SM",
-		   gpc, tpc);
-
-	return -EAGAIN;
+	return gk20a_gr_wait_for_sm_lock_down(g, gpc, tpc, global_esr_mask,
+			true);
 }
 
 bool gk20a_gr_sm_debugger_attached(struct gk20a *g)
@@ -7198,6 +7151,131 @@ static u32 gr_gk20a_get_tpc_num(u32 addr)
 	return 0;
 }
 
+static int gk20a_gr_wait_for_sm_lock_down(struct gk20a *g, u32 gpc, u32 tpc,
+		u32 global_esr_mask, bool check_errors)
+{
+	unsigned long end_jiffies = jiffies +
+		msecs_to_jiffies(gk20a_get_gr_idle_timeout(g));
+	u32 delay = GR_IDLE_CHECK_DEFAULT;
+	bool mmu_debug_mode_enabled = g->ops.mm.is_debug_mode_enabled(g);
+	u32 offset =
+		proj_gpc_stride_v() * gpc + proj_tpc_in_gpc_stride_v() * tpc;
+
+	gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+		"GPC%d TPC%d: locking down SM", gpc, tpc);
+
+	/* wait for the sm to lock down */
+	do {
+		u32 global_esr = gk20a_readl(g,
+				gr_gpc0_tpc0_sm_hww_global_esr_r() + offset);
+		u32 warp_esr = gk20a_readl(g,
+				gr_gpc0_tpc0_sm_hww_warp_esr_r() + offset);
+		u32 dbgr_status0 = gk20a_readl(g,
+				gr_gpc0_tpc0_sm_dbgr_status0_r() + offset);
+		bool locked_down =
+		    (gr_gpc0_tpc0_sm_dbgr_status0_locked_down_v(dbgr_status0) ==
+		     gr_gpc0_tpc0_sm_dbgr_status0_locked_down_true_v());
+		bool no_error_pending =
+			check_errors &&
+			(gr_gpc0_tpc0_sm_hww_warp_esr_error_v(warp_esr) ==
+			 gr_gpc0_tpc0_sm_hww_warp_esr_error_none_v()) &&
+			((global_esr & ~global_esr_mask) == 0);
+
+		if (locked_down || no_error_pending) {
+			gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
+				"GPC%d TPC%d: locked down SM", gpc, tpc);
+			return 0;
+		}
+
+		/* if an mmu fault is pending and mmu debug mode is not
+		 * enabled, the sm will never lock down. */
+		if (!mmu_debug_mode_enabled &&
+		     gk20a_fifo_mmu_fault_pending(g)) {
+			gk20a_err(dev_from_gk20a(g),
+				"GPC%d TPC%d: mmu fault pending,"
+				" sm will never lock down!", gpc, tpc);
+			return -EFAULT;
+		}
+
+		usleep_range(delay, delay * 2);
+		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+
+	} while (time_before(jiffies, end_jiffies)
+			|| !tegra_platform_is_silicon());
+
+	gk20a_err(dev_from_gk20a(g),
+		"GPC%d TPC%d: timed out while trying to lock down SM",
+		gpc, tpc);
+
+	return -EAGAIN;
+}
+
+void gk20a_suspend_all_sms(struct gk20a *g)
+{
+	struct gr_gk20a *gr = &g->gr;
+	u32 gpc, tpc;
+	int err;
+	u32 dbgr_control0;
+
+	/* if an SM debugger isn't attached, skip suspend */
+	if (!gk20a_gr_sm_debugger_attached(g)) {
+		gk20a_err(dev_from_gk20a(g), "SM debugger not attached, "
+				"skipping suspend!\n");
+		return;
+	}
+
+	/* assert stop trigger. uniformity assumption: all SMs will have
+	 * the same state in dbg_control0. */
+	dbgr_control0 =
+		gk20a_readl(g, gr_gpc0_tpc0_sm_dbgr_control0_r());
+	dbgr_control0 |= gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_enable_f();
+
+	/* broadcast write */
+	gk20a_writel(g,
+		gr_gpcs_tpcs_sm_dbgr_control0_r(), dbgr_control0);
+
+	for (gpc = 0; gpc < gr->gpc_count; gpc++) {
+		for (tpc = 0; tpc < gr->tpc_count; tpc++) {
+			err =
+			 gk20a_gr_wait_for_sm_lock_down(g, gpc, tpc, 0, false);
+			if (err) {
+				gk20a_err(dev_from_gk20a(g),
+					"SuspendAllSms failed\n");
+				return;
+			}
+		}
+	}
+}
+
+void gk20a_resume_all_sms(struct gk20a *g)
+{
+	u32 dbgr_control0;
+	/*
+	 * The following requires some clarification. Despite the fact that both
+	 * RUN_TRIGGER and STOP_TRIGGER have the word "TRIGGER" in their
+	 * names, only one is actually a trigger, and that is the STOP_TRIGGER.
+	 * Merely writing a 1(_TASK) to the RUN_TRIGGER is not sufficient to
+	 * resume the gpu - the _STOP_TRIGGER must explicitly be set to 0
+	 * (_DISABLE) as well.
+	 *
+	 * Advice from the arch group: Disable the stop trigger first, as a
+	 * separate operation, in order to ensure that the trigger has taken
+	 * effect, before enabling the run trigger.
+	 */
+
+	/* De-assert stop trigger */
+	dbgr_control0 =
+		gk20a_readl(g, gr_gpcs_tpcs_sm_dbgr_control0_r());
+	dbgr_control0 &= ~gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_enable_f();
+	gk20a_writel(g,
+		gr_gpcs_tpcs_sm_dbgr_control0_r(), dbgr_control0);
+
+	/* Run trigger */
+	dbgr_control0 |= gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_enable_f();
+	gk20a_writel(g,
+		gr_gpcs_tpcs_sm_dbgr_control0_r(), dbgr_control0);
+}
+
 void gk20a_init_gr_ops(struct gpu_ops *gops)
 {
 	gops->gr.access_smpc_reg = gr_gk20a_access_smpc_reg;
@@ -7232,3 +7310,4 @@ void gk20a_init_gr_ops(struct gpu_ops *gops)
 	gops->gr.is_tpc_addr = gr_gk20a_is_tpc_addr;
 	gops->gr.get_tpc_num = gr_gk20a_get_tpc_num;
 }
+
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
index 4b1f6de2..72642a41 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.h
@@ -449,4 +449,11 @@ void gr_gk20a_load_ctxsw_ucode_boot(struct gk20a *g, u64 addr_base,
 
 
 void gr_gk20a_free_tsg_gr_ctx(struct tsg_gk20a *c);
-#endif /* GR_GK20A_H */
+int gr_gk20a_disable_ctxsw(struct gk20a *g);
+int gr_gk20a_enable_ctxsw(struct gk20a *g);
+void gk20a_resume_all_sms(struct gk20a *g);
+void gk20a_suspend_all_sms(struct gk20a *g);
+int gk20a_gr_lock_down_sm(struct gk20a *g,
+		u32 gpc, u32 tpc, u32 global_esr_mask);
+bool gk20a_is_channel_ctx_resident(struct channel_gk20a *ch);
+#endif /*__GR_GK20A_H__*/
diff --git a/drivers/gpu/nvgpu/gk20a/hw_gr_gk20a.h b/drivers/gpu/nvgpu/gk20a/hw_gr_gk20a.h
index 463443d6..65a3072c 100644
--- a/drivers/gpu/nvgpu/gk20a/hw_gr_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/hw_gr_gk20a.h
@@ -2810,6 +2810,14 @@ static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_enable_f(void)
 {
 	return 0x80000000;
 }
+static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_stop_trigger_disable_f(void)
+{
+	return 0x00000000;
+}
+static inline u32 gr_gpc0_tpc0_sm_dbgr_control0_run_trigger_task_f(void)
+{
+	return 0x40000000;
+}
 static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_r(void)
 {
 	return 0x0050460c;
@@ -2822,6 +2830,22 @@ static inline u32 gr_gpc0_tpc0_sm_dbgr_status0_locked_down_true_v(void)
 {
 	return 0x00000001;
 }
+static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_r(void)
+{
+	return 0x00419e50;
+}
+static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_bpt_int_pending_f(void)
+{
+	return 0x10;
+}
+static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_bpt_pause_pending_f(void)
+{
+	return 0x20;
+}
+static inline u32 gr_gpcs_tpcs_sm_hww_global_esr_single_step_complete_pending_f(void)
+{
+	return 0x40;
+}
 static inline u32 gr_gpc0_tpc0_sm_hww_global_esr_r(void)
 {
 	return 0x00504650;
@@ -3206,4 +3230,42 @@ static inline u32 gr_gpcs_tpcs_sm_sch_texlock_dot_t_unlock_disable_f(void)
 {
 	return 0x0;
 }
+static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_r(void)
+{
+	return 0x00419e10;
+}
+
+static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_r_debugger_mode_v(u32 r)
+{
+	return (r >> 0) & 0x1;
+}
+
+static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_v(u32 r)
+{
+	return (r >> 31) & 0x1;
+}
+static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_m(void)
+{
+	return 0x1 << 31;
+}
+static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_stop_trigger_enable_f(void)
+{
+	return 0x80000000;
+}
+static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_v(u32 r)
+{
+	return (r >> 30) & 0x1;
+}
+static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_m(void)
+{
+	return 0x1 << 30;
+}
+static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_run_trigger_enable_f(void)
+{
+	return 0x40000000;
+}
+static inline u32 gr_gpcs_tpcs_sm_dbgr_control0_debugger_mode_on_f(void)
+{
+	return 0x1;
+}
 #endif