diff options
author | Seema Khowala <seemaj@nvidia.com> | 2018-06-27 01:57:02 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-07-19 16:54:26 -0400 |
commit | b1d0d8ece83ba0aa7b1e7ea9062eedc5cd9e4e33 (patch) | |
tree | 5a88d345e23e05d3a3ca9018cedcf6b12958a20b /drivers/gpu/nvgpu/gk20a | |
parent | d859c5f4a03b975dc493f72a35016e83adad279a (diff) |
Revert "Revert: GV11B runlist preemption patches"
This reverts commit 0b02c8589dcc507865a8fd398431c45fbda2ba9c.
Originally, this change was reverted because it was making the ap_compute
test fail on embedded-qnx-hv e3550-t194. The issue was resolved by a
combination of fixes: replacing TSG preempt with runlist preempt during
teardown, setting the preempt timeout to 100 ms (earlier this was set to
1000 ms for t194 and 3000 ms for legacy chips), and not issuing preempt
timeout recovery if the preempt fails.
Bug 200426402
Change-Id: If9a68d028a155075444cc1bdf411057e3388d48e
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1762563
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 15 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/fifo_gk20a.h | 10 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/gk20a.h | 6 |
3 files changed, 14 insertions, 17 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c index cd54baf1..57cb0019 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | |||
@@ -55,9 +55,7 @@ | |||
55 | #define FECS_METHOD_WFI_RESTORE 0x80000 | 55 | #define FECS_METHOD_WFI_RESTORE 0x80000 |
56 | #define FECS_MAILBOX_0_ACK_RESTORE 0x4 | 56 | #define FECS_MAILBOX_0_ACK_RESTORE 0x4 |
57 | 57 | ||
58 | static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, | 58 | |
59 | u32 chid, bool add, | ||
60 | bool wait_for_finish); | ||
61 | static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg); | 59 | static u32 gk20a_fifo_engines_on_id(struct gk20a *g, u32 id, bool is_tsg); |
62 | 60 | ||
63 | static const char *const pbdma_intr_fault_type_desc[] = { | 61 | static const char *const pbdma_intr_fault_type_desc[] = { |
@@ -2708,7 +2706,7 @@ void gk20a_fifo_issue_preempt(struct gk20a *g, u32 id, bool is_tsg) | |||
2708 | } | 2706 | } |
2709 | 2707 | ||
2710 | int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id, | 2708 | int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id, |
2711 | unsigned int id_type, unsigned int timeout_rc_type) | 2709 | unsigned int id_type) |
2712 | { | 2710 | { |
2713 | struct nvgpu_timeout timeout; | 2711 | struct nvgpu_timeout timeout; |
2714 | u32 delay = GR_IDLE_CHECK_DEFAULT; | 2712 | u32 delay = GR_IDLE_CHECK_DEFAULT; |
@@ -2781,8 +2779,8 @@ int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg) | |||
2781 | id_type = is_tsg ? ID_TYPE_TSG : ID_TYPE_CHANNEL; | 2779 | id_type = is_tsg ? ID_TYPE_TSG : ID_TYPE_CHANNEL; |
2782 | 2780 | ||
2783 | /* wait for preempt */ | 2781 | /* wait for preempt */ |
2784 | ret = g->ops.fifo.is_preempt_pending(g, id, id_type, | 2782 | ret = g->ops.fifo.is_preempt_pending(g, id, id_type); |
2785 | PREEMPT_TIMEOUT_RC); | 2783 | |
2786 | return ret; | 2784 | return ret; |
2787 | } | 2785 | } |
2788 | 2786 | ||
@@ -3279,7 +3277,7 @@ void gk20a_fifo_runlist_hw_submit(struct gk20a *g, u32 runlist_id, | |||
3279 | fifo_eng_runlist_length_f(count)); | 3277 | fifo_eng_runlist_length_f(count)); |
3280 | } | 3278 | } |
3281 | 3279 | ||
3282 | static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, | 3280 | int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, |
3283 | u32 chid, bool add, | 3281 | u32 chid, bool add, |
3284 | bool wait_for_finish) | 3282 | bool wait_for_finish) |
3285 | { | 3283 | { |
@@ -3452,8 +3450,7 @@ static int __locked_fifo_reschedule_preempt_next(struct channel_gk20a *ch, | |||
3452 | gk20a_readl(g, fifo_preempt_r())); | 3450 | gk20a_readl(g, fifo_preempt_r())); |
3453 | #endif | 3451 | #endif |
3454 | if (wait_preempt) { | 3452 | if (wait_preempt) { |
3455 | g->ops.fifo.is_preempt_pending( | 3453 | g->ops.fifo.is_preempt_pending(g, preempt_id, preempt_type); |
3456 | g, preempt_id, preempt_type, PREEMPT_TIMEOUT_RC); | ||
3457 | } | 3454 | } |
3458 | #ifdef TRACEPOINTS_ENABLED | 3455 | #ifdef TRACEPOINTS_ENABLED |
3459 | trace_gk20a_reschedule_preempted_next(ch->chid); | 3456 | trace_gk20a_reschedule_preempted_next(ch->chid); |
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h index bccd15f6..77030c94 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h | |||
@@ -50,9 +50,6 @@ enum { | |||
50 | #define ID_TYPE_TSG 1 | 50 | #define ID_TYPE_TSG 1 |
51 | #define ID_TYPE_UNKNOWN ((u32)~0) | 51 | #define ID_TYPE_UNKNOWN ((u32)~0) |
52 | 52 | ||
53 | #define PREEMPT_TIMEOUT_RC 1 | ||
54 | #define PREEMPT_TIMEOUT_NORC 0 | ||
55 | |||
56 | #define RC_YES 1 | 53 | #define RC_YES 1 |
57 | #define RC_NO 0 | 54 | #define RC_NO 0 |
58 | 55 | ||
@@ -257,6 +254,9 @@ int nvgpu_fifo_reschedule_runlist(struct channel_gk20a *ch, bool preempt_next, | |||
257 | int gk20a_fifo_update_runlist(struct gk20a *g, u32 engine_id, u32 chid, | 254 | int gk20a_fifo_update_runlist(struct gk20a *g, u32 engine_id, u32 chid, |
258 | bool add, bool wait_for_finish); | 255 | bool add, bool wait_for_finish); |
259 | 256 | ||
257 | int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id, | ||
258 | u32 chid, bool add, | ||
259 | bool wait_for_finish); | ||
260 | int gk20a_fifo_suspend(struct gk20a *g); | 260 | int gk20a_fifo_suspend(struct gk20a *g); |
261 | 261 | ||
262 | bool gk20a_fifo_mmu_fault_pending(struct gk20a *g); | 262 | bool gk20a_fifo_mmu_fault_pending(struct gk20a *g); |
@@ -390,8 +390,8 @@ void gk20a_fifo_channel_unbind(struct channel_gk20a *ch_gk20a); | |||
390 | 390 | ||
391 | u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g); | 391 | u32 gk20a_fifo_intr_0_error_mask(struct gk20a *g); |
392 | 392 | ||
393 | int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id, unsigned int id_type, | 393 | int gk20a_fifo_is_preempt_pending(struct gk20a *g, u32 id, |
394 | unsigned int timeout_rc_type); | 394 | unsigned int id_type); |
395 | int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg); | 395 | int __locked_fifo_preempt(struct gk20a *g, u32 id, bool is_tsg); |
396 | void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, u32 id, | 396 | void gk20a_fifo_preempt_timeout_rc(struct gk20a *g, u32 id, |
397 | unsigned int id_type); | 397 | unsigned int id_type); |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index d6e0342b..17b0a60b 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h | |||
@@ -685,9 +685,9 @@ struct gpu_ops { | |||
685 | struct ch_state *ch_state); | 685 | struct ch_state *ch_state); |
686 | u32 (*intr_0_error_mask)(struct gk20a *g); | 686 | u32 (*intr_0_error_mask)(struct gk20a *g); |
687 | int (*is_preempt_pending)(struct gk20a *g, u32 id, | 687 | int (*is_preempt_pending)(struct gk20a *g, u32 id, |
688 | unsigned int id_type, unsigned int timeout_rc_type); | 688 | unsigned int id_type); |
689 | int (*preempt_ch_tsg)(struct gk20a *g, u32 id, | 689 | int (*preempt_ch_tsg)(struct gk20a *g, u32 id, |
690 | unsigned int id_type, unsigned int timeout_rc_type); | 690 | unsigned int id_type); |
691 | void (*init_pbdma_intr_descs)(struct fifo_gk20a *f); | 691 | void (*init_pbdma_intr_descs)(struct fifo_gk20a *f); |
692 | int (*reset_enable_hw)(struct gk20a *g); | 692 | int (*reset_enable_hw)(struct gk20a *g); |
693 | int (*setup_userd)(struct channel_gk20a *c); | 693 | int (*setup_userd)(struct channel_gk20a *c); |
@@ -1132,7 +1132,7 @@ struct gpu_ops { | |||
1132 | bool (*is_intr_hub_pending)(struct gk20a *g, u32 mc_intr); | 1132 | bool (*is_intr_hub_pending)(struct gk20a *g, u32 mc_intr); |
1133 | bool (*is_intr_nvlink_pending)(struct gk20a *g, u32 mc_intr); | 1133 | bool (*is_intr_nvlink_pending)(struct gk20a *g, u32 mc_intr); |
1134 | bool (*is_stall_and_eng_intr_pending)(struct gk20a *g, | 1134 | bool (*is_stall_and_eng_intr_pending)(struct gk20a *g, |
1135 | u32 act_eng_id); | 1135 | u32 act_eng_id, u32 *eng_intr_pending); |
1136 | u32 (*intr_stall)(struct gk20a *g); | 1136 | u32 (*intr_stall)(struct gk20a *g); |
1137 | void (*intr_stall_pause)(struct gk20a *g); | 1137 | void (*intr_stall_pause)(struct gk20a *g); |
1138 | void (*intr_stall_resume)(struct gk20a *g); | 1138 | void (*intr_stall_resume)(struct gk20a *g); |