author     Deepak Nibade <dnibade@nvidia.com>     2014-08-04 05:29:41 -0400
committer  Dan Willemsen <dwillemsen@nvidia.com>  2015-03-18 15:10:46 -0400
commit     478e659ae466d870889bda8ccba008c6bd27399e (patch)
tree       07befb0bd5e90666ce4f5584354e1ca43291f59c /drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
parent     76993ba18c6969cd26bb500eee4ecf734deb7bcb (diff)
gpu: nvgpu: do not touch runlist during recovery
Currently we clear the runlist and re-create it in scheduled work during the fifo recovery process. This runlist re-generation can be postponed until a later time, i.e. when the channel is closed.

Hence, remove the runlist locks and re-generation from the handle_mmu_fault() path. Instead, disable gr fifo access at the start of recovery and re-enable it at the end of the recovery process. Also, delete the scheduled work that re-created the runlist, and re-enable ELPG and fifo access in finish_mmu_fault_handling() itself.

bug 1470692

Change-Id: I705a6a5236734c7207a01d9a9fa9eca22bdbe7eb
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/449225
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
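For reference, the recovery ordering introduced by this patch boils down to the sketch below. It is a condensed illustration assembled only from identifiers that appear in the hunks that follow; the helper name recovery_flow_sketch is invented for illustration, and per-engine fault handling is elided.

/* Sketch of the recovery ordering after this change (illustrative only;
 * identifiers are taken from the diff below, fault parsing and engine
 * reset are elided). */
static void recovery_flow_sketch(struct gk20a *g)
{
	u32 grfifo_ctl;

	/* start of recovery: power off ELPG and block gr fifo access */
	gk20a_pmu_disable_elpg(g);

	grfifo_ctl = gk20a_readl(g, gr_gpfifo_ctl_r());
	grfifo_ctl &= ~gr_gpfifo_ctl_semaphore_access_f(1);
	grfifo_ctl &= ~gr_gpfifo_ctl_access_f(1);
	gk20a_writel(g, gr_gpfifo_ctl_r(),
		     grfifo_ctl | gr_gpfifo_ctl_access_f(0) |
		     gr_gpfifo_ctl_semaphore_access_f(0));

	/* ... fault handling and engine reset happen here; the runlist is
	 * no longer touched, it is rebuilt later when the channel closes ... */

	/* end of recovery: re-enable fifo access, then ELPG */
	gk20a_writel(g, gr_gpfifo_ctl_r(),
		     gr_gpfifo_ctl_access_enabled_f() |
		     gr_gpfifo_ctl_semaphore_access_enabled_f());
	gk20a_pmu_enable_elpg(g);
}

The design choice is simply to trade immediate runlist reconstruction (previously done in a scheduled work item holding all runlist mutexes) for a fifo-access gate around the whole recovery window.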
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r--   drivers/gpu/nvgpu/gk20a/fifo_gk20a.c   58
1 file changed, 17 insertions, 41 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index a41955bd..2089482a 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -39,7 +39,6 @@
 static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
 					    u32 hw_chid, bool add,
 					    bool wait_for_finish);
-static void gk20a_fifo_handle_mmu_fault_thread(struct work_struct *work);
 
 /*
  * Link engine IDs to MMU IDs and vice versa.
@@ -498,8 +497,6 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 
 	f->g = g;
 
-	INIT_WORK(&f->fault_restore_thread,
-		  gk20a_fifo_handle_mmu_fault_thread);
 	mutex_init(&f->intr.isr.mutex);
 	gk20a_init_fifo_pbdma_intr_descs(f); /* just filling in data/tables */
 
@@ -835,26 +832,6 @@ static void gk20a_fifo_reset_engine(struct gk20a *g, u32 engine_id)
 		gk20a_reset(g, mc_enable_ce2_m());
 }
 
-static void gk20a_fifo_handle_mmu_fault_thread(struct work_struct *work)
-{
-	struct fifo_gk20a *f = container_of(work, struct fifo_gk20a,
-					    fault_restore_thread);
-	struct gk20a *g = f->g;
-	int i;
-
-	/* It is safe to enable ELPG again. */
-	gk20a_pmu_enable_elpg(g);
-
-	/* Restore the runlist */
-	for (i = 0; i < g->fifo.max_runlists; i++)
-		gk20a_fifo_update_runlist_locked(g, i, ~0, true, true);
-
-	/* unlock all runlists */
-	for (i = 0; i < g->fifo.max_runlists; i++)
-		mutex_unlock(&g->fifo.runlist_info[i].mutex);
-
-}
-
 static void gk20a_fifo_handle_chsw_fault(struct gk20a *g)
 {
 	u32 intr;
@@ -895,7 +872,6 @@ static bool gk20a_fifo_should_defer_engine_reset(struct gk20a *g, u32 engine_id,
 void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
 		unsigned long fault_id) {
 	u32 engine_mmu_id;
-	int i;
 
 	/* reset engines */
 	for_each_set_bit(engine_mmu_id, &fault_id, 32) {
@@ -904,11 +880,6 @@ void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
 			gk20a_fifo_reset_engine(g, engine_id);
 	}
 
-	/* CLEAR the runlists. Do not wait for runlist to start as
-	 * some engines may not be available right now */
-	for (i = 0; i < g->fifo.max_runlists; i++)
-		gk20a_fifo_update_runlist_locked(g, i, ~0, false, false);
-
 	/* clear interrupt */
 	gk20a_writel(g, fifo_intr_mmu_fault_id_r(), fault_id);
 
@@ -916,8 +887,13 @@ void fifo_gk20a_finish_mmu_fault_handling(struct gk20a *g,
 	gk20a_writel(g, fifo_error_sched_disable_r(),
 		gk20a_readl(g, fifo_error_sched_disable_r()));
 
-	/* Spawn a work to enable PMU and restore runlists */
-	schedule_work(&g->fifo.fault_restore_thread);
+	/* Re-enable fifo access */
+	gk20a_writel(g, gr_gpfifo_ctl_r(),
+			gr_gpfifo_ctl_access_enabled_f() |
+			gr_gpfifo_ctl_semaphore_access_enabled_f());
+
+	/* It is safe to enable ELPG again. */
+	gk20a_pmu_enable_elpg(g);
 }
 
 static bool gk20a_fifo_set_ctx_mmu_error(struct gk20a *g,
@@ -960,8 +936,8 @@ static bool gk20a_fifo_handle_mmu_fault(struct gk20a *g)
 	bool fake_fault;
 	unsigned long fault_id;
 	unsigned long engine_mmu_id;
-	int i;
 	bool verbose = true;
+	u32 grfifo_ctl;
 	gk20a_dbg_fn("");
 
 	g->fifo.deferred_reset_pending = false;
@@ -969,6 +945,15 @@ static bool gk20a_fifo_handle_mmu_fault(struct gk20a *g)
 	/* Disable ELPG */
 	gk20a_pmu_disable_elpg(g);
 
+	/* Disable fifo access */
+	grfifo_ctl = gk20a_readl(g, gr_gpfifo_ctl_r());
+	grfifo_ctl &= ~gr_gpfifo_ctl_semaphore_access_f(1);
+	grfifo_ctl &= ~gr_gpfifo_ctl_access_f(1);
+
+	gk20a_writel(g, gr_gpfifo_ctl_r(),
+		grfifo_ctl | gr_gpfifo_ctl_access_f(0) |
+		gr_gpfifo_ctl_semaphore_access_f(0));
+
 	/* If we have recovery in progress, MMU fault id is invalid */
 	if (g->fifo.mmu_fault_engines) {
 		fault_id = g->fifo.mmu_fault_engines;
@@ -980,17 +965,12 @@ static bool gk20a_fifo_handle_mmu_fault(struct gk20a *g)
 		gk20a_debug_dump(g->dev);
 	}
 
-	/* lock all runlists. Note that locks are are released in
-	 * gk20a_fifo_handle_mmu_fault_thread() */
-	for (i = 0; i < g->fifo.max_runlists; i++)
-		mutex_lock(&g->fifo.runlist_info[i].mutex);
-
 
 	/* go through all faulted engines */
 	for_each_set_bit(engine_mmu_id, &fault_id, 32) {
 		/* bits in fifo_intr_mmu_fault_id_r do not correspond 1:1 to
 		 * engines. Convert engine_mmu_id to engine_id */
 		u32 engine_id = gk20a_mmu_id_to_engine_id(engine_mmu_id);
-		struct fifo_runlist_info_gk20a *runlist = g->fifo.runlist_info;
 		struct fifo_mmu_fault_info_gk20a f;
 		struct channel_gk20a *ch = NULL;
 
@@ -1055,10 +1035,6 @@ static bool gk20a_fifo_handle_mmu_fault(struct gk20a *g)
 				/* disable the channel from hw and increment
 				 * syncpoints */
 				gk20a_channel_abort(ch);
-
-				/* remove the channel from runlist */
-				clear_bit(ch->hw_chid,
-					  runlist->active_channels);
 			}
 
 		} else if (f.inst_ptr ==