path: root/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
author	Alex Waterman <alexw@nvidia.com>	2018-06-26 11:53:15 -0400
committer	Alex Waterman <alexw@nvidia.com>	2018-06-26 17:43:08 -0400
commit	0b02c8589dcc507865a8fd398431c45fbda2ba9c (patch)
tree	7714c296d600b118ea71abb8cc6fab7b2aa50de2 /drivers/gpu/nvgpu/gv11b/fb_gv11b.c
parent	8586414cc15ca36e92cf3c40c2cfb2f8b2691bee (diff)
Revert: GV11B runlist preemption patches
This reverts commit 2d397e34a5aafb5feed406a13f3db536eadae5bb.
This reverts commit cd6e821cf66837a2c3479e928414007064b9c496.
This reverts commit 5cf1eb145fef763f7153e449be60f1a7602e2c81.
This reverts commit a8d6f31bde3ccef22ee77023eaff4a62f6f88199.
This reverts commit 067ddbc4e4df3f1f756f03e7865c369a46f420aa.
This reverts commit 3eede64de058fcb1e39d723dd146bcd5d06c6f43.
This reverts commit 1407133b7e1b27a92ee8c116009541904d2ff691.
This reverts commit 797dde3e32647df3b616cea67f4defae59d38b3f.

Looks like this makes the ap_compute test on embedded-qnx-hv e3550-t194 quite bad. Might also affect ap_resmgr.

Signed-off-by: Alex Waterman <alexw@nvidia.com>
Change-Id: Ib9f06514d554d1a67993f0f2bd3d180147385e0a
Reviewed-on: https://git-master.nvidia.com/r/1761864
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/fb_gv11b.c')
-rw-r--r--	drivers/gpu/nvgpu/gv11b/fb_gv11b.c	79
1 file changed, 27 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
index 2ceb816b..54f0d2d8 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
@@ -870,11 +870,10 @@ static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
 static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 		struct mmu_fault_info *mmfault, u32 *invalidate_replay_val)
 {
-	unsigned int id_type = ID_TYPE_UNKNOWN;
+	unsigned int id_type;
 	u32 num_lce, act_eng_bitmask = 0;
 	int err = 0;
-	u32 id = FIFO_INVAL_TSG_ID;
-	unsigned int rc_type = RC_TYPE_NO_RC;
+	u32 id = ((u32)~0);
 
 	if (!mmfault->valid)
 		return;
@@ -889,23 +888,18 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 		/* CE page faults are not reported as replayable */
 		nvgpu_log(g, gpu_dbg_intr, "CE Faulted");
 		err = gv11b_fb_fix_page_fault(g, mmfault);
-		if (mmfault->refch &&
-			(u32)mmfault->refch->tsgid != FIFO_INVAL_TSG_ID) {
-			gv11b_fifo_reset_pbdma_and_eng_faulted(g,
-				&g->fifo.tsg[mmfault->refch->tsgid],
-				mmfault->faulted_pbdma,
-				mmfault->faulted_engine);
-		}
+		gv11b_fifo_reset_pbdma_and_eng_faulted(g, mmfault->refch,
+				mmfault->faulted_pbdma, mmfault->faulted_engine);
 		if (!err) {
 			nvgpu_log(g, gpu_dbg_intr, "CE Page Fault Fixed");
 			*invalidate_replay_val = 0;
-			if (mmfault->refch) {
-				gk20a_channel_put(mmfault->refch);
-				mmfault->refch = NULL;
-			}
+			/* refch in mmfault is assigned at the time of copying
+			 * fault info from snap reg or bar2 fault buf
+			 */
+			gk20a_channel_put(mmfault->refch);
 			return;
 		}
-		/* Do recovery */
+		/* Do recovery. Channel recovery needs refch */
 		nvgpu_log(g, gpu_dbg_intr, "CE Page Fault Not Fixed");
 	}
 
@@ -917,9 +911,16 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 			 * instance block, the fault cannot be isolated to a
 			 * single context so we need to reset the entire runlist
 			 */
-			rc_type = RC_TYPE_MMU_FAULT;
+			id_type = ID_TYPE_UNKNOWN;
 
 		} else if (mmfault->refch) {
+			if (gk20a_is_channel_marked_as_tsg(mmfault->refch)) {
+				id = mmfault->refch->tsgid;
+				id_type = ID_TYPE_TSG;
+			} else {
+				id = mmfault->chid;
+				id_type = ID_TYPE_CHANNEL;
+			}
 			if (mmfault->refch->mmu_nack_handled) {
 				/* We have already recovered for the same
 				 * context, skip doing another recovery.
@@ -940,40 +941,19 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 				 */
 				gk20a_channel_put(mmfault->refch);
 				return;
-			} else {
-				/* Indicate recovery is handled if mmu fault is
-				 * a result of mmu nack.
-				 */
-				mmfault->refch->mmu_nack_handled = true;
-			}
-
-			rc_type = RC_TYPE_MMU_FAULT;
-			if (gk20a_is_channel_marked_as_tsg(mmfault->refch)) {
-				id = mmfault->refch->tsgid;
-				if (id != FIFO_INVAL_TSG_ID)
-					id_type = ID_TYPE_TSG;
-			} else {
-				nvgpu_err(g, "bare channels not supported");
 			}
+		} else {
+			id_type = ID_TYPE_UNKNOWN;
 		}
-
-		/* engine is faulted */
-		if (mmfault->faulted_engine != FIFO_INVAL_ENGINE_ID) {
+		if (mmfault->faulted_engine != FIFO_INVAL_ENGINE_ID)
 			act_eng_bitmask = BIT(mmfault->faulted_engine);
-			rc_type = RC_TYPE_MMU_FAULT;
-		}
 
-		/* refch in mmfault is assigned at the time of copying
-		 * fault info from snap reg or bar2 fault buf
+		/* Indicate recovery is handled if mmu fault is a result of
+		 * mmu nack.
 		 */
-		if (mmfault->refch) {
-			gk20a_channel_put(mmfault->refch);
-			mmfault->refch = NULL;
-		}
-
-		if (rc_type != RC_TYPE_NO_RC)
-			g->ops.fifo.teardown_ch_tsg(g, act_eng_bitmask,
-				id, id_type, rc_type, mmfault);
+		mmfault->refch->mmu_nack_handled = true;
+		g->ops.fifo.teardown_ch_tsg(g, act_eng_bitmask,
+			id, id_type, RC_TYPE_MMU_FAULT, mmfault);
 	} else {
 		if (mmfault->fault_type == gmmu_fault_type_pte_v()) {
 			nvgpu_log(g, gpu_dbg_intr, "invalid pte! try to fix");
@@ -992,10 +972,7 @@ static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
 		/* refch in mmfault is assigned at the time of copying
 		 * fault info from snap reg or bar2 fault buf
 		 */
-		if (mmfault->refch) {
-			gk20a_channel_put(mmfault->refch);
-			mmfault->refch = NULL;
-		}
+		gk20a_channel_put(mmfault->refch);
 	}
 }
 
@@ -1084,10 +1061,8 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
 			next_fault_addr = mmfault->fault_addr;
 			if (prev_fault_addr == next_fault_addr) {
 				nvgpu_log(g, gpu_dbg_intr, "pte already scanned");
-				if (mmfault->refch) {
+				if (mmfault->refch)
 					gk20a_channel_put(mmfault->refch);
-					mmfault->refch = NULL;
-				}
 				continue;
 			}
 		}