/*
 * GV11B FB
 *
 * Copyright (c) 2016-2017, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/types.h>

#include <nvgpu/dma.h>
#include <nvgpu/log.h>

#include "gk20a/gk20a.h"
#include "gk20a/kind_gk20a.h"

#include "gp10b/fb_gp10b.h"

#include "gv11b/fifo_gv11b.h"
#include "gv11b/fb_gv11b.h"
#include "gv11b/ce_gv11b.h"

#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
#include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>

#include <nvgpu/enabled.h>

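/*
 * Interim sequence for programming MSS_NVLINK SoC credits: map the four
 * MSS_NVLINK apertures and read back and re-write two registers in each
 * of them (see the TODO below; to be replaced by a proper nvlink API).
 */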
static void gv11b_init_nvlink_soc_credits(struct gk20a *g)
{
	void __iomem *soc1 = ioremap(0x01f20010, 4096); //MSS_NVLINK_1_BASE
	void __iomem *soc2 = ioremap(0x01f40010, 4096); //MSS_NVLINK_2_BASE
	void __iomem *soc3 = ioremap(0x01f60010, 4096); //MSS_NVLINK_3_BASE
	void __iomem *soc4 = ioremap(0x01f80010, 4096); //MSS_NVLINK_4_BASE
	u32 val;

	/* TODO : replace this code with proper nvlink API */
	nvgpu_info(g, "init nvlink soc credits");

	val = readl_relaxed(soc1);
	writel_relaxed(val, soc1);
	val = readl_relaxed(soc1 + 4);
	writel_relaxed(val, soc1 + 4);

	val = readl_relaxed(soc2);
	writel_relaxed(val, soc2);
	val = readl_relaxed(soc2 + 4);
	writel_relaxed(val, soc2 + 4);

	val = readl_relaxed(soc3);
	writel_relaxed(val, soc3);
	val = readl_relaxed(soc3 + 4);
	writel_relaxed(val, soc3 + 4);

	val = readl_relaxed(soc4);
	writel_relaxed(val, soc4);
	val = readl_relaxed(soc4 + 4);
	writel_relaxed(val, soc4 + 4);

	/* unmap the temporary MSS_NVLINK mappings */
	iounmap(soc1);
	iounmap(soc2);
	iounmap(soc3);
	iounmap(soc4);
}

static void gv11b_fb_init_fs_state(struct gk20a *g)
{
	nvgpu_log(g, gpu_dbg_fn, "initialize gv11b fb");

	nvgpu_log(g, gpu_dbg_info, "fbhub active ltcs %x",
			gk20a_readl(g, fb_fbhub_num_active_ltcs_r()));

	nvgpu_log(g, gpu_dbg_info, "mmu active ltcs %u",
			fb_mmu_num_active_ltcs_count_v(
			gk20a_readl(g, fb_mmu_num_active_ltcs_r())));
}

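/*
 * Program the compression bit backing store (CBC) base register: take the
 * compbit store address, shift it by the base address alignment and divide
 * it by the LTC count, rounding up if multiplying back falls short of the
 * original address. All comptag lines are invalidated afterwards.
 */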
static void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
{
	u32 max_size = gr->max_comptag_mem;
	/* one tag line covers 64KB */
	u32 max_comptag_lines = max_size << 4;
	u32 compbit_base_post_divide;
	u64 compbit_base_post_multiply64;
	u64 compbit_store_iova;
	u64 compbit_base_post_divide64;

	if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
		compbit_store_iova = gk20a_mem_phys(&gr->compbit_store.mem);
	else
		compbit_store_iova = g->ops.mm.get_iova_addr(g,
				gr->compbit_store.mem.priv.sgt->sgl, 0);

	compbit_base_post_divide64 = compbit_store_iova >>
		fb_mmu_cbc_base_address_alignment_shift_v();

	do_div(compbit_base_post_divide64, g->ltc_count);
	compbit_base_post_divide = u64_lo32(compbit_base_post_divide64);

	compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
		g->ltc_count) << fb_mmu_cbc_base_address_alignment_shift_v();

	if (compbit_base_post_multiply64 < compbit_store_iova)
		compbit_base_post_divide++;

	if (g->ops.ltc.cbc_fix_config)
		compbit_base_post_divide =
			g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);

	gk20a_writel(g, fb_mmu_cbc_base_r(),
		fb_mmu_cbc_base_address_f(compbit_base_post_divide));

	nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
		"compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
		(u32)(compbit_store_iova >> 32),
		(u32)(compbit_store_iova & 0xffffffff),
		compbit_base_post_divide);
	nvgpu_log(g, gpu_dbg_fn, "cbc base %x",
		gk20a_readl(g, fb_mmu_cbc_base_r()));

	gr->compbit_store.base_hw = compbit_base_post_divide;

	g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
			0, max_comptag_lines - 1);

}

static void gv11b_fb_reset(struct gk20a *g)
{
	u32 val;

	nvgpu_info(g, "reset gv11b fb");

	g->ops.mc.reset(g, mc_enable_pfb_enabled_f() |
				mc_enable_xbar_enabled_f() |
				mc_enable_hub_enabled_f());

	val = gk20a_readl(g, mc_elpg_enable_r());
	val |= mc_elpg_enable_xbar_enabled_f() |
		mc_elpg_enable_pfb_enabled_f() |
		mc_elpg_enable_hub_enabled_f();
	gk20a_writel(g, mc_elpg_enable_r(), val);

	/* fs hub should be out of reset by now */
	gv11b_init_nvlink_soc_credits(g);

	val = gk20a_readl(g, fifo_fb_iface_r());
	nvgpu_info(g, "fifo_fb_iface val = 0x%x", val);
	if (!(val & fifo_fb_iface_control_enable_f() &&
		val & fifo_fb_iface_status_enabled_f())) {
		nvgpu_info(g, "fifo_fb_iface set control enable");
		gk20a_writel(g, fifo_fb_iface_r(),
				fifo_fb_iface_control_enable_f());
		val = gk20a_readl(g, fifo_fb_iface_r());
		nvgpu_info(g, "fifo_fb_iface val = 0x%x", val);
	}
}

static const char * const invalid_str = "invalid";

static const char *const fault_type_descs_gv11b[] = {
	"invalid pde",
	"invalid pde size",
	"invalid pte",
	"limit violation",
	"unbound inst block",
	"priv violation",
	"write",
	"read",
	"pitch mask violation",
	"work creation",
	"unsupported aperture",
	"compression failure",
	"unsupported kind",
	"region violation",
	"poison",
	"atomic"
};

static const char *const fault_client_type_descs_gv11b[] = {
	"gpc",
	"hub",
};

static const char *const fault_access_type_descs_gv11b[] = {
	"virt read",
	"virt write",
	"virt atomic strong",
	"virt prefetch",
	"virt atomic weak",
	"xxx",
	"xxx",
	"xxx",
	"phys read",
	"phys write",
	"phys atomic",
	"phys prefetch",
};

static const char *const hub_client_descs_gv11b[] = {
	"vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu",
	"host cpu nb", "iso", "mmu", "nvdec", "nvenc1", "nvenc2",
	"niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc",
	"scc nb", "sec", "ssync", "gr copy", "xv", "mmu nb",
	"nvenc", "d falcon", "sked", "a falcon", "hsce0", "hsce1",
	"hsce2", "hsce3", "hsce4", "hsce5", "hsce6", "hsce7", "hsce8",
	"hsce9", "hshub", "ptp x0", "ptp x1", "ptp x2", "ptp x3",
	"ptp x4", "ptp x5", "ptp x6", "ptp x7", "vpr scrubber0",
	"vpr scrubber1", "dwbif", "fbfalcon", "ce shim", "gsp",
	"dont care"
};

static const char *const gpc_client_descs_gv11b[] = {
	"t1 0", "t1 1", "t1 2", "t1 3",
	"t1 4", "t1 5", "t1 6", "t1 7",
	"pe 0", "pe 1", "pe 2", "pe 3",
	"pe 4", "pe 5", "pe 6", "pe 7",
	"rast", "gcc", "gpccs",
	"prop 0", "prop 1", "prop 2", "prop 3",
	"gpm",
	"ltp utlb 0", "ltp utlb 1", "ltp utlb 2", "ltp utlb 3",
	"ltp utlb 4", "ltp utlb 5", "ltp utlb 6", "ltp utlb 7",
	"utlb",
	"t1 8", "t1 9", "t1 10", "t1 11",
	"t1 12", "t1 13", "t1 14", "t1 15",
	"tpccs 0", "tpccs 1", "tpccs 2", "tpccs 3",
	"tpccs 4", "tpccs 5", "tpccs 6", "tpccs 7",
	"pe 8", "pe 9", "tpccs 8", "tpccs 9",
	"t1 16", "t1 17", "t1 18", "t1 19",
	"pe 10", "pe 11", "tpccs 10", "tpccs 11",
	"t1 20", "t1 21", "t1 22", "t1 23",
	"pe 12", "pe 13", "tpccs 12", "tpccs 13",
	"t1 24", "t1 25", "t1 26", "t1 27",
	"pe 14", "pe 15", "tpccs 14", "tpccs 15",
	"t1 28", "t1 29", "t1 30", "t1 31",
	"pe 16", "pe 17", "tpccs 16", "tpccs 17",
	"t1 32", "t1 33", "t1 34", "t1 35",
	"pe 18", "pe 19", "tpccs 18", "tpccs 19",
	"t1 36", "t1 37", "t1 38", "t1 39",
};

static void gv11b_init_uncompressed_kind_map(void)
{
	gk20a_uc_kind_map[gmmu_pte_kind_c32_ms2_4cbra_v()] =
	gk20a_uc_kind_map[gmmu_pte_kind_c64_ms2_4cbra_v()] =
		gmmu_pte_kind_generic_16bx2_v();
}

static bool gv11b_kind_supported(u8 k)
{
	return (k == gmmu_pte_kind_c32_ms2_4cbra_v()
		|| k == gmmu_pte_kind_c64_ms2_4cbra_v());
}

static bool gv11b_kind_z(u8 k)
{
	return (k == gmmu_pte_kind_c32_ms2_4cbra_v()
		|| k == gmmu_pte_kind_c64_ms2_4cbra_v());
}

static bool gv11b_kind_compressible(u8 k)
{

	return (k == gmmu_pte_kind_c32_ms2_4cbra_v()
		|| k == gmmu_pte_kind_c64_ms2_4cbra_v());
}

static bool gv11b_kind_zbc(u8 k)
{

	return (k == gmmu_pte_kind_c32_ms2_4cbra_v()
		|| k == gmmu_pte_kind_c64_ms2_4cbra_v());
}

static void gv11b_init_kind_attr(void)
{
	u16 k;

	for (k = 0; k < 256; k++) {
		if (gv11b_kind_supported((u8)k))
			gk20a_kind_attr[k] |= GK20A_KIND_ATTR_SUPPORTED;
		if (gv11b_kind_compressible((u8)k))
			gk20a_kind_attr[k] |= GK20A_KIND_ATTR_COMPRESSIBLE;
		if (gv11b_kind_z((u8)k))
			gk20a_kind_attr[k] |= GK20A_KIND_ATTR_Z;
		if (gv11b_kind_zbc((u8)k))
			gk20a_kind_attr[k] |= GK20A_KIND_ATTR_ZBC;
	}
}

u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
				 unsigned int index)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
	return fb_mmu_fault_buffer_size_enable_v(reg_val);
}

static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g,
				 unsigned int index, u32 next)
{
	u32 reg_val;

	nvgpu_log(g, gpu_dbg_intr, "updating get index with = %d", next);

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index));
	reg_val = set_field(reg_val, fb_mmu_fault_buffer_get_ptr_m(),
			 fb_mmu_fault_buffer_get_ptr_f(next));

	/* an overflow can occur while the fault is being handled;
	 * clear the overflow flag along with the get pointer update
	 */
	if (reg_val & fb_mmu_fault_buffer_get_overflow_m())
		reg_val |= fb_mmu_fault_buffer_get_overflow_clear_f();

	gk20a_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val);

	/* make sure the get ptr update is visible to everyone so that
	 * an already read entry is not read again
	 */
	mb();
}

static u32 gv11b_fb_fault_buffer_get_index(struct gk20a *g,
			unsigned int index)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index));
	return fb_mmu_fault_buffer_get_ptr_v(reg_val);
}

static u32 gv11b_fb_fault_buffer_put_index(struct gk20a *g,
				 unsigned int index)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_put_r(index));
	return fb_mmu_fault_buffer_put_ptr_v(reg_val);
}

static u32 gv11b_fb_fault_buffer_size_val(struct gk20a *g,
				 unsigned int index)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
	return fb_mmu_fault_buffer_size_val_v(reg_val);
}

static bool gv11b_fb_is_fault_buffer_empty(struct gk20a *g,
		 unsigned int index, u32 *get_idx)
{
	u32 put_idx;

	*get_idx = gv11b_fb_fault_buffer_get_index(g, index);
	put_idx = gv11b_fb_fault_buffer_put_index(g, index);

	return *get_idx == put_idx;
}

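/* The fault buffer is circular: it is full when advancing the put index by
 * one entry would make it equal to the get index.
 */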
static bool gv11b_fb_is_fault_buffer_full(struct gk20a *g,
				 unsigned int index)
{
	u32 get_idx, put_idx, entries;


	get_idx = gv11b_fb_fault_buffer_get_index(g, index);

	put_idx = gv11b_fb_fault_buffer_put_index(g, index);

	entries = gv11b_fb_fault_buffer_size_val(g, index);

	return get_idx == ((put_idx + 1) % entries);
}

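/*
 * Enable or disable the hw fault buffer selected by index. When disabling,
 * poll fb_mmu_fault_status busy until the MMU has stopped writing to the
 * buffer, e.g. before its BAR2 mapping is unmapped by s/w.
 */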
void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
		 unsigned int index, unsigned int state)
{
	u32 fault_status;
	u32 reg_val;

	nvgpu_log_fn(g, " ");

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
	if (state) {
		if (gv11b_fb_is_fault_buf_enabled(g, index)) {
			nvgpu_log_info(g, "fault buffer is already enabled");
		} else {
			reg_val |= fb_mmu_fault_buffer_size_enable_true_f();
			gk20a_writel(g, fb_mmu_fault_buffer_size_r(index),
					 reg_val);
		}

	} else {
		struct nvgpu_timeout timeout;
		u32 delay = GR_IDLE_CHECK_DEFAULT;

		nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
			   NVGPU_TIMER_CPU_TIMER);

		reg_val &= (~(fb_mmu_fault_buffer_size_enable_m()));
		gk20a_writel(g, fb_mmu_fault_buffer_size_r(index), reg_val);

		fault_status = gk20a_readl(g, fb_mmu_fault_status_r());

		do {
			if (!(fault_status & fb_mmu_fault_status_busy_true_f()))
				break;
			/*
			 * Make sure the fault buffer is disabled so that
			 * hw does not access it during the window in which
			 * s/w is unmapping its BAR2 mapping.
			 */
			nvgpu_log_info(g, "fault status busy set, check again");
			fault_status = gk20a_readl(g, fb_mmu_fault_status_r());

			nvgpu_usleep_range(delay, delay * 2);
			delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
		} while (!nvgpu_timeout_expired_msg(&timeout,
				"fault status busy set"));
	}
}

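/*
 * (Re)program a hw fault buffer: disable it, write its GPU VA and size
 * (number of fifos) with the overflow interrupt enabled, then enable it
 * again.
 */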
void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
{
	u32 addr_lo;
	u32 addr_hi;

	nvgpu_log_fn(g, " ");

	gv11b_fb_fault_buf_set_state_hw(g, index,
					 FAULT_BUF_DISABLED);
	addr_lo = u64_lo32(g->mm.hw_fault_buf[index].gpu_va >>
					ram_in_base_shift_v());
	addr_hi = u64_hi32(g->mm.hw_fault_buf[index].gpu_va);

	gk20a_writel(g, fb_mmu_fault_buffer_lo_r(index),
			fb_mmu_fault_buffer_lo_addr_f(addr_lo));

	gk20a_writel(g, fb_mmu_fault_buffer_hi_r(index),
			fb_mmu_fault_buffer_hi_addr_f(addr_hi));

	gk20a_writel(g, fb_mmu_fault_buffer_size_r(index),
		fb_mmu_fault_buffer_size_val_f(g->ops.fifo.get_num_fifos(g)) |
		fb_mmu_fault_buffer_size_overflow_intr_enable_f());

	gv11b_fb_fault_buf_set_state_hw(g, index, FAULT_BUF_ENABLED);
}

static void gv11b_fb_intr_en_set(struct gk20a *g,
			 unsigned int index, u32 mask)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, fb_niso_intr_en_set_r(index));
	reg_val |= mask;
	gk20a_writel(g, fb_niso_intr_en_set_r(index), reg_val);
}

static void gv11b_fb_intr_en_clr(struct gk20a *g,
			 unsigned int index, u32 mask)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, fb_niso_intr_en_clr_r(index));
	reg_val |= mask;
	gk20a_writel(g, fb_niso_intr_en_clr_r(index), reg_val);
}

static u32 gv11b_fb_get_hub_intr_clr_mask(struct gk20a *g,
			 unsigned int intr_type)
{
	u32 mask = 0;

	if (intr_type & HUB_INTR_TYPE_OTHER) {
		mask |=
		 fb_niso_intr_en_clr_mmu_other_fault_notify_m();
	}

	if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
		mask |=
		fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m() |
		fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m();
	}

	if (intr_type & HUB_INTR_TYPE_REPLAY) {
		mask |=
		 fb_niso_intr_en_clr_mmu_replayable_fault_notify_m() |
		 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m();
	}

	if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
		mask |=
		 fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_m();
	}

	if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
		mask |=
		 fb_niso_intr_en_clr_hub_access_counter_notify_m() |
		 fb_niso_intr_en_clr_hub_access_counter_error_m();
	}

	return mask;
}

static u32 gv11b_fb_get_hub_intr_en_mask(struct gk20a *g,
			 unsigned int intr_type)
{
	u32 mask = 0;

	if (intr_type & HUB_INTR_TYPE_OTHER) {
		mask |=
		 fb_niso_intr_en_set_mmu_other_fault_notify_m();
	}

	if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
		mask |=
		fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
		fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m();
	}

	if (intr_type & HUB_INTR_TYPE_REPLAY) {
		mask |=
		fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
		fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
	}

	if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
		mask |=
		 fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
	}

	if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
		mask |=
		 fb_niso_intr_en_set_hub_access_counter_notify_m() |
		 fb_niso_intr_en_set_hub_access_counter_error_m();
	}

	return mask;
}

void gv11b_fb_enable_hub_intr(struct gk20a *g,
			 unsigned int index, unsigned int intr_type)
{
	u32 mask = 0;

	mask = gv11b_fb_get_hub_intr_en_mask(g, intr_type);

	if (mask)
		gv11b_fb_intr_en_set(g, index, mask);
}

void gv11b_fb_disable_hub_intr(struct gk20a *g,
			 unsigned int index, unsigned int intr_type)
{
	u32 mask = 0;

	mask = gv11b_fb_get_hub_intr_clr_mask(g, intr_type);

	if (mask)
		gv11b_fb_intr_en_clr(g, index, mask);
}

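/*
 * Handle an L2 TLB ECC interrupt: read the corrected and uncorrected error
 * counters, clear the counters and status to ack the interrupt, fold any
 * counter overflow into the deltas, and accumulate the running totals.
 */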
static void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
{
	u32 ecc_addr, corrected_cnt, uncorrected_cnt;
	u32 corrected_delta, uncorrected_delta;
	u32 corrected_overflow, uncorrected_overflow;

	ecc_addr = gk20a_readl(g, fb_mmu_l2tlb_ecc_address_r());
	corrected_cnt = gk20a_readl(g,
		fb_mmu_l2tlb_ecc_corrected_err_count_r());
	uncorrected_cnt = gk20a_readl(g,
		fb_mmu_l2tlb_ecc_uncorrected_err_count_r());

	corrected_delta = fb_mmu_l2tlb_ecc_corrected_err_count_total_v(
							corrected_cnt);
	uncorrected_delta = fb_mmu_l2tlb_ecc_uncorrected_err_count_total_v(
							uncorrected_cnt);
	corrected_overflow = ecc_status &
		fb_mmu_l2tlb_ecc_status_corrected_err_total_counter_overflow_m();

	uncorrected_overflow = ecc_status &
		fb_mmu_l2tlb_ecc_status_uncorrected_err_total_counter_overflow_m();

	/* clear the interrupt */
	if ((corrected_delta > 0) || corrected_overflow)
		gk20a_writel(g, fb_mmu_l2tlb_ecc_corrected_err_count_r(), 0);
	if ((uncorrected_delta > 0) || uncorrected_overflow)
		gk20a_writel(g, fb_mmu_l2tlb_ecc_uncorrected_err_count_r(), 0);

	gk20a_writel(g, fb_mmu_l2tlb_ecc_status_r(),
				fb_mmu_l2tlb_ecc_status_reset_clear_f());

	/* Handle overflow */
	if (corrected_overflow)
		corrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_corrected_err_count_total_s());
	if (uncorrected_overflow)
		uncorrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s());


	g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count.counters[0] +=
							corrected_delta;
	g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count.counters[0] +=
							uncorrected_delta;

	if (ecc_status & fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
	if (ecc_status & fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
	if (corrected_overflow || uncorrected_overflow)
		nvgpu_info(g, "mmu l2tlb ecc counter overflow!");

	nvgpu_log(g, gpu_dbg_intr,
		"ecc error address: 0x%x", ecc_addr);
	nvgpu_log(g, gpu_dbg_intr,
		"ecc error count corrected: %d, uncorrected %d",
		g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count.counters[0],
		g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count.counters[0]);
}

static void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
{
	u32 ecc_addr, corrected_cnt, uncorrected_cnt;
	u32 corrected_delta, uncorrected_delta;
	u32 corrected_overflow, uncorrected_overflow;

	ecc_addr = gk20a_readl(g, fb_mmu_hubtlb_ecc_address_r());
	corrected_cnt = gk20a_readl(g,
		fb_mmu_hubtlb_ecc_corrected_err_count_r());
	uncorrected_cnt = gk20a_readl(g,
		fb_mmu_hubtlb_ecc_uncorrected_err_count_r());

	corrected_delta = fb_mmu_hubtlb_ecc_corrected_err_count_total_v(
							corrected_cnt);
	uncorrected_delta = fb_mmu_hubtlb_ecc_uncorrected_err_count_total_v(
							uncorrected_cnt);
	corrected_overflow = ecc_status &
		fb_mmu_hubtlb_ecc_status_corrected_err_total_counter_overflow_m();

	uncorrected_overflow = ecc_status &
		fb_mmu_hubtlb_ecc_status_uncorrected_err_total_counter_overflow_m();

	/* clear the interrupt */
	if ((corrected_delta > 0) || corrected_overflow)
		gk20a_writel(g, fb_mmu_hubtlb_ecc_corrected_err_count_r(), 0);
	if ((uncorrected_delta > 0) || uncorrected_overflow)
		gk20a_writel(g, fb_mmu_hubtlb_ecc_uncorrected_err_count_r(), 0);

	gk20a_writel(g, fb_mmu_hubtlb_ecc_status_r(),
				fb_mmu_hubtlb_ecc_status_reset_clear_f());

	/* Handle overflow */
	if (corrected_overflow)
		corrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_corrected_err_count_total_s());
	if (uncorrected_overflow)
		uncorrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s());


	g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count.counters[0] +=
							corrected_delta;
	g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count.counters[0] +=
							uncorrected_delta;

	if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
	if (ecc_status & fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
	if (corrected_overflow || uncorrected_overflow)
		nvgpu_info(g, "mmu hubtlb ecc counter overflow!");

	nvgpu_log(g, gpu_dbg_intr,
		"ecc error address: 0x%x", ecc_addr);
	nvgpu_log(g, gpu_dbg_intr,
		"ecc error count corrected: %d, uncorrected %d",
		g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count.counters[0],
		g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count.counters[0]);
}

static void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
{
	u32 ecc_addr, corrected_cnt, uncorrected_cnt;
	u32 corrected_delta, uncorrected_delta;
	u32 corrected_overflow, uncorrected_overflow;

	ecc_addr = gk20a_readl(g, fb_mmu_fillunit_ecc_address_r());
	corrected_cnt = gk20a_readl(g,
		fb_mmu_fillunit_ecc_corrected_err_count_r());
	uncorrected_cnt = gk20a_readl(g,
		fb_mmu_fillunit_ecc_uncorrected_err_count_r());

	corrected_delta = fb_mmu_fillunit_ecc_corrected_err_count_total_v(
							corrected_cnt);
	uncorrected_delta = fb_mmu_fillunit_ecc_uncorrected_err_count_total_v(
							uncorrected_cnt);
	corrected_overflow = ecc_status &
		fb_mmu_fillunit_ecc_status_corrected_err_total_counter_overflow_m();

	uncorrected_overflow = ecc_status &
		fb_mmu_fillunit_ecc_status_uncorrected_err_total_counter_overflow_m();

	/* clear the interrupt */
	if ((corrected_delta > 0) || corrected_overflow)
		gk20a_writel(g, fb_mmu_fillunit_ecc_corrected_err_count_r(), 0);
	if ((uncorrected_delta > 0) || uncorrected_overflow)
		gk20a_writel(g, fb_mmu_fillunit_ecc_uncorrected_err_count_r(), 0);

	gk20a_writel(g, fb_mmu_fillunit_ecc_status_r(),
				fb_mmu_fillunit_ecc_status_reset_clear_f());

	/* Handle overflow */
	if (corrected_overflow)
		corrected_delta += (0x1UL << fb_mmu_fillunit_ecc_corrected_err_count_total_s());
	if (uncorrected_overflow)
		uncorrected_delta += (0x1UL << fb_mmu_fillunit_ecc_uncorrected_err_count_total_s());


	g->ecc.eng.t19x.mmu_fillunit_corrected_err_count.counters[0] +=
							corrected_delta;
	g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count.counters[0] +=
							uncorrected_delta;

	if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc pte data error");
	if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pte data error");
	if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc pde0 data error");
	if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pde0 data error");

	if (corrected_overflow || uncorrected_overflow)
		nvgpu_info(g, "mmu fillunit ecc counter overflow!");

	nvgpu_log(g, gpu_dbg_intr,
		"ecc error address: 0x%x", ecc_addr);
	nvgpu_log(g, gpu_dbg_intr,
		"ecc error count corrected: %d, uncorrected %d",
		g->ecc.eng.t19x.mmu_fillunit_corrected_err_count.counters[0],
		g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count.counters[0]);
}

static void gv11b_fb_parse_mmfault(struct mmu_fault_info *mmfault)
{
	if (WARN_ON(mmfault->fault_type >=
				ARRAY_SIZE(fault_type_descs_gv11b)))
		mmfault->fault_type_desc = invalid_str;
	else
		mmfault->fault_type_desc =
			 fault_type_descs_gv11b[mmfault->fault_type];

	if (WARN_ON(mmfault->client_type >=
			ARRAY_SIZE(fault_client_type_descs_gv11b)))
		mmfault->client_type_desc = invalid_str;
	else
		mmfault->client_type_desc =
			 fault_client_type_descs_gv11b[mmfault->client_type];

	mmfault->client_id_desc = invalid_str;
	if (mmfault->client_type ==
			gmmu_fault_client_type_hub_v()) {

		if (!(WARN_ON(mmfault->client_id >=
				 ARRAY_SIZE(hub_client_descs_gv11b))))
			mmfault->client_id_desc =
				 hub_client_descs_gv11b[mmfault->client_id];
	} else if (mmfault->client_type ==
			gmmu_fault_client_type_gpc_v()) {
		if (!(WARN_ON(mmfault->client_id >=
				 ARRAY_SIZE(gpc_client_descs_gv11b))))
			mmfault->client_id_desc =
				 gpc_client_descs_gv11b[mmfault->client_id];
	}

}

static void gv11b_fb_print_fault_info(struct gk20a *g,
			 struct mmu_fault_info *mmfault)
{
	if (mmfault && mmfault->valid) {
		nvgpu_err(g, "[MMU FAULT] "
			"mmu engine id:  %d, "
			"ch id:  %d, "
			"fault addr: 0x%llx, "
			"fault addr aperture: %d, "
			"fault type: %s, "
			"access type: %s, ",
			mmfault->mmu_engine_id,
			mmfault->chid,
			mmfault->fault_addr,
			mmfault->fault_addr_aperture,
			mmfault->fault_type_desc,
			fault_access_type_descs_gv11b[mmfault->access_type]);
		nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
			"mmu engine id:  %d, "
			"faulted act eng id if any: 0x%x, "
			"faulted veid if any: 0x%x, "
			"faulted pbdma id if any: 0x%x, "
			"fault addr: 0x%llx, ",
			mmfault->mmu_engine_id,
			mmfault->faulted_engine,
			mmfault->faulted_subid,
			mmfault->faulted_pbdma,
			mmfault->fault_addr);
		nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
			"fault addr aperture: %d, "
			"fault type: %s, "
			"access type: %s, "
			"inst ptr: 0x%llx, "
			"inst ptr aperture: %d, ",
			mmfault->fault_addr_aperture,
			mmfault->fault_type_desc,
			fault_access_type_descs_gv11b[mmfault->access_type],
			mmfault->inst_ptr,
			mmfault->inst_aperture);
		nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
			"ch id:  %d, "
			"timestamp hi:lo 0x%08x:0x%08x, "
			"client type: %s, "
			"client id:  %s, "
			"gpc id if client type is gpc: %d, ",
			mmfault->chid,
			mmfault->timestamp_hi, mmfault->timestamp_lo,
			mmfault->client_type_desc,
			mmfault->client_id_desc,
			mmfault->gpc_id);
		nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
			"protected mode: %d, "
			"replayable fault: %d, "
			"replayable fault en:  %d ",
			mmfault->protected_mode,
			mmfault->replayable_fault,
			mmfault->replay_fault_en);
	}
}

/*
 * Fault buffer format
 *
 * 31    28     24 23           16 15            8 7     4       0
 *.-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-.
 *|              inst_lo                  |0 0|apr|0 0 0 0 0 0 0 0|
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|                             inst_hi                           |
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|              addr_31_12               |                   |AP |
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|                            addr_63_32                         |
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|                          timestamp_lo                         |
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|                          timestamp_hi                         |
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|                           (reserved)        |    engine_id    |
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|V|R|P|  gpc_id |0 0 0|t|0|acctp|0|   client    |RF0 0|faulttype|
 */

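/*
 * Decode one hw fault buffer entry (layout above) into mmu_fault_info:
 * look up and reference the faulting channel from the instance pointer,
 * then clear the entry's valid bit so the entry is not processed again.
 */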
static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
	 struct nvgpu_mem *mem, u32 offset, struct mmu_fault_info *mmfault)
{
	u32 rd32_val;
	u32 addr_lo, addr_hi;
	u64 inst_ptr;
	u32 chid = FIFO_INVAL_CHANNEL_ID;
	struct channel_gk20a *refch;

	memset(mmfault, 0, sizeof(*mmfault));

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			 gmmu_fault_buf_entry_inst_lo_w());
	addr_lo = gmmu_fault_buf_entry_inst_lo_v(rd32_val);
	addr_lo = addr_lo << ram_in_base_shift_v();

	addr_hi = nvgpu_mem_rd32(g, mem, offset +
				 gmmu_fault_buf_entry_inst_hi_w());
	addr_hi = gmmu_fault_buf_entry_inst_hi_v(addr_hi);

	inst_ptr = hi32_lo32_to_u64(addr_hi, addr_lo);

	/* refch will be put back after fault is handled */
	refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
	if (refch)
		chid = refch->chid;

	/* it is ok to continue even if refch is NULL */
	mmfault->refch = refch;
	mmfault->chid = chid;
	mmfault->inst_ptr = inst_ptr;
	mmfault->inst_aperture = gmmu_fault_buf_entry_inst_aperture_v(rd32_val);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			 gmmu_fault_buf_entry_addr_lo_w());

	mmfault->fault_addr_aperture =
		gmmu_fault_buf_entry_addr_phys_aperture_v(rd32_val);
	addr_lo = gmmu_fault_buf_entry_addr_lo_v(rd32_val);
	addr_lo = addr_lo << ram_in_base_shift_v();

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			 gmmu_fault_buf_entry_addr_hi_w());
	addr_hi = gmmu_fault_buf_entry_addr_hi_v(rd32_val);
	mmfault->fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			 gmmu_fault_buf_entry_timestamp_lo_w());
	mmfault->timestamp_lo =
		 gmmu_fault_buf_entry_timestamp_lo_v(rd32_val);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			 gmmu_fault_buf_entry_timestamp_hi_w());
	mmfault->timestamp_hi =
		 gmmu_fault_buf_entry_timestamp_hi_v(rd32_val);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			 gmmu_fault_buf_entry_engine_id_w());

	mmfault->mmu_engine_id =
		 gmmu_fault_buf_entry_engine_id_v(rd32_val);
	gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(g, mmfault->mmu_engine_id,
		 &mmfault->faulted_engine, &mmfault->faulted_subid,
		 &mmfault->faulted_pbdma);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_fault_type_w());
	mmfault->client_id =
		 gmmu_fault_buf_entry_client_v(rd32_val);
	mmfault->replayable_fault =
		gmmu_fault_buf_entry_replayable_fault_v(rd32_val);

	mmfault->fault_type =
		 gmmu_fault_buf_entry_fault_type_v(rd32_val);
	mmfault->access_type =
		 gmmu_fault_buf_entry_access_type_v(rd32_val);

	mmfault->client_type =
		gmmu_fault_buf_entry_mmu_client_type_v(rd32_val);

	mmfault->gpc_id =
		 gmmu_fault_buf_entry_gpc_id_v(rd32_val);
	mmfault->protected_mode =
		gmmu_fault_buf_entry_protected_mode_v(rd32_val);

	mmfault->replay_fault_en =
		gmmu_fault_buf_entry_replayable_fault_en_v(rd32_val);

	mmfault->valid = gmmu_fault_buf_entry_valid_v(rd32_val);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_fault_type_w());
	rd32_val &= ~(gmmu_fault_buf_entry_valid_m());
	nvgpu_mem_wr32(g, mem, offset + gmmu_fault_buf_entry_valid_w(),
					 rd32_val);

	gv11b_fb_parse_mmfault(mmfault);
}

static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
			 struct mmu_fault_info *mmfault)
{
	unsigned int id_type;
	u32 act_eng_bitmask = 0;

	if (!mmfault->valid)
		return;

	gv11b_fb_print_fault_info(g, mmfault);

	if (mmfault->fault_type == gmmu_fault_type_unbound_inst_block_v()) {
		/*
		 * Bug 1847172: When an engine faults due to an unbound
		 * instance block, the fault cannot be isolated to a
		 * single context so we need to reset the entire runlist
		 */
		id_type = ID_TYPE_UNKNOWN;
		nvgpu_log(g, gpu_dbg_intr, "UNBOUND INST BLOCK MMU FAULT");

	} else if (mmfault->refch) {
		if (gk20a_is_channel_marked_as_tsg(mmfault->refch))
			id_type = ID_TYPE_TSG;
		else
			id_type = ID_TYPE_CHANNEL;
	} else {
		id_type = ID_TYPE_UNKNOWN;
	}

	if (mmfault->faulted_engine != FIFO_INVAL_ENGINE_ID)
		act_eng_bitmask = BIT(mmfault->faulted_engine);

	g->ops.fifo.teardown_ch_tsg(g, act_eng_bitmask, mmfault->chid,
					 id_type, RC_TYPE_MMU_FAULT, mmfault);
}

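/*
 * Drain a replayable or non-replayable hw fault buffer: starting at the
 * get index, copy out each valid entry, advance the get pointer, and run
 * the common mmu fault handling on it.
 */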
static void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
		 u32 fault_status, unsigned int index)
{
	u32 get_indx, offset, rd32_val, entries;
	struct nvgpu_mem *mem;
	struct mmu_fault_info *mmfault;

	if (gv11b_fb_is_fault_buffer_empty(g, index,
						 &get_indx)) {
		nvgpu_log(g, gpu_dbg_intr, "SPURIOUS fault");
		return;
	}
	nvgpu_log(g, gpu_dbg_intr, "get ptr = %d", get_indx);

	mem = &g->mm.hw_fault_buf[index];
	mmfault = g->mm.fault_info[index];

	entries = gv11b_fb_fault_buffer_size_val(g, index);
	nvgpu_log(g, gpu_dbg_intr, "buffer num entries = %d", entries);

	offset = (get_indx * gmmu_fault_buf_size_v()) / sizeof(u32);
	nvgpu_log(g, gpu_dbg_intr, "starting word offset = 0x%x", offset);

	rd32_val = nvgpu_mem_rd32(g, mem,
		 offset + gmmu_fault_buf_entry_valid_w());
	nvgpu_log(g, gpu_dbg_intr, "entry valid offset val = 0x%x", rd32_val);

	while ((rd32_val & gmmu_fault_buf_entry_valid_m())) {

		nvgpu_log(g, gpu_dbg_intr, "entry valid = 0x%x", rd32_val);

		gv11b_fb_copy_from_hw_fault_buf(g, mem, offset, mmfault);

		/* The extra 1 in the buffer size is used to detect buffer
		 * full. The actual number of entries available for faults
		 * is one less than the number in fault_buffer_size_val.
		 */
		get_indx = (get_indx + 1) % (entries - 1);
		nvgpu_log(g, gpu_dbg_intr, "new get index = %d", get_indx);

		gv11b_fb_fault_buffer_get_ptr_update(g, index, get_indx);

		gv11b_fb_handle_mmu_fault_common(g, mmfault);

		offset = (get_indx * gmmu_fault_buf_size_v()) / sizeof(u32);
		nvgpu_log(g, gpu_dbg_intr, "next word offset = 0x%x", offset);

		rd32_val = nvgpu_mem_rd32(g, mem,
			 offset + gmmu_fault_buf_entry_valid_w());
	}
}

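/*
 * Faults that are not delivered to a hw fault buffer (BAR2 and physical
 * faults among others) are captured in the mmu fault snap registers;
 * decode them into mmu_fault_info and clear the fault status valid bit.
 */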
static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
		u32 fault_status, struct mmu_fault_info *mmfault)
{
	u32 reg_val;
	u32 addr_lo, addr_hi;
	u64 inst_ptr;
	u32 chid = FIFO_INVAL_CHANNEL_ID;
	struct channel_gk20a *refch;

	memset(mmfault, 0, sizeof(*mmfault));

	if (!(fault_status & fb_mmu_fault_status_valid_set_f())) {

		nvgpu_log(g, gpu_dbg_intr, "mmu fault status valid not set");
		return;
	}

	reg_val = gk20a_readl(g, fb_mmu_fault_inst_lo_r());
	addr_lo = fb_mmu_fault_inst_lo_addr_v(reg_val);
	addr_lo = addr_lo << ram_in_base_shift_v();

	addr_hi = gk20a_readl(g, fb_mmu_fault_inst_hi_r());
	addr_hi = fb_mmu_fault_inst_hi_addr_v(addr_hi);
	inst_ptr = hi32_lo32_to_u64(addr_hi, addr_lo);

	/* refch will be put back after fault is handled */
	refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
	if (refch)
		chid = refch->chid;

	/* It is still ok to continue if refch is NULL */
	mmfault->refch = refch;
	mmfault->chid = chid;
	mmfault->inst_ptr = inst_ptr;
	mmfault->inst_aperture = fb_mmu_fault_inst_lo_aperture_v(reg_val);
	mmfault->mmu_engine_id = fb_mmu_fault_inst_lo_engine_id_v(reg_val);

	gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(g, mmfault->mmu_engine_id,
		 &mmfault->faulted_engine, &mmfault->faulted_subid,
		 &mmfault->faulted_pbdma);

	reg_val = gk20a_readl(g, fb_mmu_fault_addr_lo_r());
	addr_lo = fb_mmu_fault_addr_lo_addr_v(reg_val);
	addr_lo = addr_lo << ram_in_base_shift_v();

	mmfault->fault_addr_aperture =
			 fb_mmu_fault_addr_lo_phys_aperture_v(reg_val);

	addr_hi = gk20a_readl(g, fb_mmu_fault_addr_hi_r());
	addr_hi = fb_mmu_fault_addr_hi_addr_v(addr_hi);
	mmfault->fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo);

	reg_val = gk20a_readl(g, fb_mmu_fault_info_r());
	mmfault->fault_type = fb_mmu_fault_info_fault_type_v(reg_val);
	mmfault->replayable_fault =
			 fb_mmu_fault_info_replayable_fault_v(reg_val);
	mmfault->client_id = fb_mmu_fault_info_client_v(reg_val);
	mmfault->access_type = fb_mmu_fault_info_access_type_v(reg_val);
	mmfault->client_type = fb_mmu_fault_info_client_type_v(reg_val);
	mmfault->gpc_id = fb_mmu_fault_info_gpc_id_v(reg_val);
	mmfault->protected_mode =
			 fb_mmu_fault_info_protected_mode_v(reg_val);
	mmfault->replay_fault_en =
			fb_mmu_fault_info_replayable_fault_en_v(reg_val);

	mmfault->valid = fb_mmu_fault_info_valid_v(reg_val);

	fault_status &= ~(fb_mmu_fault_status_valid_m());
	gk20a_writel(g, fb_mmu_fault_status_r(), fault_status);

	gv11b_fb_parse_mmfault(mmfault);

}

static void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g,
			 u32 fault_status)
{
	u32 reg_val;
	unsigned int index = REPLAY_REG_INDEX;

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index));

	if (fault_status &
		 fb_mmu_fault_status_replayable_getptr_corrupted_m()) {

		nvgpu_err(g, "replayable getptr corrupted set");

		gv11b_fb_fault_buf_configure_hw(g, index);

		reg_val = set_field(reg_val,
			fb_mmu_fault_buffer_get_getptr_corrupted_m(),
			fb_mmu_fault_buffer_get_getptr_corrupted_clear_f());
	}

	if (fault_status &
		 fb_mmu_fault_status_replayable_overflow_m()) {
		bool buffer_full = gv11b_fb_is_fault_buffer_full(g, index);

		nvgpu_err(g, "replayable overflow: buffer full:%s",
				buffer_full?"true":"false");

		reg_val = set_field(reg_val,
			fb_mmu_fault_buffer_get_overflow_m(),
			fb_mmu_fault_buffer_get_overflow_clear_f());
	}

	gk20a_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val);
}

static void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
			 u32 fault_status)
{
	u32 reg_val;
	unsigned int index = NONREPLAY_REG_INDEX;

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index));

	if (fault_status &
		 fb_mmu_fault_status_non_replayable_getptr_corrupted_m()) {

		nvgpu_err(g, "non replayable getptr corrupted set");

		gv11b_fb_fault_buf_configure_hw(g, index);

		reg_val = set_field(reg_val,
			fb_mmu_fault_buffer_get_getptr_corrupted_m(),
			fb_mmu_fault_buffer_get_getptr_corrupted_clear_f());
	}

	if (fault_status &
		 fb_mmu_fault_status_non_replayable_overflow_m()) {

		bool buffer_full = gv11b_fb_is_fault_buffer_full(g, index);

		nvgpu_err(g, "non replayable overflow: buffer full:%s",
				buffer_full?"true":"false");

		reg_val = set_field(reg_val,
			fb_mmu_fault_buffer_get_overflow_m(),
			fb_mmu_fault_buffer_get_overflow_clear_f());
	}

	gk20a_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val);
}

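/*
 * BAR2 fault recovery: mask the fault interrupts, re-arm any enabled hw
 * fault buffers, handle a possible CE method buffer fault, re-initialize
 * the BAR2 mm setup, then drop the channel reference and unmask the
 * interrupts.
 */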
static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
			struct mmu_fault_info *mmfault, u32 fault_status)
{
	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX,
		HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);


	if (fault_status & fb_mmu_fault_status_non_replayable_error_m()) {
		if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))
			gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX);
	}

	if (fault_status & fb_mmu_fault_status_replayable_error_m()) {
		if (gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))
			gv11b_fb_fault_buf_configure_hw(g, REPLAY_REG_INDEX);
	}
	gv11b_ce_mthd_buffer_fault_in_bar2_fault(g);

	g->ops.mm.init_bar2_mm_hw_setup(g);

	if (mmfault->refch) {
		gk20a_channel_put(mmfault->refch);
		mmfault->refch = NULL;
	}
	gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX,
		HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
}

static void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
			 u32 fault_status)
{
	struct mmu_fault_info *mmfault;

	mmfault = g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY];

	gv11b_mm_copy_from_fault_snap_reg(g, fault_status, mmfault);

	/* BAR2/Physical faults will not be snapped in hw fault buf */
	if (mmfault->mmu_engine_id == gmmu_fault_mmu_eng_id_bar2_v()) {
		nvgpu_err(g, "BAR2 MMU FAULT");
		gv11b_fb_handle_bar2_fault(g, mmfault, fault_status);

	} else if (mmfault->mmu_engine_id ==
			gmmu_fault_mmu_eng_id_physical_v()) {
		/* usually means VPR or out of bounds physical accesses */
		nvgpu_err(g, "PHYSICAL MMU FAULT");

	} else {
		gv11b_fb_handle_mmu_fault_common(g, mmfault);
	}
}

static void gv11b_fb_handle_dropped_mmu_fault(struct gk20a *g, u32 fault_status)
{
	u32 dropped_faults = 0;

	dropped_faults = fb_mmu_fault_status_dropped_bar1_phys_set_f() |
			fb_mmu_fault_status_dropped_bar1_virt_set_f() |
			fb_mmu_fault_status_dropped_bar2_phys_set_f() |
			fb_mmu_fault_status_dropped_bar2_virt_set_f() |
			fb_mmu_fault_status_dropped_ifb_phys_set_f() |
			fb_mmu_fault_status_dropped_ifb_virt_set_f() |
			fb_mmu_fault_status_dropped_other_phys_set_f() |
			fb_mmu_fault_status_dropped_other_virt_set_f();

	if (fault_status & dropped_faults) {
		nvgpu_err(g, "dropped mmu fault (0x%08x)",
				 fault_status & dropped_faults);
		gk20a_writel(g, fb_mmu_fault_status_r(), dropped_faults);
	}
}


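/*
 * Dispatch mmu fault interrupts: "other" faults are decoded from the snap
 * registers, replayable and non-replayable faults are drained from their
 * hw fault buffers, overflow conditions are handled, and the fault status
 * valid bit is cleared at the end.
 */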
static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr)
{
	u32 fault_status = gk20a_readl(g, fb_mmu_fault_status_r());

	nvgpu_log(g, gpu_dbg_intr, "mmu_fault_status = 0x%08x", fault_status);

	if (niso_intr &
		 fb_niso_intr_mmu_other_fault_notify_m()) {

		gv11b_fb_handle_dropped_mmu_fault(g, fault_status);

		gv11b_fb_handle_other_fault_notify(g, fault_status);
	}

	if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX)) {

		if (niso_intr &
		 fb_niso_intr_mmu_nonreplayable_fault_notify_m()) {

			gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
					fault_status, NONREPLAY_REG_INDEX);

			/*
			 * When all the faults are processed, GET and PUT
			 * will have the same value and the mmu fault status
			 * bit will be reset by HW.
			 */
		}
		if (niso_intr &
		 fb_niso_intr_mmu_nonreplayable_fault_overflow_m()) {

			gv11b_fb_handle_nonreplay_fault_overflow(g,
				 fault_status);
		}

	}

	if (gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX)) {

		if (niso_intr &
		 fb_niso_intr_mmu_replayable_fault_notify_m()) {

			gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
					fault_status, REPLAY_REG_INDEX);
		}
		if (niso_intr &
		 fb_niso_intr_mmu_replayable_fault_overflow_m()) {

			gv11b_fb_handle_replay_fault_overflow(g,
				 fault_status);
		}

	}

	nvgpu_log(g, gpu_dbg_intr, "clear mmu fault status");
	gk20a_writel(g, fb_mmu_fault_status_r(),
				fb_mmu_fault_status_valid_clear_f());
}

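/*
 * Top level HUB (fb niso) interrupt service routine: reports access counter
 * notifications, handles uncorrected ECC errors with the ECC interrupt
 * masked while the counters are processed, and handles mmu faults. All of
 * this is serialized by hub_isr_mutex.
 */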
static void gv11b_fb_hub_isr(struct gk20a *g)
{
	u32 status, niso_intr;

	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);

	niso_intr = gk20a_readl(g, fb_niso_intr_r());

	nvgpu_info(g, "enter hub isr, niso_intr = 0x%08x", niso_intr);

	if (niso_intr &
		 (fb_niso_intr_hub_access_counter_notify_m() |
		  fb_niso_intr_hub_access_counter_error_m())) {

		nvgpu_info(g, "hub access counter notify/error");
	}
	if (niso_intr &
		fb_niso_intr_mmu_ecc_uncorrected_error_notify_pending_f()) {

		nvgpu_info(g, "ecc uncorrected error notify");

		/* disable interrupts during handling */
		gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX,
						HUB_INTR_TYPE_ECC_UNCORRECTED);

		status = gk20a_readl(g, fb_mmu_l2tlb_ecc_status_r());
		if (status)
			gv11b_handle_l2tlb_ecc_isr(g, status);

		status = gk20a_readl(g, fb_mmu_hubtlb_ecc_status_r());
		if (status)
			gv11b_handle_hubtlb_ecc_isr(g, status);

		status = gk20a_readl(g, fb_mmu_fillunit_ecc_status_r());
		if (status)
			gv11b_handle_fillunit_ecc_isr(g, status);

		/* re-enable interrupts after handling */
		gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX,
						HUB_INTR_TYPE_ECC_UNCORRECTED);

	}
	if (niso_intr &
		(fb_niso_intr_mmu_other_fault_notify_m() |
		fb_niso_intr_mmu_replayable_fault_notify_m() |
		fb_niso_intr_mmu_replayable_fault_overflow_m() |
		fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
		fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) {

		nvgpu_info(g, "MMU Fault");
		gv11b_fb_handle_mmu_fault(g, niso_intr);
	}

	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
}

bool gv11b_fb_mmu_fault_pending(struct gk20a *g)
{
	if (gk20a_readl(g, fb_niso_intr_r()) &
		(fb_niso_intr_mmu_other_fault_notify_m() |
		 fb_niso_intr_mmu_ecc_uncorrected_error_notify_m() |
		 fb_niso_intr_mmu_replayable_fault_notify_m() |
		 fb_niso_intr_mmu_replayable_fault_overflow_m() |
		 fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
		 fb_niso_intr_mmu_nonreplayable_fault_overflow_m()))
		return true;

	return false;
}

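/*
 * Hook up the gv11b fb HAL: inherit the gp10b fb ops, override the hub ISR,
 * reset, fs state and CBC init entry points, and set up the gv11b kind map
 * and kind attributes.
 */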
void gv11b_init_fb(struct gpu_ops *gops)
{
	gp10b_init_fb(gops);
	gops->fb.hub_isr = gv11b_fb_hub_isr;
	gops->fb.reset = gv11b_fb_reset;
	gops->fb.init_fs_state = gv11b_fb_init_fs_state;
	gops->fb.init_cbc = gv11b_fb_init_cbc;

	gv11b_init_uncompressed_kind_map();
	gv11b_init_kind_attr();

}