-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h      | 12
-rw-r--r--  drivers/gpu/nvgpu/gv100/hal_gv100.c  |  4
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fb_gv11b.c   | 75
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fb_gv11b.h   | 18
-rw-r--r--  drivers/gpu/nvgpu/gv11b/hal_gv11b.c  |  4
-rw-r--r--  drivers/gpu/nvgpu/gv11b/mm_gv11b.c   | 46
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/mm.h |  3
7 files changed, 92 insertions(+), 70 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index b59baa78..4ff85ee3 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -162,6 +162,13 @@ struct nvgpu_gpfifo_userdata {
  * should go in struct gk20a or be implemented with the boolean flag API defined
  * in nvgpu/enabled.h
  */
+
+/* index for FB fault buffer functions */
+#define NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX		0U
+#define NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX		1U
+#define NVGPU_FB_MMU_FAULT_BUF_DISABLED			0U
+#define NVGPU_FB_MMU_FAULT_BUF_ENABLED			1U
+
 struct gpu_ops {
 	struct {
 		int (*determine_L2_size_bytes)(struct gk20a *gk20a);
@@ -564,6 +571,11 @@ struct gpu_ops {
 		u32 (*read_mmu_fault_status)(struct gk20a *g);
 		int (*mmu_invalidate_replay)(struct gk20a *g,
 			u32 invalidate_replay_val);
+		bool (*mmu_fault_pending)(struct gk20a *g);
+		bool (*is_fault_buf_enabled)(struct gk20a *g, u32 index);
+		void (*fault_buf_set_state_hw)(struct gk20a *g,
+			u32 index, u32 state);
+		void (*fault_buf_configure_hw)(struct gk20a *g, u32 index);
 	} fb;
 	struct {
 		void (*slcg_bus_load_gating_prod)(struct gk20a *g, bool prod);
diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.c b/drivers/gpu/nvgpu/gv100/hal_gv100.c
index ea131822..9c3eb91a 100644
--- a/drivers/gpu/nvgpu/gv100/hal_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/hal_gv100.c
@@ -501,6 +501,10 @@ static const struct gpu_ops gv100_ops = {
 		.read_mmu_fault_info = fb_gv11b_read_mmu_fault_info,
 		.read_mmu_fault_status = fb_gv11b_read_mmu_fault_status,
 		.mmu_invalidate_replay = gv11b_fb_mmu_invalidate_replay,
+		.mmu_fault_pending = gv11b_fb_mmu_fault_pending,
+		.is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled,
+		.fault_buf_set_state_hw = gv11b_fb_fault_buf_set_state_hw,
+		.fault_buf_configure_hw = gv11b_fb_fault_buf_configure_hw,
 	},
 	.clock_gating = {
 		.slcg_bus_load_gating_prod =
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
index 2c2c4898..c0fcf051 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
@@ -223,17 +223,16 @@ static const char *const gpc_client_descs_gv11b[] = {
223 "t1 36", "t1 37", "t1 38", "t1 39", 223 "t1 36", "t1 37", "t1 38", "t1 39",
224}; 224};
225 225
226u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g, 226bool gv11b_fb_is_fault_buf_enabled(struct gk20a *g, u32 index)
227 unsigned int index)
228{ 227{
229 u32 reg_val; 228 u32 reg_val;
230 229
231 reg_val = g->ops.fb.read_mmu_fault_buffer_size(g, index); 230 reg_val = g->ops.fb.read_mmu_fault_buffer_size(g, index);
232 return fb_mmu_fault_buffer_size_enable_v(reg_val); 231 return fb_mmu_fault_buffer_size_enable_v(reg_val) != 0U;
233} 232}
234 233
235static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g, 234static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g,
236 unsigned int index, u32 next) 235 u32 index, u32 next)
237{ 236{
238 u32 reg_val; 237 u32 reg_val;
239 238
@@ -257,8 +256,7 @@ static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g,
 	nvgpu_mb();
 }
 
-static u32 gv11b_fb_fault_buffer_get_index(struct gk20a *g,
-		unsigned int index)
+static u32 gv11b_fb_fault_buffer_get_index(struct gk20a *g, u32 index)
 {
 	u32 reg_val;
 
@@ -266,8 +264,7 @@ static u32 gv11b_fb_fault_buffer_get_index(struct gk20a *g,
 	return fb_mmu_fault_buffer_get_ptr_v(reg_val);
 }
 
-static u32 gv11b_fb_fault_buffer_put_index(struct gk20a *g,
-		unsigned int index)
+static u32 gv11b_fb_fault_buffer_put_index(struct gk20a *g, u32 index)
 {
 	u32 reg_val;
 
@@ -275,8 +272,7 @@ static u32 gv11b_fb_fault_buffer_put_index(struct gk20a *g,
 	return fb_mmu_fault_buffer_put_ptr_v(reg_val);
 }
 
-static u32 gv11b_fb_fault_buffer_size_val(struct gk20a *g,
-		unsigned int index)
+static u32 gv11b_fb_fault_buffer_size_val(struct gk20a *g, u32 index)
 {
 	u32 reg_val;
 
@@ -285,7 +281,7 @@ static u32 gv11b_fb_fault_buffer_size_val(struct gk20a *g,
 }
 
 static bool gv11b_fb_is_fault_buffer_empty(struct gk20a *g,
-		unsigned int index, u32 *get_idx)
+		u32 index, u32 *get_idx)
 {
 	u32 put_idx;
 
@@ -295,8 +291,7 @@ static bool gv11b_fb_is_fault_buffer_empty(struct gk20a *g,
 	return *get_idx == put_idx;
 }
 
-static bool gv11b_fb_is_fault_buffer_full(struct gk20a *g,
-		unsigned int index)
+static bool gv11b_fb_is_fault_buffer_full(struct gk20a *g, u32 index)
 {
 	u32 get_idx, put_idx, entries;
 
@@ -311,7 +306,7 @@ static bool gv11b_fb_is_fault_buffer_full(struct gk20a *g,
 }
 
 void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
-		unsigned int index, unsigned int state)
+		u32 index, u32 state)
 {
 	u32 fault_status;
 	u32 reg_val;
@@ -319,7 +314,7 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 	nvgpu_log_fn(g, " ");
 
 	reg_val = g->ops.fb.read_mmu_fault_buffer_size(g, index);
-	if (state) {
+	if (state == NVGPU_FB_MMU_FAULT_BUF_ENABLED) {
 		if (gv11b_fb_is_fault_buf_enabled(g, index)) {
 			nvgpu_log_info(g, "fault buffer is already enabled");
 		} else {
@@ -358,7 +353,7 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 	}
 }
 
-void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
+void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, u32 index)
 {
 	u32 addr_lo;
 	u32 addr_hi;
@@ -366,7 +361,7 @@ void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
 	nvgpu_log_fn(g, " ");
 
 	gv11b_fb_fault_buf_set_state_hw(g, index,
-			FAULT_BUF_DISABLED);
+			NVGPU_FB_MMU_FAULT_BUF_DISABLED);
 	addr_lo = u64_lo32(g->mm.hw_fault_buf[index].gpu_va >>
 			ram_in_base_shift_v());
 	addr_hi = u64_hi32(g->mm.hw_fault_buf[index].gpu_va);
@@ -379,7 +374,7 @@ void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
 		fb_mmu_fault_buffer_size_val_f(g->ops.fifo.get_num_fifos(g)) |
 		fb_mmu_fault_buffer_size_overflow_intr_enable_f());
 
-	gv11b_fb_fault_buf_set_state_hw(g, index, FAULT_BUF_ENABLED);
+	gv11b_fb_fault_buf_set_state_hw(g, index, NVGPU_FB_MMU_FAULT_BUF_ENABLED);
 }
 
 void gv11b_fb_enable_hub_intr(struct gk20a *g)
@@ -929,7 +924,7 @@ static int gv11b_fb_replay_or_cancel_faults(struct gk20a *g,
 }
 
 void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
-		u32 fault_status, unsigned int index)
+		u32 fault_status, u32 index)
 {
 	u32 get_indx, offset, rd32_val, entries;
 	struct nvgpu_mem *mem;
@@ -944,7 +939,8 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
 		return;
 	}
 	nvgpu_log(g, gpu_dbg_intr, "%s MMU FAULT" ,
-		index == REPLAY_REG_INDEX ? "REPLAY" : "NON-REPLAY");
+		index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX ?
+		"REPLAY" : "NON-REPLAY");
 
 	nvgpu_log(g, gpu_dbg_intr, "get ptr = %d", get_indx);
 
@@ -978,7 +974,8 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
 	rd32_val = nvgpu_mem_rd32(g, mem,
 		offset + gmmu_fault_buf_entry_valid_w());
 
-	if (index == REPLAY_REG_INDEX && mmfault->fault_addr != 0ULL) {
+	if (index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX &&
+			mmfault->fault_addr != 0ULL) {
 		/* fault_addr "0" is not supposed to be fixed ever.
 		 * For the first time when prev = 0, next = 0 and
 		 * fault addr is also 0 then handle_mmu_fault_common will
@@ -998,7 +995,8 @@ void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
998 &invalidate_replay_val); 995 &invalidate_replay_val);
999 996
1000 } 997 }
1001 if (index == REPLAY_REG_INDEX && invalidate_replay_val) 998 if (index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX &&
999 invalidate_replay_val != 0U)
1002 gv11b_fb_replay_or_cancel_faults(g, invalidate_replay_val); 1000 gv11b_fb_replay_or_cancel_faults(g, invalidate_replay_val);
1003} 1001}
1004 1002
@@ -1080,7 +1078,7 @@ void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g,
 		u32 fault_status)
 {
 	u32 reg_val;
-	unsigned int index = REPLAY_REG_INDEX;
+	u32 index = NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX;
 
 	reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
 
@@ -1115,7 +1113,7 @@ void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
 		u32 fault_status)
 {
 	u32 reg_val;
-	unsigned int index = NONREPLAY_REG_INDEX;
+	u32 index = NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX;
 
 	reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
 
@@ -1151,13 +1149,16 @@ static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
 		struct mmu_fault_info *mmfault, u32 fault_status)
 {
 	if (fault_status & fb_mmu_fault_status_non_replayable_error_m()) {
-		if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))
-			gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX);
+		if (gv11b_fb_is_fault_buf_enabled(g,
+				NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX))
+			gv11b_fb_fault_buf_configure_hw(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
 	}
 
 	if (fault_status & fb_mmu_fault_status_replayable_error_m()) {
-		if (gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))
-			gv11b_fb_fault_buf_configure_hw(g, REPLAY_REG_INDEX);
+		if (gv11b_fb_is_fault_buf_enabled(g,
+				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX))
+			gv11b_fb_fault_buf_configure_hw(g,
+				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
 	}
 	gv11b_ce_mthd_buffer_fault_in_bar2_fault(g);
 
@@ -1175,7 +1176,7 @@ void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
 	struct mmu_fault_info *mmfault;
 	u32 invalidate_replay_val = 0;
 
-	mmfault = &g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY];
+	mmfault = &g->mm.fault_info[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY];
 
 	gv11b_mm_copy_from_fault_snap_reg(g, fault_status, mmfault);
 
@@ -1226,9 +1227,11 @@ void gv11b_fb_handle_replayable_mmu_fault(struct gk20a *g)
 	if (!(fault_status & fb_mmu_fault_status_replayable_m()))
 		return;
 
-	if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX)) {
+	if (gv11b_fb_is_fault_buf_enabled(g,
+			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
 		gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
-				fault_status, REPLAY_REG_INDEX);
+				fault_status,
+				NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
 	}
 }
 
@@ -1246,13 +1249,14 @@ static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr)
 		gv11b_fb_handle_other_fault_notify(g, fault_status);
 	}
 
-	if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX)) {
+	if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
 
 		if (niso_intr &
 			fb_niso_intr_mmu_nonreplayable_fault_notify_m()) {
 
 			gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
-					fault_status, NONREPLAY_REG_INDEX);
+					fault_status,
+					NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
 
 			/*
 			 * When all the faults are processed,
@@ -1269,13 +1273,14 @@ static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr)
 
 	}
 
-	if (gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX)) {
+	if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) {
 
 		if (niso_intr &
 			fb_niso_intr_mmu_replayable_fault_notify_m()) {
 
 			gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
-					fault_status, REPLAY_REG_INDEX);
+					fault_status,
+					NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
 		}
 		if (niso_intr &
 			fb_niso_intr_mmu_replayable_fault_overflow_m()) {
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.h b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
index 34988bd2..fe2c733d 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
@@ -28,15 +28,6 @@
 #define NONREPLAY_REG_INDEX		0
 #define REPLAY_REG_INDEX		1
 
-#define FAULT_BUF_DISABLED		0
-#define FAULT_BUF_ENABLED		1
-
-#define FAULT_BUF_INVALID		0
-#define FAULT_BUF_VALID			1
-
-#define FAULT_TYPE_OTHER_AND_NONREPLAY	0
-#define FAULT_TYPE_REPLAY		1
-
 struct gk20a;
 
 void gv11b_fb_init_hw(struct gk20a *g);
@@ -46,11 +37,10 @@ void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr);
 void gv11b_fb_reset(struct gk20a *g);
 void gv11b_fb_hub_isr(struct gk20a *g);
 
-u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
-		unsigned int index);
+bool gv11b_fb_is_fault_buf_enabled(struct gk20a *g, u32 index );
 void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
-		unsigned int index, unsigned int state);
-void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index);
+		u32 index, u32 state);
+void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, u32 index);
 void gv11b_fb_enable_hub_intr(struct gk20a *g);
 void gv11b_fb_disable_hub_intr(struct gk20a *g);
 bool gv11b_fb_mmu_fault_pending(struct gk20a *g);
@@ -58,7 +48,7 @@ void gv11b_fb_handle_dropped_mmu_fault(struct gk20a *g, u32 fault_status);
 void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
 		u32 fault_status);
 void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
-		u32 fault_status, unsigned int index);
+		u32 fault_status, u32 index);
 void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
 		u32 fault_status);
 void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g,
diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
index 2bd35f0c..09f74042 100644
--- a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
@@ -465,6 +465,10 @@ static const struct gpu_ops gv11b_ops = {
 		.read_mmu_fault_info = fb_gv11b_read_mmu_fault_info,
 		.read_mmu_fault_status = fb_gv11b_read_mmu_fault_status,
 		.mmu_invalidate_replay = gv11b_fb_mmu_invalidate_replay,
+		.mmu_fault_pending = gv11b_fb_mmu_fault_pending,
+		.is_fault_buf_enabled = gv11b_fb_is_fault_buf_enabled,
+		.fault_buf_set_state_hw = gv11b_fb_fault_buf_set_state_hw,
+		.fault_buf_configure_hw = gv11b_fb_fault_buf_configure_hw,
 	},
 	.clock_gating = {
 		.slcg_bus_load_gating_prod =
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
index 394ff0ed..c7556394 100644
--- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
@@ -35,10 +35,8 @@
35#include "gp10b/mc_gp10b.h" 35#include "gp10b/mc_gp10b.h"
36 36
37#include "mm_gv11b.h" 37#include "mm_gv11b.h"
38#include "fb_gv11b.h"
39#include "subctx_gv11b.h" 38#include "subctx_gv11b.h"
40 39
41#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
42#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h> 40#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
43 41
44#define NVGPU_L3_ALLOC_BIT BIT(36) 42#define NVGPU_L3_ALLOC_BIT BIT(36)
@@ -66,7 +64,7 @@ void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
 
 bool gv11b_mm_mmu_fault_pending(struct gk20a *g)
 {
-	return gv11b_fb_mmu_fault_pending(g);
+	return g->ops.fb.mmu_fault_pending(g);
 }
 
 void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
@@ -79,23 +77,27 @@ void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
 
 	g->ops.fb.disable_hub_intr(g);
 
-	if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) {
-		gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
-				FAULT_BUF_DISABLED);
+	if ((g->ops.fb.is_fault_buf_enabled(g,
+			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX))) {
+		g->ops.fb.fault_buf_set_state_hw(g,
+			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX,
+			NVGPU_FB_MMU_FAULT_BUF_DISABLED);
 	}
 
-	if ((gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))) {
-		gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX,
-				FAULT_BUF_DISABLED);
+	if ((g->ops.fb.is_fault_buf_enabled(g,
+			NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX))) {
+		g->ops.fb.fault_buf_set_state_hw(g,
+			NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX,
+			NVGPU_FB_MMU_FAULT_BUF_DISABLED);
 	}
 
 	if (nvgpu_mem_is_valid(
-		&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]))
+		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]))
 		nvgpu_dma_unmap_free(vm,
-			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
-	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]))
+			&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]);
+	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]))
 		nvgpu_dma_unmap_free(vm,
-			&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
+			&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]);
 
 	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
 	nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
@@ -117,10 +119,10 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 		gmmu_fault_buf_size_v();
 
 	if (!nvgpu_mem_is_valid(
-		&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY])) {
+		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY])) {
 
 		err = nvgpu_dma_alloc_map_sys(vm, fb_size,
-			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
+			&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]);
 		if (err) {
 			nvgpu_err(g,
 				"Error in hw mmu fault buf [0] alloc in bar2 vm ");
@@ -130,9 +132,9 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 	}
 
 	if (!nvgpu_mem_is_valid(
-		&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY])) {
+		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY])) {
 		err = nvgpu_dma_alloc_map_sys(vm, fb_size,
-			&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
+			&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]);
 		if (err) {
 			nvgpu_err(g,
 				"Error in hw mmu fault buf [1] alloc in bar2 vm ");
@@ -145,10 +147,12 @@ static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g)
 static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
 {
 	if (nvgpu_mem_is_valid(
-		&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]))
-		gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX);
-	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]))
-		gv11b_fb_fault_buf_configure_hw(g, REPLAY_REG_INDEX);
+		&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY]))
+		g->ops.fb.fault_buf_configure_hw(g,
+			NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
+	if (nvgpu_mem_is_valid(&g->mm.hw_fault_buf[NVGPU_MM_MMU_FAULT_TYPE_REPLAY]))
+		g->ops.fb.fault_buf_configure_hw(g,
+			NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
 }
 
 static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
diff --git a/drivers/gpu/nvgpu/include/nvgpu/mm.h b/drivers/gpu/nvgpu/include/nvgpu/mm.h
index b06d0361..033e2548 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/mm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/mm.h
@@ -38,6 +38,9 @@ struct vm_gk20a;
 struct nvgpu_mem;
 struct nvgpu_pd_cache;
 
+#define NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY	0
+#define NVGPU_MM_MMU_FAULT_TYPE_REPLAY			1
+
 #define FAULT_TYPE_NUM	2	/* replay and nonreplay faults */
 
 struct mmu_fault_info {