author	Deepak Nibade <dnibade@nvidia.com>	2018-06-08 00:01:58 -0400
committer	Tejal Kudav <tkudav@nvidia.com>	2018-06-14 09:44:08 -0400
commit	9c5bcbe6f2b71592c3a07d5884b68ad10fdffffd (patch)
tree	2d42e8c7de8bca04ea852296e51c2b4b97293002 /drivers/gpu
parent	4e66f214fc03f088b13e1f1cdc097df67dd2c062 (diff)
gpu: nvgpu: Add HALs for mmu_fault setup and info
Add the below HALs to set up the mmu_fault configuration registers and
to read the information registers, and set them on Volta:

gops.fb.write_mmu_fault_buffer_lo_hi()
gops.fb.write_mmu_fault_buffer_get()
gops.fb.write_mmu_fault_buffer_size()
gops.fb.write_mmu_fault_status()
gops.fb.read_mmu_fault_buffer_get()
gops.fb.read_mmu_fault_buffer_put()
gops.fb.read_mmu_fault_buffer_size()
gops.fb.read_mmu_fault_addr_lo_hi()
gops.fb.read_mmu_fault_inst_lo_hi()
gops.fb.read_mmu_fault_info()
gops.fb.read_mmu_fault_status()

Jira NVGPUT-13

Change-Id: Ia99568ff905ada3c035efb4565613576012f5bef
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1744063
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
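For orientation, here is a minimal sketch of the call pattern this change moves common code onto, using only accessors and register field helpers that appear in the patch below (the standalone snippet itself is illustrative, not part of the commit):

	/* Sketch: chip-independent code now reads and clears the MMU
	 * fault status through gops.fb rather than calling
	 * gk20a_readl()/gk20a_writel() on fb_mmu_fault_status_r()
	 * directly, so per-chip or vGPU code can override the access.
	 */
	u32 fault_status = g->ops.fb.read_mmu_fault_status(g);

	fault_status &= ~(fb_mmu_fault_status_valid_m());
	g->ops.fb.write_mmu_fault_status(g, fault_status);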
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gk20a.h                  16
-rw-r--r--  drivers/gpu/nvgpu/gv100/hal_gv100.c              17
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fb_gv11b.c              125
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fb_gv11b.h               17
-rw-r--r--  drivers/gpu/nvgpu/gv11b/hal_gv11b.c              17
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c    17
6 files changed, 177 insertions, 32 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h
index be8917c7..bc4bd682 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/gk20a.h
@@ -528,6 +528,22 @@ struct gpu_ops {
 			unsigned int intr_type);
 		int (*init_fbpa)(struct gk20a *g);
 		void (*fbpa_isr)(struct gk20a *g);
+		void (*write_mmu_fault_buffer_lo_hi)(struct gk20a *g, u32 index,
+			u32 addr_lo, u32 addr_hi);
+		void (*write_mmu_fault_buffer_get)(struct gk20a *g, u32 index,
+			u32 reg_val);
+		void (*write_mmu_fault_buffer_size)(struct gk20a *g, u32 index,
+			u32 reg_val);
+		void (*write_mmu_fault_status)(struct gk20a *g, u32 reg_val);
+		u32 (*read_mmu_fault_buffer_get)(struct gk20a *g, u32 index);
+		u32 (*read_mmu_fault_buffer_put)(struct gk20a *g, u32 index);
+		u32 (*read_mmu_fault_buffer_size)(struct gk20a *g, u32 index);
+		void (*read_mmu_fault_addr_lo_hi)(struct gk20a *g,
+			u32 *addr_lo, u32 *addr_hi);
+		void (*read_mmu_fault_inst_lo_hi)(struct gk20a *g,
+			u32 *inst_lo, u32 *inst_hi);
+		u32 (*read_mmu_fault_info)(struct gk20a *g);
+		u32 (*read_mmu_fault_status)(struct gk20a *g);
 	} fb;
 	struct {
 		void (*slcg_bus_load_gating_prod)(struct gk20a *g, bool prod);
diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.c b/drivers/gpu/nvgpu/gv100/hal_gv100.c
index 1d6f59b3..4e1c3fb8 100644
--- a/drivers/gpu/nvgpu/gv100/hal_gv100.c
+++ b/drivers/gpu/nvgpu/gv100/hal_gv100.c
@@ -473,6 +473,23 @@ static const struct gpu_ops gv100_ops = {
 		.enable_nvlink = gv100_fb_enable_nvlink,
 		.enable_hub_intr = gv11b_fb_enable_hub_intr,
 		.disable_hub_intr = gv11b_fb_disable_hub_intr,
+		.write_mmu_fault_buffer_lo_hi =
+			fb_gv11b_write_mmu_fault_buffer_lo_hi,
+		.write_mmu_fault_buffer_get =
+			fb_gv11b_write_mmu_fault_buffer_get,
+		.write_mmu_fault_buffer_size =
+			fb_gv11b_write_mmu_fault_buffer_size,
+		.write_mmu_fault_status = fb_gv11b_write_mmu_fault_status,
+		.read_mmu_fault_buffer_get =
+			fb_gv11b_read_mmu_fault_buffer_get,
+		.read_mmu_fault_buffer_put =
+			fb_gv11b_read_mmu_fault_buffer_put,
+		.read_mmu_fault_buffer_size =
+			fb_gv11b_read_mmu_fault_buffer_size,
+		.read_mmu_fault_addr_lo_hi = fb_gv11b_read_mmu_fault_addr_lo_hi,
+		.read_mmu_fault_inst_lo_hi = fb_gv11b_read_mmu_fault_inst_lo_hi,
+		.read_mmu_fault_info = fb_gv11b_read_mmu_fault_info,
+		.read_mmu_fault_status = fb_gv11b_read_mmu_fault_status,
 	},
 	.clock_gating = {
 		.slcg_bus_load_gating_prod =
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
index 18906c01..ce8f5669 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
@@ -231,7 +231,7 @@ u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
 {
 	u32 reg_val;
 
-	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
+	reg_val = g->ops.fb.read_mmu_fault_buffer_size(g, index);
 	return fb_mmu_fault_buffer_size_enable_v(reg_val);
 }
 
@@ -242,7 +242,7 @@ static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g,
 
 	nvgpu_log(g, gpu_dbg_intr, "updating get index with = %d", next);
 
-	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index));
+	reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
 	reg_val = set_field(reg_val, fb_mmu_fault_buffer_get_ptr_m(),
 		fb_mmu_fault_buffer_get_ptr_f(next));
 
@@ -252,7 +252,7 @@ static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g,
 	if (reg_val & fb_mmu_fault_buffer_get_overflow_m())
 		reg_val |= fb_mmu_fault_buffer_get_overflow_clear_f();
 
-	gk20a_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val);
+	g->ops.fb.write_mmu_fault_buffer_get(g, index, reg_val);
 
 	/* make sure get ptr update is visible to everyone to avoid
 	 * reading already read entry
@@ -265,7 +265,7 @@ static u32 gv11b_fb_fault_buffer_get_index(struct gk20a *g,
 {
 	u32 reg_val;
 
-	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index));
+	reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
 	return fb_mmu_fault_buffer_get_ptr_v(reg_val);
 }
 
@@ -274,7 +274,7 @@ static u32 gv11b_fb_fault_buffer_put_index(struct gk20a *g,
 {
 	u32 reg_val;
 
-	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_put_r(index));
+	reg_val = g->ops.fb.read_mmu_fault_buffer_put(g, index);
 	return fb_mmu_fault_buffer_put_ptr_v(reg_val);
 }
 
@@ -283,7 +283,7 @@ static u32 gv11b_fb_fault_buffer_size_val(struct gk20a *g,
 {
 	u32 reg_val;
 
-	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
+	reg_val = g->ops.fb.read_mmu_fault_buffer_size(g, index);
 	return fb_mmu_fault_buffer_size_val_v(reg_val);
 }
 
@@ -321,14 +321,14 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 
 	nvgpu_log_fn(g, " ");
 
-	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
+	reg_val = g->ops.fb.read_mmu_fault_buffer_size(g, index);
 	if (state) {
 		if (gv11b_fb_is_fault_buf_enabled(g, index)) {
 			nvgpu_log_info(g, "fault buffer is already enabled");
 		} else {
 			reg_val |= fb_mmu_fault_buffer_size_enable_true_f();
-			gk20a_writel(g, fb_mmu_fault_buffer_size_r(index),
-				reg_val);
+			g->ops.fb.write_mmu_fault_buffer_size(g, index,
+				reg_val);
 		}
 
 	} else {
@@ -339,9 +339,9 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 			NVGPU_TIMER_CPU_TIMER);
 
 		reg_val &= (~(fb_mmu_fault_buffer_size_enable_m()));
-		gk20a_writel(g, fb_mmu_fault_buffer_size_r(index), reg_val);
+		g->ops.fb.write_mmu_fault_buffer_size(g, index, reg_val);
 
-		fault_status = gk20a_readl(g, fb_mmu_fault_status_r());
+		fault_status = g->ops.fb.read_mmu_fault_status(g);
 
 		do {
 			if (!(fault_status & fb_mmu_fault_status_busy_true_f()))
@@ -352,7 +352,7 @@ void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
 			 * during the window BAR2 is being unmapped by s/w
 			 */
 			nvgpu_log_info(g, "fault status busy set, check again");
-			fault_status = gk20a_readl(g, fb_mmu_fault_status_r());
+			fault_status = g->ops.fb.read_mmu_fault_status(g);
 
 			nvgpu_usleep_range(delay, delay * 2);
 			delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
@@ -374,13 +374,11 @@ void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
 		ram_in_base_shift_v());
 	addr_hi = u64_hi32(g->mm.hw_fault_buf[index].gpu_va);
 
-	gk20a_writel(g, fb_mmu_fault_buffer_lo_r(index),
-		fb_mmu_fault_buffer_lo_addr_f(addr_lo));
+	g->ops.fb.write_mmu_fault_buffer_lo_hi(g, index,
+		fb_mmu_fault_buffer_lo_addr_f(addr_lo),
+		fb_mmu_fault_buffer_hi_addr_f(addr_hi));
 
-	gk20a_writel(g, fb_mmu_fault_buffer_hi_r(index),
-		fb_mmu_fault_buffer_hi_addr_f(addr_hi));
-
-	gk20a_writel(g, fb_mmu_fault_buffer_size_r(index),
+	g->ops.fb.write_mmu_fault_buffer_size(g, index,
 		fb_mmu_fault_buffer_size_val_f(g->ops.fifo.get_num_fifos(g)) |
 		fb_mmu_fault_buffer_size_overflow_intr_enable_f());
 
@@ -1083,11 +1081,11 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
 		return;
 	}
 
-	reg_val = gk20a_readl(g, fb_mmu_fault_inst_lo_r());
+	g->ops.fb.read_mmu_fault_inst_lo_hi(g, &reg_val, &addr_hi);
+
 	addr_lo = fb_mmu_fault_inst_lo_addr_v(reg_val);
 	addr_lo = addr_lo << ram_in_base_shift_v();
 
-	addr_hi = gk20a_readl(g, fb_mmu_fault_inst_hi_r());
 	addr_hi = fb_mmu_fault_inst_hi_addr_v(addr_hi);
 	inst_ptr = hi32_lo32_to_u64(addr_hi, addr_lo);
 
@@ -1107,18 +1105,18 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
 		&mmfault->faulted_engine, &mmfault->faulted_subid,
 		&mmfault->faulted_pbdma);
 
-	reg_val = gk20a_readl(g, fb_mmu_fault_addr_lo_r());
+	g->ops.fb.read_mmu_fault_addr_lo_hi(g, &reg_val, &addr_hi);
+
 	addr_lo = fb_mmu_fault_addr_lo_addr_v(reg_val);
 	addr_lo = addr_lo << ram_in_base_shift_v();
 
 	mmfault->fault_addr_aperture =
 		fb_mmu_fault_addr_lo_phys_aperture_v(reg_val);
 
-	addr_hi = gk20a_readl(g, fb_mmu_fault_addr_hi_r());
 	addr_hi = fb_mmu_fault_addr_hi_addr_v(addr_hi);
 	mmfault->fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo);
 
-	reg_val = gk20a_readl(g, fb_mmu_fault_info_r());
+	reg_val = g->ops.fb.read_mmu_fault_info(g);
 	mmfault->fault_type = fb_mmu_fault_info_fault_type_v(reg_val);
 	mmfault->replayable_fault =
 		fb_mmu_fault_info_replayable_fault_v(reg_val);
@@ -1134,7 +1132,7 @@ static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
 	mmfault->valid = fb_mmu_fault_info_valid_v(reg_val);
 
 	fault_status &= ~(fb_mmu_fault_status_valid_m());
-	gk20a_writel(g, fb_mmu_fault_status_r(), fault_status);
+	g->ops.fb.write_mmu_fault_status(g, fault_status);
 
 	gv11b_fb_parse_mmfault(mmfault);
 
@@ -1146,7 +1144,7 @@ void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g,
 	u32 reg_val;
 	unsigned int index = REPLAY_REG_INDEX;
 
-	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index));
+	reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
 
 	if (fault_status &
 		fb_mmu_fault_status_replayable_getptr_corrupted_m()) {
@@ -1172,7 +1170,7 @@ void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g,
 			fb_mmu_fault_buffer_get_overflow_clear_f());
 	}
 
-	gk20a_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val);
+	g->ops.fb.write_mmu_fault_buffer_get(g, index, reg_val);
 }
 
 void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
@@ -1181,7 +1179,7 @@ void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
 	u32 reg_val;
 	unsigned int index = NONREPLAY_REG_INDEX;
 
-	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index));
+	reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
 
 	if (fault_status &
 		fb_mmu_fault_status_non_replayable_getptr_corrupted_m()) {
@@ -1208,7 +1206,7 @@ void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
 			fb_mmu_fault_buffer_get_overflow_clear_f());
 	}
 
-	gk20a_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val);
+	g->ops.fb.write_mmu_fault_buffer_get(g, index, reg_val);
 }
 
 static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
@@ -1285,7 +1283,7 @@ void gv11b_fb_handle_dropped_mmu_fault(struct gk20a *g, u32 fault_status)
 	if (fault_status & dropped_faults) {
 		nvgpu_err(g, "dropped mmu fault (0x%08x)",
 			fault_status & dropped_faults);
-		gk20a_writel(g, fb_mmu_fault_status_r(), dropped_faults);
+		g->ops.fb.write_mmu_fault_status(g, dropped_faults);
 	}
 }
 
@@ -1304,7 +1302,7 @@ void gv11b_fb_handle_replayable_mmu_fault(struct gk20a *g)
 
 static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr)
 {
-	u32 fault_status = gk20a_readl(g, fb_mmu_fault_status_r());
+	u32 fault_status = g->ops.fb.read_mmu_fault_status(g);
 
 	nvgpu_log(g, gpu_dbg_intr, "mmu_fault_status = 0x%08x", fault_status);
 
@@ -1357,8 +1355,8 @@ static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr)
 	}
 
 	nvgpu_log(g, gpu_dbg_intr, "clear mmu fault status");
-	gk20a_writel(g, fb_mmu_fault_status_r(),
+	g->ops.fb.write_mmu_fault_status(g,
 		fb_mmu_fault_status_valid_clear_f());
 }
 
 void gv11b_fb_hub_isr(struct gk20a *g)
@@ -1525,3 +1523,66 @@ static int gv11b_fb_fix_page_fault(struct gk20a *g,
 			pte[1], pte[0]);
 	return err;
 }
+
+void fb_gv11b_write_mmu_fault_buffer_lo_hi(struct gk20a *g, u32 index,
+		u32 addr_lo, u32 addr_hi)
+{
+	nvgpu_writel(g, fb_mmu_fault_buffer_lo_r(index), addr_lo);
+	nvgpu_writel(g, fb_mmu_fault_buffer_hi_r(index), addr_hi);
+}
+
+u32 fb_gv11b_read_mmu_fault_buffer_get(struct gk20a *g, u32 index)
+{
+	return nvgpu_readl(g, fb_mmu_fault_buffer_get_r(index));
+}
+
+void fb_gv11b_write_mmu_fault_buffer_get(struct gk20a *g, u32 index,
+		u32 reg_val)
+{
+	nvgpu_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val);
+}
+
+u32 fb_gv11b_read_mmu_fault_buffer_put(struct gk20a *g, u32 index)
+{
+	return nvgpu_readl(g, fb_mmu_fault_buffer_put_r(index));
+}
+
+u32 fb_gv11b_read_mmu_fault_buffer_size(struct gk20a *g, u32 index)
+{
+	return nvgpu_readl(g, fb_mmu_fault_buffer_size_r(index));
+}
+
+void fb_gv11b_write_mmu_fault_buffer_size(struct gk20a *g, u32 index,
+		u32 reg_val)
+{
+	nvgpu_writel(g, fb_mmu_fault_buffer_size_r(index), reg_val);
+}
+
+void fb_gv11b_read_mmu_fault_addr_lo_hi(struct gk20a *g,
+		u32 *addr_lo, u32 *addr_hi)
+{
+	*addr_lo = nvgpu_readl(g, fb_mmu_fault_addr_lo_r());
+	*addr_hi = nvgpu_readl(g, fb_mmu_fault_addr_hi_r());
+}
+
+void fb_gv11b_read_mmu_fault_inst_lo_hi(struct gk20a *g,
+		u32 *inst_lo, u32 *inst_hi)
+{
+	*inst_lo = nvgpu_readl(g, fb_mmu_fault_inst_lo_r());
+	*inst_hi = nvgpu_readl(g, fb_mmu_fault_inst_hi_r());
+}
+
+u32 fb_gv11b_read_mmu_fault_info(struct gk20a *g)
+{
+	return nvgpu_readl(g, fb_mmu_fault_info_r());
+}
+
+u32 fb_gv11b_read_mmu_fault_status(struct gk20a *g)
+{
+	return nvgpu_readl(g, fb_mmu_fault_status_r());
+}
+
+void fb_gv11b_write_mmu_fault_status(struct gk20a *g, u32 reg_val)
+{
+	nvgpu_writel(g, fb_mmu_fault_status_r(), reg_val);
+}
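The *_lo_hi accessors above hand back both 32-bit halves of a register pair in one HAL call. A short sketch of how the caller rebuilds the 64-bit fault address, mirroring gv11b_mm_copy_from_fault_snap_reg() in this patch (the declarations are added here only so the snippet stands alone):

	u32 reg_val, addr_lo, addr_hi;
	u64 fault_addr;

	/* One call fetches fb_mmu_fault_addr_lo_r() and _hi_r(). */
	g->ops.fb.read_mmu_fault_addr_lo_hi(g, &reg_val, &addr_hi);
	addr_lo = fb_mmu_fault_addr_lo_addr_v(reg_val);
	addr_lo = addr_lo << ram_in_base_shift_v();
	addr_hi = fb_mmu_fault_addr_hi_addr_v(addr_hi);
	fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo);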
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.h b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
index 6f809b89..fef9578a 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
@@ -82,4 +82,21 @@ void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status);
 void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status);
 void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status);
 
+void fb_gv11b_write_mmu_fault_buffer_lo_hi(struct gk20a *g, u32 index,
+		u32 addr_lo, u32 addr_hi);
+u32 fb_gv11b_read_mmu_fault_buffer_get(struct gk20a *g, u32 index);
+void fb_gv11b_write_mmu_fault_buffer_get(struct gk20a *g, u32 index,
+		u32 reg_val);
+u32 fb_gv11b_read_mmu_fault_buffer_put(struct gk20a *g, u32 index);
+u32 fb_gv11b_read_mmu_fault_buffer_size(struct gk20a *g, u32 index);
+void fb_gv11b_write_mmu_fault_buffer_size(struct gk20a *g, u32 index,
+		u32 reg_val);
+void fb_gv11b_read_mmu_fault_addr_lo_hi(struct gk20a *g,
+		u32 *addr_lo, u32 *addr_hi);
+void fb_gv11b_read_mmu_fault_inst_lo_hi(struct gk20a *g,
+		u32 *inst_lo, u32 *inst_hi);
+u32 fb_gv11b_read_mmu_fault_info(struct gk20a *g);
+u32 fb_gv11b_read_mmu_fault_status(struct gk20a *g);
+void fb_gv11b_write_mmu_fault_status(struct gk20a *g, u32 reg_val);
+
 #endif
diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
index 9d7dca95..ab9ff707 100644
--- a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c
@@ -438,6 +438,23 @@ static const struct gpu_ops gv11b_ops = {
 		.mem_unlock = NULL,
 		.enable_hub_intr = gv11b_fb_enable_hub_intr,
 		.disable_hub_intr = gv11b_fb_disable_hub_intr,
+		.write_mmu_fault_buffer_lo_hi =
+			fb_gv11b_write_mmu_fault_buffer_lo_hi,
+		.write_mmu_fault_buffer_get =
+			fb_gv11b_write_mmu_fault_buffer_get,
+		.write_mmu_fault_buffer_size =
+			fb_gv11b_write_mmu_fault_buffer_size,
+		.write_mmu_fault_status = fb_gv11b_write_mmu_fault_status,
+		.read_mmu_fault_buffer_get =
+			fb_gv11b_read_mmu_fault_buffer_get,
+		.read_mmu_fault_buffer_put =
+			fb_gv11b_read_mmu_fault_buffer_put,
+		.read_mmu_fault_buffer_size =
+			fb_gv11b_read_mmu_fault_buffer_size,
+		.read_mmu_fault_addr_lo_hi = fb_gv11b_read_mmu_fault_addr_lo_hi,
+		.read_mmu_fault_inst_lo_hi = fb_gv11b_read_mmu_fault_inst_lo_hi,
+		.read_mmu_fault_info = fb_gv11b_read_mmu_fault_info,
+		.read_mmu_fault_status = fb_gv11b_read_mmu_fault_status,
 	},
 	.clock_gating = {
 		.slcg_bus_load_gating_prod =
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c
index 1a627d3f..09ce2492 100644
--- a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c
@@ -291,6 +291,23 @@ static const struct gpu_ops vgpu_gv11b_ops = {
 		.hub_isr = gv11b_fb_hub_isr,
 		.enable_hub_intr = gv11b_fb_enable_hub_intr,
 		.disable_hub_intr = gv11b_fb_disable_hub_intr,
+		.write_mmu_fault_buffer_lo_hi =
+			fb_gv11b_write_mmu_fault_buffer_lo_hi,
+		.write_mmu_fault_buffer_get =
+			fb_gv11b_write_mmu_fault_buffer_get,
+		.write_mmu_fault_buffer_size =
+			fb_gv11b_write_mmu_fault_buffer_size,
+		.write_mmu_fault_status = fb_gv11b_write_mmu_fault_status,
+		.read_mmu_fault_buffer_get =
+			fb_gv11b_read_mmu_fault_buffer_get,
+		.read_mmu_fault_buffer_put =
+			fb_gv11b_read_mmu_fault_buffer_put,
+		.read_mmu_fault_buffer_size =
+			fb_gv11b_read_mmu_fault_buffer_size,
+		.read_mmu_fault_addr_lo_hi = fb_gv11b_read_mmu_fault_addr_lo_hi,
+		.read_mmu_fault_inst_lo_hi = fb_gv11b_read_mmu_fault_inst_lo_hi,
+		.read_mmu_fault_info = fb_gv11b_read_mmu_fault_info,
+		.read_mmu_fault_status = fb_gv11b_read_mmu_fault_status,
 	},
 	.clock_gating = {
 		.slcg_bus_load_gating_prod =
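Note that the vGPU table above reuses the bare-metal fb_gv11b_* accessors, so virtualized configurations still perform direct register access for these operations. The HAL boundary introduced here is also the natural seam for substituting a different backend later; a purely hypothetical override (the function name and body below are illustrative and not part of this patch):

	/* Hypothetical: a vGPU build could assign .read_mmu_fault_status
	 * to an accessor like this instead of
	 * fb_gv11b_read_mmu_fault_status().
	 */
	static u32 vgpu_example_read_mmu_fault_status(struct gk20a *g)
	{
		/* A real implementation would return status mirrored from
		 * the virtualization server; returning 0 keeps the sketch
		 * self-contained.
		 */
		return 0;
	}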