author     Seema Khowala <seemaj@nvidia.com>                      2017-06-02 12:58:23 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2017-07-08 14:35:48 -0400
commit     aa05648fd6038b69d1ed841f33b24cf1875efd83 (patch)
tree       8e9ee6c0f7b937f85b2caa4db70aebcd1e619944 /drivers/gpu
parent     cf33b6c26bd054f5fe09be78ed754049821a8737 (diff)
gpu: nvgpu: gv11b: set up for enabling/handling hub intr
-implement mm ops init_mm_setup_hw
 This will also call *fault*setup*, which does the s/w and h/w set up
 required to get mmu fault info
-implement s/w set up for copying mmu faults
 Two shadow fault buffers are pre-allocated and used to copy fault info:
 one for copying from the fault snap registers/non-replayable h/w fault
 buffer, and one for the replayable h/w fault buffer
-implement s/w set up for buffering mmu faults
 Replayable/non-replayable fault buffers are mapped in BAR2
 virtual/physical address space. These buffers are circular buffers in
 terms of address calculation (see the sketch below). Currently each
 buffer has num host channels entries
-configure h/w for buffering mmu faults
 If s/w set up is successful, configure h/w registers to enable buffered
 mode of mmu faults
-if both s/w and h/w set up are successful, enable corresponding hub
 interrupts
-implement new ops, fault_info_buf_deinit
 This will be called during gk20a_mm_destroy to disable hub intr and
 de-allocate the shadow fault buf that is used to copy mmu fault info
 during mmu fault handling
-implement mm ops remove_bar2_vm
 This will also unmap and free fault buffers mapped in BAR2 if fault
 buffers were allocated

JIRA GPUT19X-7
JIRA GPUT19X-12

Change-Id: I53a38eddbb0a50a1f2024600583f2aae1f1fba6d
Signed-off-by: Seema Khowala <seemaj@nvidia.com>
Reviewed-on: https://git-master/r/1492682
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
GVS: Gerrit_Virtual_Submit
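The fault buffers described above are circular in terms of address calculation: each holds one fixed-size record per host channel, plus one spare entry reserved so a full buffer can be distinguished from an empty one (the "+ 1" in gv11b_mm_mmu_hw_fault_buf_init below). A minimal standalone sketch of that arithmetic follows; the entry size, channel count, and helper names are illustrative assumptions, not part of this patch or the nvgpu API (in the patch the real values come from gmmu_fault_buf_size_v() and g->ops.fifo.get_num_fifos(g)).

#include <stdint.h>
#include <stdio.h>

#define FAULT_BUF_ENTRY_SIZE	32u	/* bytes per hw fault record (assumed) */
#define NUM_HOST_CHANNELS	512u	/* number of host channels (assumed) */

/* One extra entry lets "full" be told apart from "empty". */
#define FAULT_BUF_ENTRIES	(NUM_HOST_CHANNELS + 1u)

/* Byte offset of entry 'idx' inside the BAR2-mapped fault buffer. */
static uint64_t fault_buf_entry_offset(uint32_t idx)
{
	return (uint64_t)(idx % FAULT_BUF_ENTRIES) * FAULT_BUF_ENTRY_SIZE;
}

/* Advance a get/put index, wrapping at the end of the buffer. */
static uint32_t fault_buf_next_idx(uint32_t idx)
{
	return (idx + 1u) % FAULT_BUF_ENTRIES;
}

int main(void)
{
	uint32_t put = FAULT_BUF_ENTRIES - 1u;	/* last entry */

	printf("offset of last entry: %llu\n",
	       (unsigned long long)fault_buf_entry_offset(put));
	printf("next index wraps to:  %u\n", fault_buf_next_idx(put));
	return 0;
}

The same wrap-around is what lets the h/w producer and s/w consumer chase each other through the BAR2-mapped buffer without ever resizing it.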
Diffstat (limited to 'drivers/gpu')
-rw-r--r--   drivers/gpu/nvgpu/gv11b/fb_gv11b.c   173
-rw-r--r--   drivers/gpu/nvgpu/gv11b/fb_gv11b.h    12
-rw-r--r--   drivers/gpu/nvgpu/gv11b/mc_gv11b.c    19
-rw-r--r--   drivers/gpu/nvgpu/gv11b/mm_gv11b.c   198
-rw-r--r--   drivers/gpu/nvgpu/gv11b/mm_gv11b.h     5
5 files changed, 379 insertions, 28 deletions
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
index 21d8cba3..da7c7d4a 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
@@ -15,6 +15,9 @@
 
 #include <linux/types.h>
 
+#include <nvgpu/dma.h>
+#include <nvgpu/log.h>
+
 #include "gk20a/gk20a.h"
 #include "gk20a/kind_gk20a.h"
 
@@ -26,6 +29,7 @@
 #include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
 #include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
 #include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
+#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
 
 #include <nvgpu/log.h>
 #include <nvgpu/enabled.h>
@@ -205,6 +209,90 @@ static void gv11b_init_kind_attr(void)
 	}
 }
 
+u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
+		unsigned int index)
+{
+	u32 reg_val;
+
+	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
+	return fb_mmu_fault_buffer_size_enable_v(reg_val);
+}
+
+void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
+		unsigned int index, unsigned int state)
+{
+	u32 fault_status;
+	u32 reg_val;
+
+	nvgpu_log_fn(g, " ");
+
+	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
+	if (state) {
+		if (gv11b_fb_is_fault_buf_enabled(g, index)) {
+			nvgpu_log_info(g, "fault buffer is already enabled");
+		} else {
+			reg_val |= fb_mmu_fault_buffer_size_enable_true_f();
+			gk20a_writel(g, fb_mmu_fault_buffer_size_r(index),
+				reg_val);
+		}
+
+	} else {
+		struct nvgpu_timeout timeout;
+		u32 delay = GR_IDLE_CHECK_DEFAULT;
+
+		nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+			   NVGPU_TIMER_CPU_TIMER);
+
+		reg_val &= (~(fb_mmu_fault_buffer_size_enable_m()));
+		gk20a_writel(g, fb_mmu_fault_buffer_size_r(index), reg_val);
+
+		fault_status = gk20a_readl(g, fb_mmu_fault_status_r());
+
+		do {
+			if (!(fault_status & fb_mmu_fault_status_busy_true_f()))
+				break;
+			/*
+			 * Make sure fault buffer is disabled.
+			 * This is to avoid accessing fault buffer by hw
+			 * during the window BAR2 is being unmapped by s/w
+			 */
+			nvgpu_log_info(g, "fault status busy set, check again");
+			fault_status = gk20a_readl(g, fb_mmu_fault_status_r());
+
+			nvgpu_usleep_range(delay, delay * 2);
+			delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+				"fault status busy set"));
+	}
+}
+
+void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
+{
+	u32 addr_lo;
+	u32 addr_hi;
+
+	nvgpu_log_fn(g, " ");
+
+	gv11b_fb_fault_buf_set_state_hw(g, index,
+			FAULT_BUF_DISABLED);
+
+	addr_lo = u64_lo32(g->mm.hw_fault_buf[index].gpu_va >>
+			ram_in_base_shift_v());
+	addr_hi = u64_hi32(g->mm.hw_fault_buf[index].gpu_va);
+
+	gk20a_writel(g, fb_mmu_fault_buffer_lo_r(index),
+		fb_mmu_fault_buffer_lo_addr_f(addr_lo));
+
+	gk20a_writel(g, fb_mmu_fault_buffer_hi_r(index),
+		fb_mmu_fault_buffer_hi_addr_f(addr_hi));
+
+	gk20a_writel(g, fb_mmu_fault_buffer_size_r(index),
+		fb_mmu_fault_buffer_size_val_f(g->ops.fifo.get_num_fifos(g)) |
+		fb_mmu_fault_buffer_size_overflow_intr_enable_f());
+
+	gv11b_fb_fault_buf_set_state_hw(g, index, FAULT_BUF_ENABLED);
+}
+
 static void gv11b_fb_intr_en_set(struct gk20a *g,
 			unsigned int index, u32 mask)
 {
@@ -230,15 +318,32 @@ static u32 gv11b_fb_get_hub_intr_clr_mask(struct gk20a *g,
 {
 	u32 mask = 0;
 
-	if (intr_type == HUB_INTR_TYPE_ALL) {
+	if (intr_type & HUB_INTR_TYPE_OTHER) {
+		mask |=
+			fb_niso_intr_en_clr_mmu_other_fault_notify_m();
+	}
+
+	if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
+		mask |=
+			fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m() |
+			fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m();
+	}
+
+	if (intr_type & HUB_INTR_TYPE_REPLAY) {
 		mask |=
-			fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_set_f();
-		return mask;
+			fb_niso_intr_en_clr_mmu_replayable_fault_notify_m() |
+			fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m();
 	}
 
 	if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
 		mask |=
-			fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_set_f();
+			fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_m();
+	}
+
+	if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
+		mask |=
+			fb_niso_intr_en_clr_hub_access_counter_notify_m() |
+			fb_niso_intr_en_clr_hub_access_counter_error_m();
 	}
 
 	return mask;
@@ -249,15 +354,32 @@ static u32 gv11b_fb_get_hub_intr_en_mask(struct gk20a *g,
 {
 	u32 mask = 0;
 
-	if (intr_type == HUB_INTR_TYPE_ALL) {
+	if (intr_type & HUB_INTR_TYPE_OTHER) {
 		mask |=
-			fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_set_f();
-		return mask;
+			fb_niso_intr_en_set_mmu_other_fault_notify_m();
+	}
+
+	if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
+		mask |=
+			fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
+			fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m();
+	}
+
+	if (intr_type & HUB_INTR_TYPE_REPLAY) {
+		mask |=
+			fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
+			fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
 	}
 
 	if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
 		mask |=
-			fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_set_f();
+			fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
+	}
+
+	if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
+		mask |=
+			fb_niso_intr_en_set_hub_access_counter_notify_m() |
+			fb_niso_intr_en_set_hub_access_counter_error_m();
 	}
 
 	return mask;
@@ -469,14 +591,17 @@ static void gv11b_fb_hub_isr(struct gk20a *g)
 	u32 status;
 	u32 niso_intr = gk20a_readl(g, fb_niso_intr_r());
 
-	nvgpu_info(g, "enter hub isr, niso_intr = 0x%x", niso_intr);
+	nvgpu_info(g, "enter hub isr, niso_intr = 0x%08x", niso_intr);
+
+	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
 
 	if (niso_intr &
 		 (fb_niso_intr_hub_access_counter_notify_pending_f() |
 		  fb_niso_intr_hub_access_counter_error_pending_f())) {
 
 		nvgpu_info(g, "hub access counter notify/error");
-	} else if (niso_intr &
+	}
+	if (niso_intr &
 		 fb_niso_intr_mmu_ecc_uncorrected_error_notify_pending_f()) {
 
 		nvgpu_info(g, "ecc uncorrected error notify");
@@ -501,9 +626,33 @@ static void gv11b_fb_hub_isr(struct gk20a *g)
 		gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX,
 				HUB_INTR_TYPE_ECC_UNCORRECTED);
 
-	} else {
-		nvgpu_info(g, "mmu fault : TODO");
 	}
+	if (niso_intr &
+		(fb_niso_intr_mmu_other_fault_notify_m() |
+		 fb_niso_intr_mmu_replayable_fault_notify_m() |
+		 fb_niso_intr_mmu_replayable_fault_overflow_m() |
+		 fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
+		 fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) {
+
+		nvgpu_info(g, "mmu fault : No handling in place");
+
+	}
+
+	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
+}
+
+bool gv11b_fb_mmu_fault_pending(struct gk20a *g)
+{
+	if (gk20a_readl(g, fb_niso_intr_r()) &
+		(fb_niso_intr_mmu_other_fault_notify_m() |
+		 fb_niso_intr_mmu_ecc_uncorrected_error_notify_m() |
+		 fb_niso_intr_mmu_replayable_fault_notify_m() |
+		 fb_niso_intr_mmu_replayable_fault_overflow_m() |
+		 fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
+		 fb_niso_intr_mmu_nonreplayable_fault_overflow_m()))
+		return true;
+
+	return false;
 }
 
 void gv11b_init_fb(struct gpu_ops *gops)
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.h b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
index eff3c25d..d8f5e145 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h
@@ -40,9 +40,19 @@ struct gpu_ops;
 	HUB_INTR_TYPE_ECC_UNCORRECTED | \
 	HUB_INTR_TYPE_ACCESS_COUNTER)
 
+#define FAULT_TYPE_OTHER_AND_NONREPLAY 0
+#define FAULT_TYPE_REPLAY 1
+
+void gv11b_init_fb(struct gpu_ops *gops);
+u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
+		unsigned int index);
+void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
+		unsigned int index, unsigned int state);
+void gv11b_fb_nonreplay_fault_buf_configure_hw(struct gk20a *g);
 void gv11b_fb_enable_hub_intr(struct gk20a *g,
 		unsigned int index, unsigned int intr_type);
 void gv11b_fb_disable_hub_intr(struct gk20a *g,
 		unsigned int index, unsigned int intr_type);
-void gv11b_init_fb(struct gpu_ops *gops);
+void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index);
+bool gv11b_fb_mmu_fault_pending(struct gk20a *g);
 #endif
diff --git a/drivers/gpu/nvgpu/gv11b/mc_gv11b.c b/drivers/gpu/nvgpu/gv11b/mc_gv11b.c
index 8b8fcea0..cc29f74a 100644
--- a/drivers/gpu/nvgpu/gv11b/mc_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/mc_gv11b.c
@@ -30,6 +30,10 @@ static void mc_gv11b_intr_enable(struct gk20a *g)
 
 	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING),
 		0xffffffff);
+	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
+		0xffffffff);
+	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL);
+
 	g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_STALLING] =
 		mc_intr_pfifo_pending_f() |
 		mc_intr_hub_pending_f() |
@@ -38,20 +42,19 @@ static void mc_gv11b_intr_enable(struct gk20a *g)
 		mc_intr_ltc_pending_f() |
 		eng_intr_mask;
 
-	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
-		g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_STALLING]);
-
-	gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING),
-		0xffffffff);
 	g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] =
 		mc_intr_pfifo_pending_f()
 		| eng_intr_mask;
+
+	/* TODO: Enable PRI faults for HUB ECC err intr */
+	gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX, g->mm.hub_intr_types);
+
+	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING),
+		g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_STALLING]);
+
 	gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING),
 		g->ops.mc.intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]);
 
-	/* TODO: Enable PRI faults for HUB ECC err intr */
-	gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX,
-		HUB_INTR_TYPE_ECC_UNCORRECTED);
 }
 
 static bool gv11b_mc_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0)
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
index cc8dafa3..d6184cee 100644
--- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c
@@ -15,13 +15,21 @@
 
 #include <linux/pm_runtime.h>
 
+#include <nvgpu/kmem.h>
+#include <nvgpu/dma.h>
+#include <nvgpu/log.h>
+
 #include "gk20a/gk20a.h"
+#include "gk20a/mm_gk20a.h"
 
 #include "gp10b/mm_gp10b.h"
+#include "gp10b/mc_gp10b.h"
 
 #include "mm_gv11b.h"
+#include "fb_gv11b.h"
 
 #include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
+#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
 
 #define NVGPU_L3_ALLOC_BIT 36
 
@@ -46,12 +54,187 @@ static void gv11b_init_inst_block(struct nvgpu_mem *inst_block,
 
 static bool gv11b_mm_mmu_fault_pending(struct gk20a *g)
 {
-	if (gk20a_readl(g, fb_niso_intr_r()) &
-		(fb_niso_intr_mmu_nonreplayable_fault_notify_pending_f() |
-		 fb_niso_intr_mmu_nonreplayable_fault_overflow_pending_f()))
-		return true;
+	return gv11b_fb_mmu_fault_pending(g);
+}
 
-	return false;
+static void gv11b_mm_fault_info_mem_destroy(struct gk20a *g)
+{
+	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
+
+	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_OTHER |
+			HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY);
+
+	nvgpu_kfree(g, g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY]);
+
+	g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] = NULL;
+	g->mm.fault_info[FAULT_TYPE_REPLAY] = NULL;
+
+	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
+	nvgpu_mutex_destroy(&g->mm.hub_isr_mutex);
+}
+
+static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g,
+		u32 *hub_intr_types)
+{
+	struct mmu_fault_info *fault_info_mem;
+
+	fault_info_mem = nvgpu_kzalloc(g, sizeof(struct mmu_fault_info) *
+						FAULT_TYPE_NUM);
+	if (!fault_info_mem) {
+		nvgpu_log_info(g, "failed to alloc shadow fault info");
+		return -ENOMEM;
+	}
+	/* shadow buffer for copying mmu fault info */
+	g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] =
+		&fault_info_mem[FAULT_TYPE_OTHER_AND_NONREPLAY];
+
+	g->mm.fault_info[FAULT_TYPE_REPLAY] =
+		&fault_info_mem[FAULT_TYPE_REPLAY];
+
+	*hub_intr_types |= HUB_INTR_TYPE_OTHER;
+	return 0;
+}
+
+static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g,
+		u32 *hub_intr_types)
+{
+	struct vm_gk20a *vm = g->mm.bar2.vm;
+	int err = 0;
+	size_t fb_size;
+
+	/* Max entries take care of 1 entry used for full detection */
+	fb_size = (g->ops.fifo.get_num_fifos(g) + 1) *
+				gmmu_fault_buf_size_v();
+
+	err = nvgpu_dma_alloc_map_sys(vm, fb_size,
+			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
+	if (err) {
+		nvgpu_err(g,
+			"Error in hw mmu fault buf [0] alloc in bar2 vm ");
+		/* Fault will be snapped in pri reg but not in buffer */
+		return;
+	}
+
+	g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] =
+		HW_FAULT_BUF_STATUS_ALLOC_TRUE;
+	*hub_intr_types |= HUB_INTR_TYPE_NONREPLAY;
+
+	err = nvgpu_dma_alloc_map_sys(vm, fb_size,
+			&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
+	if (err) {
+		nvgpu_err(g,
+			"Error in hw mmu fault buf [1] alloc in bar2 vm ");
+		/* Fault will be snapped in pri reg but not in buffer */
+		return;
+	}
+	g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] =
+		HW_FAULT_BUF_STATUS_ALLOC_TRUE;
+	*hub_intr_types |= HUB_INTR_TYPE_REPLAY;
+}
+
+static void gv11b_mm_mmu_hw_fault_buf_deinit(struct gk20a *g)
+{
+	struct vm_gk20a *vm = g->mm.bar2.vm;
+
+	gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_NONREPLAY |
+			HUB_INTR_TYPE_REPLAY);
+
+	g->mm.hub_intr_types &= (~(HUB_INTR_TYPE_NONREPLAY |
+				   HUB_INTR_TYPE_REPLAY));
+
+	if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) {
+		gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX,
+				FAULT_BUF_DISABLED);
+	}
+
+	if ((gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))) {
+		gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX,
+				FAULT_BUF_DISABLED);
+	}
+
+	if (g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] ==
+			HW_FAULT_BUF_STATUS_ALLOC_TRUE) {
+		nvgpu_dma_unmap_free(vm,
+			&g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]);
+		g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] =
+			HW_FAULT_BUF_STATUS_ALLOC_FALSE;
+	}
+
+	if (g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] ==
+			HW_FAULT_BUF_STATUS_ALLOC_TRUE) {
+		nvgpu_dma_unmap_free(vm,
+			&g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]);
+		g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] =
+			HW_FAULT_BUF_STATUS_ALLOC_FALSE;
+	}
+}
+
+static void gv11b_mm_remove_bar2_vm(struct gk20a *g)
+{
+	struct mm_gk20a *mm = &g->mm;
+
+	gv11b_mm_mmu_hw_fault_buf_deinit(g);
+
+	gk20a_free_inst_block(g, &mm->bar2.inst_block);
+	nvgpu_vm_put(mm->bar2.vm);
+}
+
+static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g)
+{
+	if (g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] ==
+			HW_FAULT_BUF_STATUS_ALLOC_TRUE) {
+		gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX);
+	}
+	if (g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] ==
+			HW_FAULT_BUF_STATUS_ALLOC_TRUE) {
+		gv11b_fb_fault_buf_configure_hw(g, REPLAY_REG_INDEX);
+	}
+}
+
+static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g)
+{
+	int err;
+
+	nvgpu_mutex_init(&g->mm.hub_isr_mutex);
+
+	g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] =
+		HW_FAULT_BUF_STATUS_ALLOC_FALSE;
+	g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] =
+		HW_FAULT_BUF_STATUS_ALLOC_FALSE;
+
+	g->mm.hub_intr_types = HUB_INTR_TYPE_ECC_UNCORRECTED;
+
+	err = gv11b_mm_mmu_fault_info_buf_init(g, &g->mm.hub_intr_types);
+
+	if (!err)
+		gv11b_mm_mmu_hw_fault_buf_init(g, &g->mm.hub_intr_types);
+
+	return err;
+}
+
+static int gv11b_init_mm_setup_hw(struct gk20a *g)
+{
+	int err = 0;
+
+	nvgpu_log_fn(g, "start");
+
+	g->ops.fb.set_mmu_page_size(g);
+	g->ops.fb.init_hw(g);
+
+	err = g->ops.mm.init_bar2_mm_hw_setup(g);
+	if (err)
+		return err;
+
+	if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g))
+		return -EBUSY;
+
+	err = gv11b_mm_mmu_fault_setup_sw(g);
+	if (!err)
+		gv11b_mm_mmu_fault_setup_hw(g);
+
+	nvgpu_log_fn(g, "end");
+
+	return err;
 }
 
 void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate)
@@ -82,8 +265,11 @@ void gv11b_init_mm(struct gpu_ops *gops)
 	gp10b_init_mm(gops);
 	gops->mm.is_bar1_supported = gv11b_mm_is_bar1_supported;
 	gops->mm.init_inst_block = gv11b_init_inst_block;
-	gops->mm.init_mm_setup_hw = gk20a_init_mm_setup_hw;
 	gops->mm.mmu_fault_pending = gv11b_mm_mmu_fault_pending;
 	gops->mm.l2_flush = gv11b_mm_l2_flush;
 	gops->mm.gpu_phys_addr = gv11b_gpu_phys_addr;
+	gops->mm.init_mm_setup_hw = gv11b_init_mm_setup_hw;
+	gops->mm.fault_info_mem_destroy =
+		gv11b_mm_fault_info_mem_destroy;
+	gops->mm.remove_bar2_vm = gv11b_mm_remove_bar2_vm;
 }
diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.h b/drivers/gpu/nvgpu/gv11b/mm_gv11b.h
index 4284b171..a887c7f4 100644
--- a/drivers/gpu/nvgpu/gv11b/mm_gv11b.h
+++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.h
@@ -1,6 +1,6 @@
 /*
  * GV11B MM
- * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -15,6 +15,9 @@
 #ifndef MM_GV11B_H
 #define MM_GV11B_H
 
+#define HW_FAULT_BUF_STATUS_ALLOC_TRUE 1
+#define HW_FAULT_BUF_STATUS_ALLOC_FALSE 0
+
 struct gpu_ops;
 
 void gv11b_init_mm(struct gpu_ops *gops);