Diffstat (limited to 'drivers/gpu/nvgpu/gv11b/fb_gv11b.c')
-rw-r--r--  drivers/gpu/nvgpu/gv11b/fb_gv11b.c  173
1 file changed, 161 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
index 21d8cba3..da7c7d4a 100644
--- a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
@@ -15,6 +15,9 @@
 
 #include <linux/types.h>
 
+#include <nvgpu/dma.h>
+#include <nvgpu/log.h>
+
 #include "gk20a/gk20a.h"
 #include "gk20a/kind_gk20a.h"
 
@@ -26,6 +29,7 @@
 #include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
 #include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
 #include <nvgpu/hw/gv11b/hw_fifo_gv11b.h>
+#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
 
 #include <nvgpu/log.h>
 #include <nvgpu/enabled.h>
@@ -205,6 +209,90 @@ static void gv11b_init_kind_attr(void)
 	}
 }
 
+u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g,
+			unsigned int index)
+{
+	u32 reg_val;
+
+	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
+	return fb_mmu_fault_buffer_size_enable_v(reg_val);
+}
+
+void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
+		 unsigned int index, unsigned int state)
+{
+	u32 fault_status;
+	u32 reg_val;
+
+	nvgpu_log_fn(g, " ");
+
+	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
+	if (state) {
+		if (gv11b_fb_is_fault_buf_enabled(g, index)) {
+			nvgpu_log_info(g, "fault buffer is already enabled");
+		} else {
+			reg_val |= fb_mmu_fault_buffer_size_enable_true_f();
+			gk20a_writel(g, fb_mmu_fault_buffer_size_r(index),
+				reg_val);
+		}
+
+	} else {
+		struct nvgpu_timeout timeout;
+		u32 delay = GR_IDLE_CHECK_DEFAULT;
+
+		nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
+			   NVGPU_TIMER_CPU_TIMER);
+
+		reg_val &= (~(fb_mmu_fault_buffer_size_enable_m()));
+		gk20a_writel(g, fb_mmu_fault_buffer_size_r(index), reg_val);
+
+		fault_status = gk20a_readl(g, fb_mmu_fault_status_r());
+
+		do {
+			if (!(fault_status & fb_mmu_fault_status_busy_true_f()))
+				break;
+			/*
+			 * Make sure fault buffer is disabled.
+			 * This is to avoid accessing fault buffer by hw
+			 * during the window BAR2 is being unmapped by s/w
+			 */
+			nvgpu_log_info(g, "fault status busy set, check again");
+			fault_status = gk20a_readl(g, fb_mmu_fault_status_r());
+
+			nvgpu_usleep_range(delay, delay * 2);
+			delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+				"fault status busy set"));
+	}
+}
+
+void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
+{
+	u32 addr_lo;
+	u32 addr_hi;
+
+	nvgpu_log_fn(g, " ");
+
+	gv11b_fb_fault_buf_set_state_hw(g, index,
+		 FAULT_BUF_DISABLED);
+
+	addr_lo = u64_lo32(g->mm.hw_fault_buf[index].gpu_va >>
+			ram_in_base_shift_v());
+	addr_hi = u64_hi32(g->mm.hw_fault_buf[index].gpu_va);
+
+	gk20a_writel(g, fb_mmu_fault_buffer_lo_r(index),
+		fb_mmu_fault_buffer_lo_addr_f(addr_lo));
+
+	gk20a_writel(g, fb_mmu_fault_buffer_hi_r(index),
+		fb_mmu_fault_buffer_hi_addr_f(addr_hi));
+
+	gk20a_writel(g, fb_mmu_fault_buffer_size_r(index),
+		fb_mmu_fault_buffer_size_val_f(g->ops.fifo.get_num_fifos(g)) |
+		fb_mmu_fault_buffer_size_overflow_intr_enable_f());
+
+	gv11b_fb_fault_buf_set_state_hw(g, index, FAULT_BUF_ENABLED);
+}
+
 static void gv11b_fb_intr_en_set(struct gk20a *g,
 			unsigned int index, u32 mask)
 {
@@ -230,15 +318,32 @@ static u32 gv11b_fb_get_hub_intr_clr_mask(struct gk20a *g,
 {
 	u32 mask = 0;
 
-	if (intr_type == HUB_INTR_TYPE_ALL) {
+	if (intr_type & HUB_INTR_TYPE_OTHER) {
+		mask |=
+			fb_niso_intr_en_clr_mmu_other_fault_notify_m();
+	}
+
+	if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
+		mask |=
+			fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m() |
+			fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m();
+	}
+
+	if (intr_type & HUB_INTR_TYPE_REPLAY) {
 		mask |=
-			fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_set_f();
-		return mask;
+			fb_niso_intr_en_clr_mmu_replayable_fault_notify_m() |
+			fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m();
 	}
 
 	if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
 		mask |=
-			fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_set_f();
+			fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_m();
+	}
+
+	if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
+		mask |=
+			fb_niso_intr_en_clr_hub_access_counter_notify_m() |
+			fb_niso_intr_en_clr_hub_access_counter_error_m();
 	}
 
 	return mask;
@@ -249,15 +354,32 @@ static u32 gv11b_fb_get_hub_intr_en_mask(struct gk20a *g,
 {
 	u32 mask = 0;
 
-	if (intr_type == HUB_INTR_TYPE_ALL) {
+	if (intr_type & HUB_INTR_TYPE_OTHER) {
 		mask |=
-			fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_set_f();
-		return mask;
+			fb_niso_intr_en_set_mmu_other_fault_notify_m();
+	}
+
+	if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
+		mask |=
+			fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
+			fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m();
+	}
+
+	if (intr_type & HUB_INTR_TYPE_REPLAY) {
+		mask |=
+			fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
+			fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
 	}
 
 	if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
 		mask |=
-			fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_set_f();
+			fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
+	}
+
+	if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
+		mask |=
+			fb_niso_intr_en_set_hub_access_counter_notify_m() |
+			fb_niso_intr_en_set_hub_access_counter_error_m();
 	}
 
 	return mask;
@@ -469,14 +591,17 @@ static void gv11b_fb_hub_isr(struct gk20a *g)
 	u32 status;
 	u32 niso_intr = gk20a_readl(g, fb_niso_intr_r());
 
-	nvgpu_info(g, "enter hub isr, niso_intr = 0x%x", niso_intr);
+	nvgpu_info(g, "enter hub isr, niso_intr = 0x%08x", niso_intr);
+
+	nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
 
 	if (niso_intr &
 		 (fb_niso_intr_hub_access_counter_notify_pending_f() |
 		  fb_niso_intr_hub_access_counter_error_pending_f())) {
 
 		nvgpu_info(g, "hub access counter notify/error");
-	} else if (niso_intr &
+	}
+	if (niso_intr &
 		fb_niso_intr_mmu_ecc_uncorrected_error_notify_pending_f()) {
 
 		nvgpu_info(g, "ecc uncorrected error notify");
@@ -501,9 +626,33 @@ static void gv11b_fb_hub_isr(struct gk20a *g)
 		gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX,
 			 HUB_INTR_TYPE_ECC_UNCORRECTED);
 
-	} else {
-		nvgpu_info(g, "mmu fault : TODO");
 	}
+	if (niso_intr &
+		(fb_niso_intr_mmu_other_fault_notify_m() |
+		fb_niso_intr_mmu_replayable_fault_notify_m() |
+		fb_niso_intr_mmu_replayable_fault_overflow_m() |
+		fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
+		fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) {
+
+		nvgpu_info(g, "mmu fault : No handling in place");
+
+	}
+
+	nvgpu_mutex_release(&g->mm.hub_isr_mutex);
+}
+
+bool gv11b_fb_mmu_fault_pending(struct gk20a *g)
+{
+	if (gk20a_readl(g, fb_niso_intr_r()) &
+		(fb_niso_intr_mmu_other_fault_notify_m() |
+		fb_niso_intr_mmu_ecc_uncorrected_error_notify_m() |
+		fb_niso_intr_mmu_replayable_fault_notify_m() |
+		fb_niso_intr_mmu_replayable_fault_overflow_m() |
+		fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
+		fb_niso_intr_mmu_nonreplayable_fault_overflow_m()))
+		return true;
+
+	return false;
 }
 
 void gv11b_init_fb(struct gpu_ops *gops)