author    Terje Bergstrom <tbergstrom@nvidia.com>    2018-07-02 17:30:26 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2018-07-12 23:44:13 -0400
commit    b97bcb3c689426a1b099e88ceef4d55584e2362b (patch)
tree      4ad683912a323eca81a493314db3d74b46b6aa71 /drivers/gpu/nvgpu/common
parent    b07a304ba3e747c80fe3e0a16caec88c8e1e8b28 (diff)
gpu: nvgpu: Move FB to common
Move all FB HAL implementations to common/fb.

JIRA NVGPU-596

Change-Id: Id4ea09d608f5d6d1b245bddac09ecf1444b8ab30
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1769724
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--  drivers/gpu/nvgpu/common/fb/fb_gk20a.c    123
-rw-r--r--  drivers/gpu/nvgpu/common/fb/fb_gk20a.h     33
-rw-r--r--  drivers/gpu/nvgpu/common/fb/fb_gm20b.c    229
-rw-r--r--  drivers/gpu/nvgpu/common/fb/fb_gm20b.h     47
-rw-r--r--  drivers/gpu/nvgpu/common/fb/fb_gp106.c     51
-rw-r--r--  drivers/gpu/nvgpu/common/fb/fb_gp106.h     28
-rw-r--r--  drivers/gpu/nvgpu/common/fb/fb_gp10b.c     38
-rw-r--r--  drivers/gpu/nvgpu/common/fb/fb_gp10b.h     32
-rw-r--r--  drivers/gpu/nvgpu/common/fb/fb_gv100.c    257
-rw-r--r--  drivers/gpu/nvgpu/common/fb/fb_gv100.h     36
-rw-r--r--  drivers/gpu/nvgpu/common/fb/fb_gv11b.c   1516
-rw-r--r--  drivers/gpu/nvgpu/common/fb/fb_gv11b.h     82
12 files changed, 2472 insertions(+), 0 deletions(-)
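Note that this diff is limited to the relocated files under common/fb; the per-chip HAL tables that consume these functions live outside this path and are not shown here. As a rough, illustrative sketch only (not part of this change; the gpu_ops.fb member names are assumptions inferred from the g->ops.fb.* call sites visible in the new files), a per-chip HAL file would include the new common/fb headers and wire the moved functions roughly like this:

/*
 * Illustrative sketch only -- not part of this patch. The gpu_ops.fb
 * member names below are assumptions based on the g->ops.fb.* usage in
 * the files added by this change; the real wiring is done in the
 * per-chip hal_*.c files, which are outside the common/ path shown here.
 */
#include "gk20a/gk20a.h"

#include "common/fb/fb_gk20a.h"
#include "common/fb/fb_gm20b.h"

static void example_populate_fb_ops(struct gpu_ops *gops)
{
	/* Reset and basic FB init now come from common/fb. */
	gops->fb.reset = fb_gk20a_reset;
	gops->fb.init_hw = gk20a_fb_init_hw;
	gops->fb.init_fs_state = fb_gm20b_init_fs_state;

	/* MMU-related hooks also move to common/fb with this change. */
	gops->fb.tlb_invalidate = gk20a_fb_tlb_invalidate;
	gops->fb.set_mmu_page_size = gm20b_fb_set_mmu_page_size;
	gops->fb.set_debug_mode = gm20b_fb_set_debug_mode;
}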
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gk20a.c b/drivers/gpu/nvgpu/common/fb/fb_gk20a.c
new file mode 100644
index 00000000..d27ac9d0
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fb/fb_gk20a.c
@@ -0,0 +1,123 @@
1/*
2 * GK20A memory interface
3 *
4 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <trace/events/gk20a.h>
26
27#include "gk20a/gk20a.h"
28
29#include "fb_gk20a.h"
30
31#include <nvgpu/timers.h>
32
33#include <nvgpu/hw/gk20a/hw_mc_gk20a.h>
34#include <nvgpu/hw/gk20a/hw_fb_gk20a.h>
35
36void fb_gk20a_reset(struct gk20a *g)
37{
38 u32 val;
39
40 nvgpu_log_info(g, "reset gk20a fb");
41
42 g->ops.mc.reset(g, mc_enable_pfb_enabled_f() |
43 mc_enable_l2_enabled_f() |
44 mc_enable_xbar_enabled_f() |
45 mc_enable_hub_enabled_f());
46
47 val = gk20a_readl(g, mc_elpg_enable_r());
48 val |= mc_elpg_enable_xbar_enabled_f()
49 | mc_elpg_enable_pfb_enabled_f()
50 | mc_elpg_enable_hub_enabled_f();
51 gk20a_writel(g, mc_elpg_enable_r(), val);
52}
53
54void gk20a_fb_init_hw(struct gk20a *g)
55{
56 u32 addr = nvgpu_mem_get_addr(g, &g->mm.sysmem_flush) >> 8;
57
58 gk20a_writel(g, fb_niso_flush_sysmem_addr_r(), addr);
59}
60
61void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
62{
63 struct nvgpu_timeout timeout;
64 u32 addr_lo;
65 u32 data;
66
67 nvgpu_log_fn(g, " ");
68
69 /* pagetables are considered sw states which are preserved after
70 prepare_poweroff. When gk20a deinit releases those pagetables,
71 common code in vm unmap path calls tlb invalidate that touches
72 hw. Use the power_on flag to skip tlb invalidation when gpu
73 power is turned off */
74
75 if (!g->power_on)
76 return;
77
78 addr_lo = u64_lo32(nvgpu_mem_get_addr(g, pdb) >> 12);
79
80 nvgpu_mutex_acquire(&g->mm.tlb_lock);
81
82 trace_gk20a_mm_tlb_invalidate(g->name);
83
84 nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
85
86 do {
87 data = gk20a_readl(g, fb_mmu_ctrl_r());
88 if (fb_mmu_ctrl_pri_fifo_space_v(data) != 0)
89 break;
90 nvgpu_udelay(2);
91 } while (!nvgpu_timeout_expired_msg(&timeout,
92 "wait mmu fifo space"));
93
94 if (nvgpu_timeout_peek_expired(&timeout))
95 goto out;
96
97 nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);
98
99 gk20a_writel(g, fb_mmu_invalidate_pdb_r(),
100 fb_mmu_invalidate_pdb_addr_f(addr_lo) |
101 nvgpu_aperture_mask(g, pdb,
102 fb_mmu_invalidate_pdb_aperture_sys_mem_f(),
103 fb_mmu_invalidate_pdb_aperture_sys_mem_f(),
104 fb_mmu_invalidate_pdb_aperture_vid_mem_f()));
105
106 gk20a_writel(g, fb_mmu_invalidate_r(),
107 fb_mmu_invalidate_all_va_true_f() |
108 fb_mmu_invalidate_trigger_true_f());
109
110 do {
111 data = gk20a_readl(g, fb_mmu_ctrl_r());
112 if (fb_mmu_ctrl_pri_fifo_empty_v(data) !=
113 fb_mmu_ctrl_pri_fifo_empty_false_f())
114 break;
115 nvgpu_udelay(2);
116 } while (!nvgpu_timeout_expired_msg(&timeout,
117 "wait mmu invalidate"));
118
119 trace_gk20a_mm_tlb_invalidate_done(g->name);
120
121out:
122 nvgpu_mutex_release(&g->mm.tlb_lock);
123}
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gk20a.h b/drivers/gpu/nvgpu/common/fb/fb_gk20a.h
new file mode 100644
index 00000000..072c9027
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fb/fb_gk20a.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef FB_GK20A_H
24#define FB_GK20A_H
25
26struct gk20a;
27struct nvgpu_mem;
28
29void fb_gk20a_reset(struct gk20a *g);
30void gk20a_fb_init_hw(struct gk20a *g);
31void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb);
32
33#endif
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gm20b.c b/drivers/gpu/nvgpu/common/fb/fb_gm20b.c
new file mode 100644
index 00000000..bd093b31
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fb/fb_gm20b.c
@@ -0,0 +1,229 @@
1/*
2 * GM20B GPC MMU
3 *
4 * Copyright (c) 2014-2018, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <nvgpu/sizes.h>
26
27#include "gk20a/gk20a.h"
28
29#include "fb_gk20a.h"
30#include "fb_gm20b.h"
31
32#include <nvgpu/hw/gm20b/hw_fb_gm20b.h>
33#include <nvgpu/hw/gm20b/hw_top_gm20b.h>
34#include <nvgpu/hw/gm20b/hw_gmmu_gm20b.h>
35#include <nvgpu/hw/gm20b/hw_gr_gm20b.h>
36
37#define VPR_INFO_FETCH_WAIT (5)
38#define WPR_INFO_ADDR_ALIGNMENT 0x0000000c
39
40void fb_gm20b_init_fs_state(struct gk20a *g)
41{
42 nvgpu_log_info(g, "initialize gm20b fb");
43
44 gk20a_writel(g, fb_fbhub_num_active_ltcs_r(),
45 g->ltc_count);
46
47 if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
48 /* Bypass MMU check for non-secure boot. For
49 * secure boot, this register write has no effect. */
50 gk20a_writel(g, fb_priv_mmu_phy_secure_r(), 0xffffffffU);
51 }
52}
53
54void gm20b_fb_set_mmu_page_size(struct gk20a *g)
55{
56 /* set large page size in fb */
57 u32 fb_mmu_ctrl = gk20a_readl(g, fb_mmu_ctrl_r());
58 fb_mmu_ctrl |= fb_mmu_ctrl_use_pdb_big_page_size_true_f();
59 gk20a_writel(g, fb_mmu_ctrl_r(), fb_mmu_ctrl);
60}
61
62bool gm20b_fb_set_use_full_comp_tag_line(struct gk20a *g)
63{
64 /* set large page size in fb */
65 u32 fb_mmu_ctrl = gk20a_readl(g, fb_mmu_ctrl_r());
66 fb_mmu_ctrl |= fb_mmu_ctrl_use_full_comp_tag_line_true_f();
67 gk20a_writel(g, fb_mmu_ctrl_r(), fb_mmu_ctrl);
68
69 return true;
70}
71
72u32 gm20b_fb_mmu_ctrl(struct gk20a *g)
73{
74 return gk20a_readl(g, fb_mmu_ctrl_r());
75}
76
77u32 gm20b_fb_mmu_debug_ctrl(struct gk20a *g)
78{
79 return gk20a_readl(g, fb_mmu_debug_ctrl_r());
80}
81
82u32 gm20b_fb_mmu_debug_wr(struct gk20a *g)
83{
84 return gk20a_readl(g, fb_mmu_debug_wr_r());
85}
86
87u32 gm20b_fb_mmu_debug_rd(struct gk20a *g)
88{
89 return gk20a_readl(g, fb_mmu_debug_rd_r());
90}
91
92unsigned int gm20b_fb_compression_page_size(struct gk20a *g)
93{
94 return SZ_128K;
95}
96
97unsigned int gm20b_fb_compressible_page_size(struct gk20a *g)
98{
99 return SZ_64K;
100}
101
102u32 gm20b_fb_compression_align_mask(struct gk20a *g)
103{
104 return SZ_64K - 1;
105}
106
107void gm20b_fb_dump_vpr_wpr_info(struct gk20a *g)
108{
109 u32 val;
110
111 /* print vpr and wpr info */
112 val = gk20a_readl(g, fb_mmu_vpr_info_r());
113 val &= ~0x3;
114 val |= fb_mmu_vpr_info_index_addr_lo_v();
115 gk20a_writel(g, fb_mmu_vpr_info_r(), val);
116 nvgpu_err(g, "VPR: %08x %08x %08x %08x",
117 gk20a_readl(g, fb_mmu_vpr_info_r()),
118 gk20a_readl(g, fb_mmu_vpr_info_r()),
119 gk20a_readl(g, fb_mmu_vpr_info_r()),
120 gk20a_readl(g, fb_mmu_vpr_info_r()));
121
122 val = gk20a_readl(g, fb_mmu_wpr_info_r());
123 val &= ~0xf;
124 val |= (fb_mmu_wpr_info_index_allow_read_v());
125 gk20a_writel(g, fb_mmu_wpr_info_r(), val);
126 nvgpu_err(g, "WPR: %08x %08x %08x %08x %08x %08x",
127 gk20a_readl(g, fb_mmu_wpr_info_r()),
128 gk20a_readl(g, fb_mmu_wpr_info_r()),
129 gk20a_readl(g, fb_mmu_wpr_info_r()),
130 gk20a_readl(g, fb_mmu_wpr_info_r()),
131 gk20a_readl(g, fb_mmu_wpr_info_r()),
132 gk20a_readl(g, fb_mmu_wpr_info_r()));
133
134}
135
136static int gm20b_fb_vpr_info_fetch_wait(struct gk20a *g,
137 unsigned int msec)
138{
139 struct nvgpu_timeout timeout;
140
141 nvgpu_timeout_init(g, &timeout, msec, NVGPU_TIMER_CPU_TIMER);
142
143 do {
144 u32 val;
145
146 val = gk20a_readl(g, fb_mmu_vpr_info_r());
147 if (fb_mmu_vpr_info_fetch_v(val) ==
148 fb_mmu_vpr_info_fetch_false_v())
149 return 0;
150
151 } while (!nvgpu_timeout_expired(&timeout));
152
153 return -ETIMEDOUT;
154}
155
156int gm20b_fb_vpr_info_fetch(struct gk20a *g)
157{
158 if (gm20b_fb_vpr_info_fetch_wait(g, VPR_INFO_FETCH_WAIT)) {
159 return -ETIMEDOUT;
160 }
161
162 gk20a_writel(g, fb_mmu_vpr_info_r(),
163 fb_mmu_vpr_info_fetch_true_v());
164
165 return gm20b_fb_vpr_info_fetch_wait(g, VPR_INFO_FETCH_WAIT);
166}
167
168void gm20b_fb_read_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf)
169{
170 u32 val = 0;
171 u64 wpr_start = 0;
172 u64 wpr_end = 0;
173
174 val = gk20a_readl(g, fb_mmu_wpr_info_r());
175 val &= ~0xF;
176 val |= fb_mmu_wpr_info_index_wpr1_addr_lo_v();
177 gk20a_writel(g, fb_mmu_wpr_info_r(), val);
178
179 val = gk20a_readl(g, fb_mmu_wpr_info_r()) >> 0x4;
180 wpr_start = hi32_lo32_to_u64(
181 (val >> (32 - WPR_INFO_ADDR_ALIGNMENT)),
182 (val << WPR_INFO_ADDR_ALIGNMENT));
183
184 val = gk20a_readl(g, fb_mmu_wpr_info_r());
185 val &= ~0xF;
186 val |= fb_mmu_wpr_info_index_wpr1_addr_hi_v();
187 gk20a_writel(g, fb_mmu_wpr_info_r(), val);
188
189 val = gk20a_readl(g, fb_mmu_wpr_info_r()) >> 0x4;
190 wpr_end = hi32_lo32_to_u64(
191 (val >> (32 - WPR_INFO_ADDR_ALIGNMENT)),
192 (val << WPR_INFO_ADDR_ALIGNMENT));
193
194 inf->wpr_base = wpr_start;
195 inf->nonwpr_base = 0;
196 inf->size = (wpr_end - wpr_start);
197}
198
199bool gm20b_fb_debug_mode_enabled(struct gk20a *g)
200{
201 u32 debug_ctrl = gk20a_readl(g, gr_gpcs_pri_mmu_debug_ctrl_r());
202 return gr_gpcs_pri_mmu_debug_ctrl_debug_v(debug_ctrl) ==
203 gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v();
204}
205
206void gm20b_fb_set_debug_mode(struct gk20a *g, bool enable)
207{
208 u32 reg_val, fb_debug_ctrl, gpc_debug_ctrl;
209
210 if (enable) {
211 fb_debug_ctrl = fb_mmu_debug_ctrl_debug_enabled_f();
212 gpc_debug_ctrl = gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_f();
213 g->mmu_debug_ctrl = true;
214 } else {
215 fb_debug_ctrl = fb_mmu_debug_ctrl_debug_disabled_f();
216 gpc_debug_ctrl = gr_gpcs_pri_mmu_debug_ctrl_debug_disabled_f();
217 g->mmu_debug_ctrl = false;
218 }
219
220 reg_val = gk20a_readl(g, fb_mmu_debug_ctrl_r());
221 reg_val = set_field(reg_val,
222 fb_mmu_debug_ctrl_debug_m(), fb_debug_ctrl);
223 gk20a_writel(g, fb_mmu_debug_ctrl_r(), reg_val);
224
225 reg_val = gk20a_readl(g, gr_gpcs_pri_mmu_debug_ctrl_r());
226 reg_val = set_field(reg_val,
227 gr_gpcs_pri_mmu_debug_ctrl_debug_m(), gpc_debug_ctrl);
228 gk20a_writel(g, gr_gpcs_pri_mmu_debug_ctrl_r(), reg_val);
229}
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gm20b.h b/drivers/gpu/nvgpu/common/fb/fb_gm20b.h
new file mode 100644
index 00000000..eb868b01
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fb/fb_gm20b.h
@@ -0,0 +1,47 @@
1/*
2 * GM20B FB
3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _NVHOST_GM20B_FB
26#define _NVHOST_GM20B_FB
27
28struct gk20a;
29struct wpr_carveout_info;
30
31void fb_gm20b_init_fs_state(struct gk20a *g);
32void gm20b_fb_set_mmu_page_size(struct gk20a *g);
33bool gm20b_fb_set_use_full_comp_tag_line(struct gk20a *g);
34u32 gm20b_fb_mmu_ctrl(struct gk20a *g);
35u32 gm20b_fb_mmu_debug_ctrl(struct gk20a *g);
36u32 gm20b_fb_mmu_debug_wr(struct gk20a *g);
37u32 gm20b_fb_mmu_debug_rd(struct gk20a *g);
38unsigned int gm20b_fb_compression_page_size(struct gk20a *g);
39unsigned int gm20b_fb_compressible_page_size(struct gk20a *g);
40u32 gm20b_fb_compression_align_mask(struct gk20a *g);
41void gm20b_fb_dump_vpr_wpr_info(struct gk20a *g);
42void gm20b_fb_read_wpr_info(struct gk20a *g, struct wpr_carveout_info *inf);
43int gm20b_fb_vpr_info_fetch(struct gk20a *g);
44bool gm20b_fb_debug_mode_enabled(struct gk20a *g);
45void gm20b_fb_set_debug_mode(struct gk20a *g, bool enable);
46
47#endif
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gp106.c b/drivers/gpu/nvgpu/common/fb/fb_gp106.c
new file mode 100644
index 00000000..6f257db4
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fb/fb_gp106.c
@@ -0,0 +1,51 @@
1/*
2 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include "gk20a/gk20a.h"
24
25#include "fb_gp10b.h"
26#include "fb_gp106.h"
27
28#include <nvgpu/hw/gp106/hw_fb_gp106.h>
29
30#define HW_SCRUB_TIMEOUT_DEFAULT 100 /* usec */
31#define HW_SCRUB_TIMEOUT_MAX 2000000 /* usec */
32
33void gp106_fb_reset(struct gk20a *g)
34{
35 u32 val;
36
37 int retries = HW_SCRUB_TIMEOUT_MAX / HW_SCRUB_TIMEOUT_DEFAULT;
38 /* wait for memory to be accessible */
39 do {
40 u32 w = gk20a_readl(g, fb_niso_scrub_status_r());
41 if (fb_niso_scrub_status_flag_v(w)) {
42 nvgpu_log_fn(g, "done");
43 break;
44 }
45 nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT);
46 } while (--retries);
47
48 val = gk20a_readl(g, fb_mmu_priv_level_mask_r());
49 val &= ~fb_mmu_priv_level_mask_write_violation_m();
50 gk20a_writel(g, fb_mmu_priv_level_mask_r(), val);
51}
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gp106.h b/drivers/gpu/nvgpu/common/fb/fb_gp106.h
new file mode 100644
index 00000000..d5ee87f4
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fb/fb_gp106.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef FB_GP106_H
24#define FB_GP106_H
25struct gpu_ops;
26
27void gp106_fb_reset(struct gk20a *g);
28#endif
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gp10b.c b/drivers/gpu/nvgpu/common/fb/fb_gp10b.c
new file mode 100644
index 00000000..45fc8373
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fb/fb_gp10b.c
@@ -0,0 +1,38 @@
1/*
2 * GP10B FB
3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <nvgpu/sizes.h>
26
27#include "gk20a/gk20a.h"
28#include "fb_gp10b.h"
29
30unsigned int gp10b_fb_compression_page_size(struct gk20a *g)
31{
32 return SZ_64K;
33}
34
35unsigned int gp10b_fb_compressible_page_size(struct gk20a *g)
36{
37 return SZ_4K;
38}
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gp10b.h b/drivers/gpu/nvgpu/common/fb/fb_gp10b.h
new file mode 100644
index 00000000..52aa2a75
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fb/fb_gp10b.h
@@ -0,0 +1,32 @@
1/*
2 * GP10B FB
3 *
4 * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _NVGPU_GP10B_FB
26#define _NVGPU_GP10B_FB
27struct gk20a;
28
29unsigned int gp10b_fb_compression_page_size(struct gk20a *g);
30unsigned int gp10b_fb_compressible_page_size(struct gk20a *g);
31
32#endif
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gv100.c b/drivers/gpu/nvgpu/common/fb/fb_gv100.c
new file mode 100644
index 00000000..155c1e8b
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fb/fb_gv100.c
@@ -0,0 +1,257 @@
1/*
2 * GV100 FB
3 *
4 * Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <nvgpu/types.h>
26
27#include <nvgpu/dma.h>
28#include <nvgpu/log.h>
29#include <nvgpu/enabled.h>
30#include <nvgpu/gmmu.h>
31#include <nvgpu/nvgpu_common.h>
32#include <nvgpu/kmem.h>
33#include <nvgpu/nvgpu_mem.h>
34#include <nvgpu/acr/nvgpu_acr.h>
35#include <nvgpu/firmware.h>
36#include <nvgpu/pmu.h>
37#include <nvgpu/falcon.h>
38
39#include "gk20a/gk20a.h"
40#include "gm20b/acr_gm20b.h"
41
42#include "fb_gv100.h"
43
44#include <nvgpu/hw/gv100/hw_fb_gv100.h>
45#include <nvgpu/hw/gv100/hw_falcon_gv100.h>
46#include <nvgpu/hw/gv100/hw_mc_gv100.h>
47
48#define HW_SCRUB_TIMEOUT_DEFAULT 100 /* usec */
49#define HW_SCRUB_TIMEOUT_MAX 2000000 /* usec */
50#define MEM_UNLOCK_TIMEOUT 3500 /* msec */
51
52void gv100_fb_reset(struct gk20a *g)
53{
54 u32 val;
55 int retries = HW_SCRUB_TIMEOUT_MAX / HW_SCRUB_TIMEOUT_DEFAULT;
56
57 nvgpu_info(g, "reset gv100 fb");
58
59 /* wait for memory to be accessible */
60 do {
61 u32 w = gk20a_readl(g, fb_niso_scrub_status_r());
62 if (fb_niso_scrub_status_flag_v(w)) {
63 nvgpu_info(g, "done");
64 break;
65 }
66 nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT);
67 } while (--retries);
68
69 val = gk20a_readl(g, fb_mmu_priv_level_mask_r());
70 val &= ~fb_mmu_priv_level_mask_write_violation_m();
71 gk20a_writel(g, fb_mmu_priv_level_mask_r(), val);
72}
73
74void gv100_fb_enable_hub_intr(struct gk20a *g)
75{
76 u32 mask = 0;
77
78 mask = fb_niso_intr_en_set_mmu_other_fault_notify_m() |
79 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
80 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m() |
81 fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
82 fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
83
84 gk20a_writel(g, fb_niso_intr_en_set_r(0),
85 mask);
86}
87
88void gv100_fb_disable_hub_intr(struct gk20a *g)
89{
90 u32 mask = 0;
91
92 mask = fb_niso_intr_en_set_mmu_other_fault_notify_m() |
93 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
94 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m() |
95 fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
96 fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
97
98 gk20a_writel(g, fb_niso_intr_en_clr_r(0),
99 mask);
100}
101
102int gv100_fb_memory_unlock(struct gk20a *g)
103{
104 struct nvgpu_firmware *mem_unlock_fw = NULL;
105 struct bin_hdr *hsbin_hdr = NULL;
106 struct acr_fw_header *fw_hdr = NULL;
107 u32 *mem_unlock_ucode = NULL;
108 u32 *mem_unlock_ucode_header = NULL;
109 u32 sec_imem_dest = 0;
110 u32 val = 0;
111 int err = 0;
112
113 nvgpu_log_fn(g, " ");
114
115 nvgpu_log_info(g, "fb_mmu_vpr_info = 0x%08x",
116 gk20a_readl(g, fb_mmu_vpr_info_r()));
117 /*
118 * mem_unlock.bin should be written to install
119 * traps even if VPR isn't actually supported
120 */
121 mem_unlock_fw = nvgpu_request_firmware(g, "mem_unlock.bin", 0);
122 if (!mem_unlock_fw) {
123 nvgpu_err(g, "mem unlock ucode get fail");
124 err = -ENOENT;
125 goto exit;
126 }
127
128 /* Enable nvdec */
129 g->ops.mc.enable(g, mc_enable_nvdec_enabled_f());
130
131 /* nvdec falcon reset */
132 nvgpu_flcn_reset(&g->nvdec_flcn);
133
134 hsbin_hdr = (struct bin_hdr *)mem_unlock_fw->data;
135 fw_hdr = (struct acr_fw_header *)(mem_unlock_fw->data +
136 hsbin_hdr->header_offset);
137
138 mem_unlock_ucode_header = (u32 *)(mem_unlock_fw->data +
139 fw_hdr->hdr_offset);
140 mem_unlock_ucode = (u32 *)(mem_unlock_fw->data +
141 hsbin_hdr->data_offset);
142
143 /* Patch ucode signatures */
144 if (acr_ucode_patch_sig(g, mem_unlock_ucode,
145 (u32 *)(mem_unlock_fw->data + fw_hdr->sig_prod_offset),
146 (u32 *)(mem_unlock_fw->data + fw_hdr->sig_dbg_offset),
147 (u32 *)(mem_unlock_fw->data + fw_hdr->patch_loc),
148 (u32 *)(mem_unlock_fw->data + fw_hdr->patch_sig)) < 0) {
149 nvgpu_err(g, "mem unlock patch signatures fail");
150 err = -EPERM;
151 goto exit;
152 }
153
154 /* Clear interrupts */
155 nvgpu_flcn_set_irq(&g->nvdec_flcn, false, 0x0, 0x0);
156
157 /* Copy Non Secure IMEM code */
158 nvgpu_flcn_copy_to_imem(&g->nvdec_flcn, 0,
159 (u8 *)&mem_unlock_ucode[
160 mem_unlock_ucode_header[OS_CODE_OFFSET] >> 2],
161 mem_unlock_ucode_header[OS_CODE_SIZE], 0, false,
162 GET_IMEM_TAG(mem_unlock_ucode_header[OS_CODE_OFFSET]));
163
164 /* Put secure code after non-secure block */
165 sec_imem_dest = GET_NEXT_BLOCK(mem_unlock_ucode_header[OS_CODE_SIZE]);
166
167 nvgpu_flcn_copy_to_imem(&g->nvdec_flcn, sec_imem_dest,
168 (u8 *)&mem_unlock_ucode[
169 mem_unlock_ucode_header[APP_0_CODE_OFFSET] >> 2],
170 mem_unlock_ucode_header[APP_0_CODE_SIZE], 0, true,
171 GET_IMEM_TAG(mem_unlock_ucode_header[APP_0_CODE_OFFSET]));
172
173 /* load DMEM: ensure that signatures are patched */
174 nvgpu_flcn_copy_to_dmem(&g->nvdec_flcn, 0, (u8 *)&mem_unlock_ucode[
175 mem_unlock_ucode_header[OS_DATA_OFFSET] >> 2],
176 mem_unlock_ucode_header[OS_DATA_SIZE], 0);
177
178 nvgpu_log_info(g, "nvdec sctl reg %x\n",
179 gk20a_readl(g, g->nvdec_flcn.flcn_base +
180 falcon_falcon_sctl_r()));
181
182 /* set BOOTVEC to start of non-secure code */
183 nvgpu_flcn_bootstrap(&g->nvdec_flcn, 0);
184
185 /* wait for complete & halt */
186 nvgpu_flcn_wait_for_halt(&g->nvdec_flcn, MEM_UNLOCK_TIMEOUT);
187
188 /* check mem unlock status */
189 val = nvgpu_flcn_mailbox_read(&g->nvdec_flcn, 0);
190 if (val) {
191 nvgpu_err(g, "memory unlock failed, err %x", val);
192 err = -1;
193 goto exit;
194 }
195
196 nvgpu_log_info(g, "nvdec sctl reg %x\n",
197 gk20a_readl(g, g->nvdec_flcn.flcn_base +
198 falcon_falcon_sctl_r()));
199
200exit:
201 if (mem_unlock_fw)
202 nvgpu_release_firmware(g, mem_unlock_fw);
203
204 nvgpu_log_fn(g, "done, status - %d", err);
205
206 return err;
207}
208
209int gv100_fb_init_nvlink(struct gk20a *g)
210{
211 u32 data;
212 u32 mask = g->nvlink.enabled_links;
213
214 /* Map enabled link to SYSMEM */
215 data = nvgpu_readl(g, fb_hshub_config0_r());
216 data = set_field(data, fb_hshub_config0_sysmem_nvlink_mask_m(),
217 fb_hshub_config0_sysmem_nvlink_mask_f(mask));
218 nvgpu_writel(g, fb_hshub_config0_r(), data);
219
220 return 0;
221}
222
223int gv100_fb_enable_nvlink(struct gk20a *g)
224{
225 u32 data;
226
227 nvgpu_log(g, gpu_dbg_nvlink|gpu_dbg_info, "enabling nvlink");
228
229 /* Enable nvlink for NISO FBHUB */
230 data = nvgpu_readl(g, fb_niso_cfg1_r());
231 data = set_field(data, fb_niso_cfg1_sysmem_nvlink_m(),
232 fb_niso_cfg1_sysmem_nvlink_enabled_f());
233 nvgpu_writel(g, fb_niso_cfg1_r(), data);
234
235 /* Setup atomics */
236 data = nvgpu_readl(g, fb_mmu_ctrl_r());
237 data = set_field(data, fb_mmu_ctrl_atomic_capability_mode_m(),
238 fb_mmu_ctrl_atomic_capability_mode_rmw_f());
239 nvgpu_writel(g, fb_mmu_ctrl_r(), data);
240
241 data = nvgpu_readl(g, fb_hsmmu_pri_mmu_ctrl_r());
242 data = set_field(data, fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_m(),
243 fb_hsmmu_pri_mmu_ctrl_atomic_capability_mode_rmw_f());
244 nvgpu_writel(g, fb_hsmmu_pri_mmu_ctrl_r(), data);
245
246 data = nvgpu_readl(g, fb_fbhub_num_active_ltcs_r());
247 data = set_field(data, fb_fbhub_num_active_ltcs_hub_sys_atomic_mode_m(),
248 fb_fbhub_num_active_ltcs_hub_sys_atomic_mode_use_rmw_f());
249 nvgpu_writel(g, fb_fbhub_num_active_ltcs_r(), data);
250
251 data = nvgpu_readl(g, fb_hshub_num_active_ltcs_r());
252 data = set_field(data, fb_hshub_num_active_ltcs_hub_sys_atomic_mode_m(),
253 fb_hshub_num_active_ltcs_hub_sys_atomic_mode_use_rmw_f());
254 nvgpu_writel(g, fb_hshub_num_active_ltcs_r(), data);
255
256 return 0;
257}
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gv100.h b/drivers/gpu/nvgpu/common/fb/fb_gv100.h
new file mode 100644
index 00000000..195baccf
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fb/fb_gv100.h
@@ -0,0 +1,36 @@
1/*
2 * GV100 FB
3 *
4 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _NVGPU_GV100_FB
26#define _NVGPU_GV100_FB
27
28struct gk20a;
29
30void gv100_fb_reset(struct gk20a *g);
31void gv100_fb_enable_hub_intr(struct gk20a *g);
32void gv100_fb_disable_hub_intr(struct gk20a *g);
33int gv100_fb_memory_unlock(struct gk20a *g);
34int gv100_fb_init_nvlink(struct gk20a *g);
35int gv100_fb_enable_nvlink(struct gk20a *g);
36#endif
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gv11b.c b/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
new file mode 100644
index 00000000..69a71575
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fb/fb_gv11b.c
@@ -0,0 +1,1516 @@
1/*
2 * GV11B FB
3 *
4 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include <nvgpu/dma.h>
26#include <nvgpu/log.h>
27#include <nvgpu/enabled.h>
28#include <nvgpu/gmmu.h>
29#include <nvgpu/barrier.h>
30#include <nvgpu/bug.h>
31#include <nvgpu/soc.h>
32
33#include "gk20a/gk20a.h"
34#include "gk20a/mm_gk20a.h"
35
36#include "gv11b/fifo_gv11b.h"
37#include "gv11b/ce_gv11b.h"
38
39#include "fb_gk20a.h"
40#include "fb_gp10b.h"
41#include "fb_gv11b.h"
42
43#include <nvgpu/hw/gv11b/hw_fb_gv11b.h>
44#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>
45#include <nvgpu/hw/gv11b/hw_ram_gv11b.h>
46#include <nvgpu/hw/gv11b/hw_gmmu_gv11b.h>
47
48static int gv11b_fb_fix_page_fault(struct gk20a *g,
49 struct mmu_fault_info *mmfault);
50
51static void gv11b_init_nvlink_soc_credits(struct gk20a *g)
52{
53 if (nvgpu_is_bpmp_running(g) && (!nvgpu_platform_is_simulation(g))) {
54 nvgpu_log(g, gpu_dbg_info, "nvlink soc credits init done by bpmp");
55 } else {
56#ifndef __NVGPU_POSIX__
57 nvgpu_mss_nvlink_init_credits(g);
58#endif
59 }
60}
61
62void gv11b_fb_init_hw(struct gk20a *g)
63{
64 gk20a_fb_init_hw(g);
65
66 g->ops.fb.enable_hub_intr(g);
67}
68
69void gv11b_fb_init_fs_state(struct gk20a *g)
70{
71 nvgpu_log(g, gpu_dbg_fn, "initialize gv11b fb");
72
73 nvgpu_log(g, gpu_dbg_info, "fbhub active ltcs %x",
74 gk20a_readl(g, fb_fbhub_num_active_ltcs_r()));
75
76 nvgpu_log(g, gpu_dbg_info, "mmu active ltcs %u",
77 fb_mmu_num_active_ltcs_count_v(
78 gk20a_readl(g, fb_mmu_num_active_ltcs_r())));
79
80 if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) {
81 /* Bypass MMU check for non-secure boot. For
82 * secure boot, this register write has no effect. */
83 gk20a_writel(g, fb_priv_mmu_phy_secure_r(), 0xffffffffU);
84 }
85}
86
87void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr)
88{
89 u32 max_size = gr->max_comptag_mem;
90 /* one tag line covers 64KB */
91 u32 max_comptag_lines = max_size << 4;
92 u32 compbit_base_post_divide;
93 u64 compbit_base_post_multiply64;
94 u64 compbit_store_iova;
95 u64 compbit_base_post_divide64;
96
97 if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL))
98 compbit_store_iova = nvgpu_mem_get_phys_addr(g,
99 &gr->compbit_store.mem);
100 else
101 compbit_store_iova = nvgpu_mem_get_addr(g,
102 &gr->compbit_store.mem);
103
104 compbit_base_post_divide64 = compbit_store_iova >>
105 fb_mmu_cbc_base_address_alignment_shift_v();
106
107 do_div(compbit_base_post_divide64, g->ltc_count);
108 compbit_base_post_divide = u64_lo32(compbit_base_post_divide64);
109
110 compbit_base_post_multiply64 = ((u64)compbit_base_post_divide *
111 g->ltc_count) << fb_mmu_cbc_base_address_alignment_shift_v();
112
113 if (compbit_base_post_multiply64 < compbit_store_iova)
114 compbit_base_post_divide++;
115
116 if (g->ops.ltc.cbc_fix_config)
117 compbit_base_post_divide =
118 g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide);
119
120 gk20a_writel(g, fb_mmu_cbc_base_r(),
121 fb_mmu_cbc_base_address_f(compbit_base_post_divide));
122
123 nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte,
124 "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n",
125 (u32)(compbit_store_iova >> 32),
126 (u32)(compbit_store_iova & 0xffffffff),
127 compbit_base_post_divide);
128 nvgpu_log(g, gpu_dbg_fn, "cbc base %x",
129 gk20a_readl(g, fb_mmu_cbc_base_r()));
130
131 gr->compbit_store.base_hw = compbit_base_post_divide;
132
133 g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate,
134 0, max_comptag_lines - 1);
135
136}
137
138void gv11b_fb_reset(struct gk20a *g)
139{
140 gv11b_init_nvlink_soc_credits(g);
141}
142
143static const char * const invalid_str = "invalid";
144
145static const char *const fault_type_descs_gv11b[] = {
146 "invalid pde",
147 "invalid pde size",
148 "invalid pte",
149 "limit violation",
150 "unbound inst block",
151 "priv violation",
152 "write",
153 "read",
154 "pitch mask violation",
155 "work creation",
156 "unsupported aperture",
157 "compression failure",
158 "unsupported kind",
159 "region violation",
160 "poison",
161 "atomic"
162};
163
164static const char *const fault_client_type_descs_gv11b[] = {
165 "gpc",
166 "hub",
167};
168
169static const char *const fault_access_type_descs_gv11b[] = {
170 "virt read",
171 "virt write",
172 "virt atomic strong",
173 "virt prefetch",
174 "virt atomic weak",
175 "xxx",
176 "xxx",
177 "xxx",
178 "phys read",
179 "phys write",
180 "phys atomic",
181 "phys prefetch",
182};
183
184static const char *const hub_client_descs_gv11b[] = {
185 "vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu",
186 "host cpu nb", "iso", "mmu", "nvdec", "nvenc1", "nvenc2",
187 "niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc",
188 "scc nb", "sec", "ssync", "gr copy", "xv", "mmu nb",
189 "nvenc", "d falcon", "sked", "a falcon", "hsce0", "hsce1",
190 "hsce2", "hsce3", "hsce4", "hsce5", "hsce6", "hsce7", "hsce8",
191 "hsce9", "hshub", "ptp x0", "ptp x1", "ptp x2", "ptp x3",
192 "ptp x4", "ptp x5", "ptp x6", "ptp x7", "vpr scrubber0",
193 "vpr scrubber1", "dwbif", "fbfalcon", "ce shim", "gsp",
194 "dont care"
195};
196
197static const char *const gpc_client_descs_gv11b[] = {
198 "t1 0", "t1 1", "t1 2", "t1 3",
199 "t1 4", "t1 5", "t1 6", "t1 7",
200 "pe 0", "pe 1", "pe 2", "pe 3",
201 "pe 4", "pe 5", "pe 6", "pe 7",
202 "rast", "gcc", "gpccs",
203 "prop 0", "prop 1", "prop 2", "prop 3",
204 "gpm",
205 "ltp utlb 0", "ltp utlb 1", "ltp utlb 2", "ltp utlb 3",
206 "ltp utlb 4", "ltp utlb 5", "ltp utlb 6", "ltp utlb 7",
207 "utlb",
208 "t1 8", "t1 9", "t1 10", "t1 11",
209 "t1 12", "t1 13", "t1 14", "t1 15",
210 "tpccs 0", "tpccs 1", "tpccs 2", "tpccs 3",
211 "tpccs 4", "tpccs 5", "tpccs 6", "tpccs 7",
212 "pe 8", "pe 9", "tpccs 8", "tpccs 9",
213 "t1 16", "t1 17", "t1 18", "t1 19",
214 "pe 10", "pe 11", "tpccs 10", "tpccs 11",
215 "t1 20", "t1 21", "t1 22", "t1 23",
216 "pe 12", "pe 13", "tpccs 12", "tpccs 13",
217 "t1 24", "t1 25", "t1 26", "t1 27",
218 "pe 14", "pe 15", "tpccs 14", "tpccs 15",
219 "t1 28", "t1 29", "t1 30", "t1 31",
220 "pe 16", "pe 17", "tpccs 16", "tpccs 17",
221 "t1 32", "t1 33", "t1 34", "t1 35",
222 "pe 18", "pe 19", "tpccs 18", "tpccs 19",
223 "t1 36", "t1 37", "t1 38", "t1 39",
224};
225
226bool gv11b_fb_is_fault_buf_enabled(struct gk20a *g, u32 index)
227{
228 u32 reg_val;
229
230 reg_val = g->ops.fb.read_mmu_fault_buffer_size(g, index);
231 return fb_mmu_fault_buffer_size_enable_v(reg_val) != 0U;
232}
233
234static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g,
235 u32 index, u32 next)
236{
237 u32 reg_val;
238
239 nvgpu_log(g, gpu_dbg_intr, "updating get index with = %d", next);
240
241 reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
242 reg_val = set_field(reg_val, fb_mmu_fault_buffer_get_ptr_m(),
243 fb_mmu_fault_buffer_get_ptr_f(next));
244
245 /* while the fault is being handled it is possible for overflow
246 * to happen,
247 */
248 if (reg_val & fb_mmu_fault_buffer_get_overflow_m())
249 reg_val |= fb_mmu_fault_buffer_get_overflow_clear_f();
250
251 g->ops.fb.write_mmu_fault_buffer_get(g, index, reg_val);
252
253 /* make sure get ptr update is visible to everyone to avoid
254 * reading already read entry
255 */
256 nvgpu_mb();
257}
258
259static u32 gv11b_fb_fault_buffer_get_index(struct gk20a *g, u32 index)
260{
261 u32 reg_val;
262
263 reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
264 return fb_mmu_fault_buffer_get_ptr_v(reg_val);
265}
266
267static u32 gv11b_fb_fault_buffer_put_index(struct gk20a *g, u32 index)
268{
269 u32 reg_val;
270
271 reg_val = g->ops.fb.read_mmu_fault_buffer_put(g, index);
272 return fb_mmu_fault_buffer_put_ptr_v(reg_val);
273}
274
275static u32 gv11b_fb_fault_buffer_size_val(struct gk20a *g, u32 index)
276{
277 u32 reg_val;
278
279 reg_val = g->ops.fb.read_mmu_fault_buffer_size(g, index);
280 return fb_mmu_fault_buffer_size_val_v(reg_val);
281}
282
283static bool gv11b_fb_is_fault_buffer_empty(struct gk20a *g,
284 u32 index, u32 *get_idx)
285{
286 u32 put_idx;
287
288 *get_idx = gv11b_fb_fault_buffer_get_index(g, index);
289 put_idx = gv11b_fb_fault_buffer_put_index(g, index);
290
291 return *get_idx == put_idx;
292}
293
294static bool gv11b_fb_is_fault_buffer_full(struct gk20a *g, u32 index)
295{
296 u32 get_idx, put_idx, entries;
297
298
299 get_idx = gv11b_fb_fault_buffer_get_index(g, index);
300
301 put_idx = gv11b_fb_fault_buffer_put_index(g, index);
302
303 entries = gv11b_fb_fault_buffer_size_val(g, index);
304
305 return get_idx == ((put_idx + 1) % entries);
306}
307
308void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
309 u32 index, u32 state)
310{
311 u32 fault_status;
312 u32 reg_val;
313
314 nvgpu_log_fn(g, " ");
315
316 reg_val = g->ops.fb.read_mmu_fault_buffer_size(g, index);
317 if (state == NVGPU_FB_MMU_FAULT_BUF_ENABLED) {
318 if (gv11b_fb_is_fault_buf_enabled(g, index)) {
319 nvgpu_log_info(g, "fault buffer is already enabled");
320 } else {
321 reg_val |= fb_mmu_fault_buffer_size_enable_true_f();
322 g->ops.fb.write_mmu_fault_buffer_size(g, index,
323 reg_val);
324 }
325
326 } else {
327 struct nvgpu_timeout timeout;
328 u32 delay = GR_IDLE_CHECK_DEFAULT;
329
330 nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
331 NVGPU_TIMER_CPU_TIMER);
332
333 reg_val &= (~(fb_mmu_fault_buffer_size_enable_m()));
334 g->ops.fb.write_mmu_fault_buffer_size(g, index, reg_val);
335
336 fault_status = g->ops.fb.read_mmu_fault_status(g);
337
338 do {
339 if (!(fault_status & fb_mmu_fault_status_busy_true_f()))
340 break;
341 /*
342 * Make sure fault buffer is disabled.
343 * This is to avoid accessing fault buffer by hw
344 * during the window BAR2 is being unmapped by s/w
345 */
346 nvgpu_log_info(g, "fault status busy set, check again");
347 fault_status = g->ops.fb.read_mmu_fault_status(g);
348
349 nvgpu_usleep_range(delay, delay * 2);
350 delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
351 } while (!nvgpu_timeout_expired_msg(&timeout,
352 "fault status busy set"));
353 }
354}
355
356void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, u32 index)
357{
358 u32 addr_lo;
359 u32 addr_hi;
360
361 nvgpu_log_fn(g, " ");
362
363 gv11b_fb_fault_buf_set_state_hw(g, index,
364 NVGPU_FB_MMU_FAULT_BUF_DISABLED);
365 addr_lo = u64_lo32(g->mm.hw_fault_buf[index].gpu_va >>
366 ram_in_base_shift_v());
367 addr_hi = u64_hi32(g->mm.hw_fault_buf[index].gpu_va);
368
369 g->ops.fb.write_mmu_fault_buffer_lo_hi(g, index,
370 fb_mmu_fault_buffer_lo_addr_f(addr_lo),
371 fb_mmu_fault_buffer_hi_addr_f(addr_hi));
372
373 g->ops.fb.write_mmu_fault_buffer_size(g, index,
374 fb_mmu_fault_buffer_size_val_f(g->ops.fifo.get_num_fifos(g)) |
375 fb_mmu_fault_buffer_size_overflow_intr_enable_f());
376
377 gv11b_fb_fault_buf_set_state_hw(g, index, NVGPU_FB_MMU_FAULT_BUF_ENABLED);
378}
379
380void gv11b_fb_enable_hub_intr(struct gk20a *g)
381{
382 u32 mask = 0;
383
384 mask = fb_niso_intr_en_set_mmu_other_fault_notify_m() |
385 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
386 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m() |
387 fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
388 fb_niso_intr_en_set_mmu_replayable_fault_overflow_m() |
389 fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
390
391 gk20a_writel(g, fb_niso_intr_en_set_r(0),
392 mask);
393}
394
395void gv11b_fb_disable_hub_intr(struct gk20a *g)
396{
397 u32 mask = 0;
398
399 mask = fb_niso_intr_en_set_mmu_other_fault_notify_m() |
400 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
401 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m() |
402 fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
403 fb_niso_intr_en_set_mmu_replayable_fault_overflow_m() |
404 fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
405
406 gk20a_writel(g, fb_niso_intr_en_clr_r(0),
407 mask);
408}
409
410void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
411{
412 u32 ecc_addr, corrected_cnt, uncorrected_cnt;
413 u32 corrected_delta, uncorrected_delta;
414 u32 corrected_overflow, uncorrected_overflow;
415
416 ecc_addr = gk20a_readl(g, fb_mmu_l2tlb_ecc_address_r());
417 corrected_cnt = gk20a_readl(g,
418 fb_mmu_l2tlb_ecc_corrected_err_count_r());
419 uncorrected_cnt = gk20a_readl(g,
420 fb_mmu_l2tlb_ecc_uncorrected_err_count_r());
421
422 corrected_delta = fb_mmu_l2tlb_ecc_corrected_err_count_total_v(
423 corrected_cnt);
424 uncorrected_delta = fb_mmu_l2tlb_ecc_uncorrected_err_count_total_v(
425 uncorrected_cnt);
426 corrected_overflow = ecc_status &
427 fb_mmu_l2tlb_ecc_status_corrected_err_total_counter_overflow_m();
428
429 uncorrected_overflow = ecc_status &
430 fb_mmu_l2tlb_ecc_status_uncorrected_err_total_counter_overflow_m();
431
432 /* clear the interrupt */
433 if ((corrected_delta > 0) || corrected_overflow)
434 gk20a_writel(g, fb_mmu_l2tlb_ecc_corrected_err_count_r(), 0);
435 if ((uncorrected_delta > 0) || uncorrected_overflow)
436 gk20a_writel(g, fb_mmu_l2tlb_ecc_uncorrected_err_count_r(), 0);
437
438 gk20a_writel(g, fb_mmu_l2tlb_ecc_status_r(),
439 fb_mmu_l2tlb_ecc_status_reset_clear_f());
440
441 /* Handle overflow */
442 if (corrected_overflow)
443 corrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_corrected_err_count_total_s());
444 if (uncorrected_overflow)
445 uncorrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s());
446
447
448 g->ecc.fb.mmu_l2tlb_corrected_err_count.counters[0] +=
449 corrected_delta;
450 g->ecc.fb.mmu_l2tlb_uncorrected_err_count.counters[0] +=
451 uncorrected_delta;
452
453 if (ecc_status & fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m())
454 nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
455 if (ecc_status & fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m())
456 nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
457 if (corrected_overflow || uncorrected_overflow)
458 nvgpu_info(g, "mmu l2tlb ecc counter overflow!");
459
460 nvgpu_log(g, gpu_dbg_intr,
461 "ecc error address: 0x%x", ecc_addr);
462 nvgpu_log(g, gpu_dbg_intr,
463 "ecc error count corrected: %d, uncorrected %d",
464 g->ecc.fb.mmu_l2tlb_corrected_err_count.counters[0],
465 g->ecc.fb.mmu_l2tlb_uncorrected_err_count.counters[0]);
466}
467
468void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
469{
470 u32 ecc_addr, corrected_cnt, uncorrected_cnt;
471 u32 corrected_delta, uncorrected_delta;
472 u32 corrected_overflow, uncorrected_overflow;
473
474 ecc_addr = gk20a_readl(g, fb_mmu_hubtlb_ecc_address_r());
475 corrected_cnt = gk20a_readl(g,
476 fb_mmu_hubtlb_ecc_corrected_err_count_r());
477 uncorrected_cnt = gk20a_readl(g,
478 fb_mmu_hubtlb_ecc_uncorrected_err_count_r());
479
480 corrected_delta = fb_mmu_hubtlb_ecc_corrected_err_count_total_v(
481 corrected_cnt);
482 uncorrected_delta = fb_mmu_hubtlb_ecc_uncorrected_err_count_total_v(
483 uncorrected_cnt);
484 corrected_overflow = ecc_status &
485 fb_mmu_hubtlb_ecc_status_corrected_err_total_counter_overflow_m();
486
487 uncorrected_overflow = ecc_status &
488 fb_mmu_hubtlb_ecc_status_uncorrected_err_total_counter_overflow_m();
489
490 /* clear the interrupt */
491 if ((corrected_delta > 0) || corrected_overflow)
492 gk20a_writel(g, fb_mmu_hubtlb_ecc_corrected_err_count_r(), 0);
493 if ((uncorrected_delta > 0) || uncorrected_overflow)
494 gk20a_writel(g, fb_mmu_hubtlb_ecc_uncorrected_err_count_r(), 0);
495
496 gk20a_writel(g, fb_mmu_hubtlb_ecc_status_r(),
497 fb_mmu_hubtlb_ecc_status_reset_clear_f());
498
499 /* Handle overflow */
500 if (corrected_overflow)
501 corrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_corrected_err_count_total_s());
502 if (uncorrected_overflow)
503 uncorrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s());
504
505
506 g->ecc.fb.mmu_hubtlb_corrected_err_count.counters[0] +=
507 corrected_delta;
508 g->ecc.fb.mmu_hubtlb_uncorrected_err_count.counters[0] +=
509 uncorrected_delta;
510
511 if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m())
512 nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
513 if (ecc_status & fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m())
514 nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
515 if (corrected_overflow || uncorrected_overflow)
516 nvgpu_info(g, "mmu hubtlb ecc counter overflow!");
517
518 nvgpu_log(g, gpu_dbg_intr,
519 "ecc error address: 0x%x", ecc_addr);
520 nvgpu_log(g, gpu_dbg_intr,
521 "ecc error count corrected: %d, uncorrected %d",
522 g->ecc.fb.mmu_hubtlb_corrected_err_count.counters[0],
523 g->ecc.fb.mmu_hubtlb_uncorrected_err_count.counters[0]);
524}
525
526void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
527{
528 u32 ecc_addr, corrected_cnt, uncorrected_cnt;
529 u32 corrected_delta, uncorrected_delta;
530 u32 corrected_overflow, uncorrected_overflow;
531
532 ecc_addr = gk20a_readl(g, fb_mmu_fillunit_ecc_address_r());
533 corrected_cnt = gk20a_readl(g,
534 fb_mmu_fillunit_ecc_corrected_err_count_r());
535 uncorrected_cnt = gk20a_readl(g,
536 fb_mmu_fillunit_ecc_uncorrected_err_count_r());
537
538 corrected_delta = fb_mmu_fillunit_ecc_corrected_err_count_total_v(
539 corrected_cnt);
540 uncorrected_delta = fb_mmu_fillunit_ecc_uncorrected_err_count_total_v(
541 uncorrected_cnt);
542 corrected_overflow = ecc_status &
543 fb_mmu_fillunit_ecc_status_corrected_err_total_counter_overflow_m();
544
545 uncorrected_overflow = ecc_status &
546 fb_mmu_fillunit_ecc_status_uncorrected_err_total_counter_overflow_m();
547
548 /* clear the interrupt */
549 if ((corrected_delta > 0) || corrected_overflow)
550 gk20a_writel(g, fb_mmu_fillunit_ecc_corrected_err_count_r(), 0);
551 if ((uncorrected_delta > 0) || uncorrected_overflow)
552 gk20a_writel(g, fb_mmu_fillunit_ecc_uncorrected_err_count_r(), 0);
553
554 gk20a_writel(g, fb_mmu_fillunit_ecc_status_r(),
555 fb_mmu_fillunit_ecc_status_reset_clear_f());
556
557 /* Handle overflow */
558 if (corrected_overflow)
559 corrected_delta += (0x1UL << fb_mmu_fillunit_ecc_corrected_err_count_total_s());
560 if (uncorrected_overflow)
561 uncorrected_delta += (0x1UL << fb_mmu_fillunit_ecc_uncorrected_err_count_total_s());
562
563
564 g->ecc.fb.mmu_fillunit_corrected_err_count.counters[0] +=
565 corrected_delta;
566 g->ecc.fb.mmu_fillunit_uncorrected_err_count.counters[0] +=
567 uncorrected_delta;
568
569 if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m())
570 nvgpu_log(g, gpu_dbg_intr, "corrected ecc pte data error");
571 if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m())
572 nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pte data error");
573 if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m())
574 nvgpu_log(g, gpu_dbg_intr, "corrected ecc pde0 data error");
575 if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m())
576 nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pde0 data error");
577
578 if (corrected_overflow || uncorrected_overflow)
579 nvgpu_info(g, "mmu fillunit ecc counter overflow!");
580
581 nvgpu_log(g, gpu_dbg_intr,
582 "ecc error address: 0x%x", ecc_addr);
583 nvgpu_log(g, gpu_dbg_intr,
584 "ecc error count corrected: %d, uncorrected %d",
585 g->ecc.fb.mmu_fillunit_corrected_err_count.counters[0],
586 g->ecc.fb.mmu_fillunit_uncorrected_err_count.counters[0]);
587}
588
589static void gv11b_fb_parse_mmfault(struct mmu_fault_info *mmfault)
590{
591 if (WARN_ON(mmfault->fault_type >=
592 ARRAY_SIZE(fault_type_descs_gv11b)))
593 mmfault->fault_type_desc = invalid_str;
594 else
595 mmfault->fault_type_desc =
596 fault_type_descs_gv11b[mmfault->fault_type];
597
598 if (WARN_ON(mmfault->client_type >=
599 ARRAY_SIZE(fault_client_type_descs_gv11b)))
600 mmfault->client_type_desc = invalid_str;
601 else
602 mmfault->client_type_desc =
603 fault_client_type_descs_gv11b[mmfault->client_type];
604
605 mmfault->client_id_desc = invalid_str;
606 if (mmfault->client_type ==
607 gmmu_fault_client_type_hub_v()) {
608
609 if (!(WARN_ON(mmfault->client_id >=
610 ARRAY_SIZE(hub_client_descs_gv11b))))
611 mmfault->client_id_desc =
612 hub_client_descs_gv11b[mmfault->client_id];
613 } else if (mmfault->client_type ==
614 gmmu_fault_client_type_gpc_v()) {
615 if (!(WARN_ON(mmfault->client_id >=
616 ARRAY_SIZE(gpc_client_descs_gv11b))))
617 mmfault->client_id_desc =
618 gpc_client_descs_gv11b[mmfault->client_id];
619 }
620
621}
622
623static void gv11b_fb_print_fault_info(struct gk20a *g,
624 struct mmu_fault_info *mmfault)
625{
626 if (mmfault && mmfault->valid) {
627 nvgpu_err(g, "[MMU FAULT] "
628 "mmu engine id: %d, "
629 "ch id: %d, "
630 "fault addr: 0x%llx, "
631 "fault addr aperture: %d, "
632 "fault type: %s, "
633 "access type: %s, ",
634 mmfault->mmu_engine_id,
635 mmfault->chid,
636 mmfault->fault_addr,
637 mmfault->fault_addr_aperture,
638 mmfault->fault_type_desc,
639 fault_access_type_descs_gv11b[mmfault->access_type]);
640 nvgpu_err(g, "[MMU FAULT] "
641 "protected mode: %d, "
642 "client type: %s, "
643 "client id: %s, "
644 "gpc id if client type is gpc: %d, ",
645 mmfault->protected_mode,
646 mmfault->client_type_desc,
647 mmfault->client_id_desc,
648 mmfault->gpc_id);
649
650 nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
651 "faulted act eng id if any: 0x%x, "
652 "faulted veid if any: 0x%x, "
653 "faulted pbdma id if any: 0x%x, ",
654 mmfault->faulted_engine,
655 mmfault->faulted_subid,
656 mmfault->faulted_pbdma);
657 nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
658 "inst ptr: 0x%llx, "
659 "inst ptr aperture: %d, "
660 "replayable fault: %d, "
661 "replayable fault en: %d, "
662 "timestamp hi:lo 0x%08x:0x%08x, ",
663 mmfault->inst_ptr,
664 mmfault->inst_aperture,
665 mmfault->replayable_fault,
666 mmfault->replay_fault_en,
667 mmfault->timestamp_hi, mmfault->timestamp_lo);
668 }
669}
670
671/*
672 *Fault buffer format
673 *
674 * 31 28 24 23 16 15 8 7 4 0
675 *.-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-.
676 *| inst_lo |0 0|apr|0 0 0 0 0 0 0 0|
677 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
678 *| inst_hi |
679 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
680 *| addr_31_12 | |AP |
681 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
682 *| addr_63_32 |
683 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
684 *| timestamp_lo |
685 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
686 *| timestamp_hi |
687 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
688 *| (reserved) | engine_id |
689 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
690 *|V|R|P| gpc_id |0 0 0|t|0|acctp|0| client |RF0 0|faulttype|
691 */
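/*
 * The decode below walks an entry one 32-bit word at a time using the
 * generated gmmu_fault_buf_entry_*_w()/_v() accessors, e.g. (illustrative):
 *
 *	rd32_val = nvgpu_mem_rd32(g, mem,
 *			offset + gmmu_fault_buf_entry_fault_type_w());
 *	fault_type = gmmu_fault_buf_entry_fault_type_v(rd32_val);
 */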
692
693static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
694 struct nvgpu_mem *mem, u32 offset, struct mmu_fault_info *mmfault)
695{
696 u32 rd32_val;
697 u32 addr_lo, addr_hi;
698 u64 inst_ptr;
699 u32 chid = FIFO_INVAL_CHANNEL_ID;
700 struct channel_gk20a *refch;
701
702 memset(mmfault, 0, sizeof(*mmfault));
703
704 rd32_val = nvgpu_mem_rd32(g, mem, offset +
705 gmmu_fault_buf_entry_inst_lo_w());
706 addr_lo = gmmu_fault_buf_entry_inst_lo_v(rd32_val);
707 addr_lo = addr_lo << ram_in_base_shift_v();
708
709 addr_hi = nvgpu_mem_rd32(g, mem, offset +
710 gmmu_fault_buf_entry_inst_hi_w());
711 addr_hi = gmmu_fault_buf_entry_inst_hi_v(addr_hi);
712
713 inst_ptr = hi32_lo32_to_u64(addr_hi, addr_lo);
714
715 /* refch will be put back after fault is handled */
716 refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
717 if (refch)
718 chid = refch->chid;
719
720 /* it is ok to continue even if refch is NULL */
721 mmfault->refch = refch;
722 mmfault->chid = chid;
723 mmfault->inst_ptr = inst_ptr;
724 mmfault->inst_aperture = gmmu_fault_buf_entry_inst_aperture_v(rd32_val);
725
726 rd32_val = nvgpu_mem_rd32(g, mem, offset +
727 gmmu_fault_buf_entry_addr_lo_w());
728
729 mmfault->fault_addr_aperture =
730 gmmu_fault_buf_entry_addr_phys_aperture_v(rd32_val);
731 addr_lo = gmmu_fault_buf_entry_addr_lo_v(rd32_val);
732 addr_lo = addr_lo << ram_in_base_shift_v();
733
734 rd32_val = nvgpu_mem_rd32(g, mem, offset +
735 gmmu_fault_buf_entry_addr_hi_w());
736 addr_hi = gmmu_fault_buf_entry_addr_hi_v(rd32_val);
737 mmfault->fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo);
738
739 rd32_val = nvgpu_mem_rd32(g, mem, offset +
740 gmmu_fault_buf_entry_timestamp_lo_w());
741 mmfault->timestamp_lo =
742 gmmu_fault_buf_entry_timestamp_lo_v(rd32_val);
743
744 rd32_val = nvgpu_mem_rd32(g, mem, offset +
745 gmmu_fault_buf_entry_timestamp_hi_w());
746 mmfault->timestamp_hi =
747 gmmu_fault_buf_entry_timestamp_hi_v(rd32_val);
748
749 rd32_val = nvgpu_mem_rd32(g, mem, offset +
750 gmmu_fault_buf_entry_engine_id_w());
751
752 mmfault->mmu_engine_id =
753 gmmu_fault_buf_entry_engine_id_v(rd32_val);
754 gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(g, mmfault->mmu_engine_id,
755 &mmfault->faulted_engine, &mmfault->faulted_subid,
756 &mmfault->faulted_pbdma);
757
758 rd32_val = nvgpu_mem_rd32(g, mem, offset +
759 gmmu_fault_buf_entry_fault_type_w());
760 mmfault->client_id =
761 gmmu_fault_buf_entry_client_v(rd32_val);
762 mmfault->replayable_fault =
763 gmmu_fault_buf_entry_replayable_fault_v(rd32_val);
764
765 mmfault->fault_type =
766 gmmu_fault_buf_entry_fault_type_v(rd32_val);
767 mmfault->access_type =
768 gmmu_fault_buf_entry_access_type_v(rd32_val);
769
770 mmfault->client_type =
771 gmmu_fault_buf_entry_mmu_client_type_v(rd32_val);
772
773 mmfault->gpc_id =
774 gmmu_fault_buf_entry_gpc_id_v(rd32_val);
775 mmfault->protected_mode =
776 gmmu_fault_buf_entry_protected_mode_v(rd32_val);
777
778 mmfault->replay_fault_en =
779 gmmu_fault_buf_entry_replayable_fault_en_v(rd32_val);
780
781 mmfault->valid = gmmu_fault_buf_entry_valid_v(rd32_val);
782
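 /* Clear the valid bit in the buffer entry itself so the drain loop in
  * gv11b_fb_handle_mmu_nonreplay_replay_fault() does not treat this slot
  * as a fresh fault until hardware writes a new entry into it.
  */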
783 rd32_val = nvgpu_mem_rd32(g, mem, offset +
784 gmmu_fault_buf_entry_fault_type_w());
785 rd32_val &= ~(gmmu_fault_buf_entry_valid_m());
786 nvgpu_mem_wr32(g, mem, offset + gmmu_fault_buf_entry_valid_w(),
787 rd32_val);
788
789 gv11b_fb_parse_mmfault(mmfault);
790}
791
792static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
793 struct mmu_fault_info *mmfault, u32 *invalidate_replay_val)
794{
795 unsigned int id_type;
796 u32 num_lce, act_eng_bitmask = 0;
797 int err = 0;
798 u32 id = ((u32)~0);
799
800 if (!mmfault->valid)
801 return;
802
803 gv11b_fb_print_fault_info(g, mmfault);
804
805 num_lce = gv11b_ce_get_num_lce(g);
806 if ((mmfault->mmu_engine_id >=
807 gmmu_fault_mmu_eng_id_ce0_v()) &&
808 (mmfault->mmu_engine_id <
809 gmmu_fault_mmu_eng_id_ce0_v() + num_lce)) {
810 /* CE page faults are not reported as replayable */
811 nvgpu_log(g, gpu_dbg_intr, "CE Faulted");
812 err = gv11b_fb_fix_page_fault(g, mmfault);
813 gv11b_fifo_reset_pbdma_and_eng_faulted(g, mmfault->refch,
814 mmfault->faulted_pbdma, mmfault->faulted_engine);
815 if (!err) {
816 nvgpu_log(g, gpu_dbg_intr, "CE Page Fault Fixed");
817 *invalidate_replay_val = 0;
818 /* refch in mmfault is assigned at the time of copying
819 * fault info from snap reg or bar2 fault buf
820 */
821 gk20a_channel_put(mmfault->refch);
822 return;
823 }
824 /* Do recovery. Channel recovery needs refch */
825 nvgpu_log(g, gpu_dbg_intr, "CE Page Fault Not Fixed");
826 }
827
828 if (!mmfault->replayable_fault) {
829 if (mmfault->fault_type ==
830 gmmu_fault_type_unbound_inst_block_v()) {
831 /*
832 * Bug 1847172: When an engine faults due to an unbound
833 * instance block, the fault cannot be isolated to a
834 * single context so we need to reset the entire runlist
835 */
836 id_type = ID_TYPE_UNKNOWN;
837
838 } else if (mmfault->refch) {
839 if (gk20a_is_channel_marked_as_tsg(mmfault->refch)) {
840 id = mmfault->refch->tsgid;
841 id_type = ID_TYPE_TSG;
842 } else {
843 id = mmfault->chid;
844 id_type = ID_TYPE_CHANNEL;
845 }
846 if (mmfault->refch->mmu_nack_handled) {
847 /* We have already recovered for the same
848 * context, skip doing another recovery.
849 */
850 mmfault->refch->mmu_nack_handled = false;
851 /*
852 * Recovery path can be entered twice for the
853 * same error in case of mmu nack. If mmu
854 * nack interrupt is handled before mmu fault
855 * then channel reference is increased to avoid
856 * closing the channel by userspace. Decrement
857 * channel reference.
858 */
859 gk20a_channel_put(mmfault->refch);
860 /* refch in mmfault is assigned at the time
861 * of copying fault info from snap reg or bar2
862 * fault buf.
863 */
864 gk20a_channel_put(mmfault->refch);
865 return;
866 }
867 } else {
868 id_type = ID_TYPE_UNKNOWN;
869 }
870 if (mmfault->faulted_engine != FIFO_INVAL_ENGINE_ID)
871 act_eng_bitmask = BIT(mmfault->faulted_engine);
872
873 /* Indicate recovery is handled if mmu fault is a result of
874 * mmu nack. Note that refch may be NULL for unknown contexts.
875 */
876 if (mmfault->refch) mmfault->refch->mmu_nack_handled = true;
877 g->ops.fifo.teardown_ch_tsg(g, act_eng_bitmask,
878 id, id_type, RC_TYPE_MMU_FAULT, mmfault);
879 } else {
880 if (mmfault->fault_type == gmmu_fault_type_pte_v()) {
881 nvgpu_log(g, gpu_dbg_intr, "invalid pte! try to fix");
882 err = gv11b_fb_fix_page_fault(g, mmfault);
883 if (err)
884 *invalidate_replay_val |=
885 fb_mmu_invalidate_replay_cancel_global_f();
886 else
887 *invalidate_replay_val |=
888 fb_mmu_invalidate_replay_start_ack_all_f();
889 } else {
890 /* cancel faults other than invalid pte */
891 *invalidate_replay_val |=
892 fb_mmu_invalidate_replay_cancel_global_f();
893 }
894 /* refch in mmfault is assigned at the time of copying
895 * fault info from snap reg or bar2 fault buf
896 */
897 gk20a_channel_put(mmfault->refch);
898 }
899}
900
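/*
 * Callers OR per-fault flags into invalidate_replay_val and issue a single
 * replay/cancel per drain of the fault buffer; a sketch of the pattern used
 * in this file:
 *
 *	u32 invalidate_replay_val = 0;
 *
 *	gv11b_fb_handle_mmu_fault_common(g, mmfault, &invalidate_replay_val);
 *	if (invalidate_replay_val)
 *		gv11b_fb_replay_or_cancel_faults(g, invalidate_replay_val);
 */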
901static int gv11b_fb_replay_or_cancel_faults(struct gk20a *g,
902 u32 invalidate_replay_val)
903{
904 int err = 0;
905
906 nvgpu_log_fn(g, " ");
907
908 if (invalidate_replay_val &
909 fb_mmu_invalidate_replay_cancel_global_f()) {
910 /*
911 * cancel the faults so that the next occurrence faults as
912 * a replayable fault and channel recovery can be done
913 */
914 err = g->ops.fb.mmu_invalidate_replay(g,
915 fb_mmu_invalidate_replay_cancel_global_f());
916 } else if (invalidate_replay_val &
917 fb_mmu_invalidate_replay_start_ack_all_f()) {
918 /* pte valid is fixed. replay faulting request */
919 err = g->ops.fb.mmu_invalidate_replay(g,
920 fb_mmu_invalidate_replay_start_ack_all_f());
921 }
922
923 return err;
924}
925
926void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
927 u32 fault_status, u32 index)
928{
929 u32 get_indx, offset, rd32_val, entries;
930 struct nvgpu_mem *mem;
931 struct mmu_fault_info *mmfault;
932 u32 invalidate_replay_val = 0;
933 u64 prev_fault_addr = 0ULL;
934 u64 next_fault_addr = 0ULL;
935
936 if (gv11b_fb_is_fault_buffer_empty(g, index, &get_indx)) {
937 nvgpu_log(g, gpu_dbg_intr,
938 "SPURIOUS mmu fault: reg index:%d", index);
939 return;
940 }
941 nvgpu_log(g, gpu_dbg_intr, "%s MMU FAULT",
942 index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX ?
943 "REPLAY" : "NON-REPLAY");
944
945 nvgpu_log(g, gpu_dbg_intr, "get ptr = %d", get_indx);
946
947 mem = &g->mm.hw_fault_buf[index];
948 mmfault = &g->mm.fault_info[index];
949
950 entries = gv11b_fb_fault_buffer_size_val(g, index);
951 nvgpu_log(g, gpu_dbg_intr, "buffer num entries = %d", entries);
952
953 offset = (get_indx * gmmu_fault_buf_size_v()) / sizeof(u32);
954 nvgpu_log(g, gpu_dbg_intr, "starting word offset = 0x%x", offset);
955
956 rd32_val = nvgpu_mem_rd32(g, mem,
957 offset + gmmu_fault_buf_entry_valid_w());
958 nvgpu_log(g, gpu_dbg_intr, "entry valid offset val = 0x%x", rd32_val);
959
960 while ((rd32_val & gmmu_fault_buf_entry_valid_m())) {
961
962 nvgpu_log(g, gpu_dbg_intr, "entry valid = 0x%x", rd32_val);
963
964 gv11b_fb_copy_from_hw_fault_buf(g, mem, offset, mmfault);
965
966 get_indx = (get_indx + 1) % entries;
967 nvgpu_log(g, gpu_dbg_intr, "new get index = %d", get_indx);
968
969 gv11b_fb_fault_buffer_get_ptr_update(g, index, get_indx);
970
971 offset = (get_indx * gmmu_fault_buf_size_v()) / sizeof(u32);
972 nvgpu_log(g, gpu_dbg_intr, "next word offset = 0x%x", offset);
973
974 rd32_val = nvgpu_mem_rd32(g, mem,
975 offset + gmmu_fault_buf_entry_valid_w());
976
977 if (index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX &&
978 mmfault->fault_addr != 0ULL) {
979 /* A fault at address 0 is never fixed, but it must still
980 * reach handle_mmu_fault_common: prev and next start at 0,
981 * so an address-0 fault would look like a duplicate and be
982 * skipped. De-duplicate only non-zero fault addresses.
983 */
984 prev_fault_addr = next_fault_addr;
985 next_fault_addr = mmfault->fault_addr;
986 if (prev_fault_addr == next_fault_addr) {
987 nvgpu_log(g, gpu_dbg_intr, "pte already scanned");
988 if (mmfault->refch)
989 gk20a_channel_put(mmfault->refch);
990 continue;
991 }
992 }
993
994 gv11b_fb_handle_mmu_fault_common(g, mmfault,
995 &invalidate_replay_val);
996
997 }
998 if (index == NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX &&
999 invalidate_replay_val != 0U)
1000 gv11b_fb_replay_or_cancel_faults(g, invalidate_replay_val);
1001}
1002
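/*
 * Fill mmfault from the MMU fault snapshot registers. Used for "other"
 * faults (e.g. BAR2 or physical accesses) which are not written into the
 * hardware fault buffers.
 */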
1003static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g,
1004 u32 fault_status, struct mmu_fault_info *mmfault)
1005{
1006 u32 reg_val;
1007 u32 addr_lo, addr_hi;
1008 u64 inst_ptr;
1009 int chid = FIFO_INVAL_CHANNEL_ID;
1010 struct channel_gk20a *refch;
1011
1012 memset(mmfault, 0, sizeof(*mmfault));
1013
1014 if (!(fault_status & fb_mmu_fault_status_valid_set_f())) {
1015
1016 nvgpu_log(g, gpu_dbg_intr, "mmu fault status valid not set");
1017 return;
1018 }
1019
1020 g->ops.fb.read_mmu_fault_inst_lo_hi(g, &reg_val, &addr_hi);
1021
1022 addr_lo = fb_mmu_fault_inst_lo_addr_v(reg_val);
1023 addr_lo = addr_lo << ram_in_base_shift_v();
1024
1025 addr_hi = fb_mmu_fault_inst_hi_addr_v(addr_hi);
1026 inst_ptr = hi32_lo32_to_u64(addr_hi, addr_lo);
1027
1028 /* refch will be put back after fault is handled */
1029 refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
1030 if (refch)
1031 chid = refch->chid;
1032
1033 /* It is still ok to continue if refch is NULL */
1034 mmfault->refch = refch;
1035 mmfault->chid = chid;
1036 mmfault->inst_ptr = inst_ptr;
1037 mmfault->inst_aperture = fb_mmu_fault_inst_lo_aperture_v(reg_val);
1038 mmfault->mmu_engine_id = fb_mmu_fault_inst_lo_engine_id_v(reg_val);
1039
1040 gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(g, mmfault->mmu_engine_id,
1041 &mmfault->faulted_engine, &mmfault->faulted_subid,
1042 &mmfault->faulted_pbdma);
1043
1044 g->ops.fb.read_mmu_fault_addr_lo_hi(g, &reg_val, &addr_hi);
1045
1046 addr_lo = fb_mmu_fault_addr_lo_addr_v(reg_val);
1047 addr_lo = addr_lo << ram_in_base_shift_v();
1048
1049 mmfault->fault_addr_aperture =
1050 fb_mmu_fault_addr_lo_phys_aperture_v(reg_val);
1051
1052 addr_hi = fb_mmu_fault_addr_hi_addr_v(addr_hi);
1053 mmfault->fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo);
1054
1055 reg_val = g->ops.fb.read_mmu_fault_info(g);
1056 mmfault->fault_type = fb_mmu_fault_info_fault_type_v(reg_val);
1057 mmfault->replayable_fault =
1058 fb_mmu_fault_info_replayable_fault_v(reg_val);
1059 mmfault->client_id = fb_mmu_fault_info_client_v(reg_val);
1060 mmfault->access_type = fb_mmu_fault_info_access_type_v(reg_val);
1061 mmfault->client_type = fb_mmu_fault_info_client_type_v(reg_val);
1062 mmfault->gpc_id = fb_mmu_fault_info_gpc_id_v(reg_val);
1063 mmfault->protected_mode =
1064 fb_mmu_fault_info_protected_mode_v(reg_val);
1065 mmfault->replay_fault_en =
1066 fb_mmu_fault_info_replayable_fault_en_v(reg_val);
1067
1068 mmfault->valid = fb_mmu_fault_info_valid_v(reg_val);
1069
1070 fault_status &= ~(fb_mmu_fault_status_valid_m());
1071 g->ops.fb.write_mmu_fault_status(g, fault_status);
1072
1073 gv11b_fb_parse_mmfault(mmfault);
1074
1075}
1076
1077void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g,
1078 u32 fault_status)
1079{
1080 u32 reg_val;
1081 u32 index = NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX;
1082
1083 reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
1084
1085 if (fault_status &
1086 fb_mmu_fault_status_replayable_getptr_corrupted_m()) {
1087
1088 nvgpu_err(g, "replayable getptr corrupted set");
1089
1090 gv11b_fb_fault_buf_configure_hw(g, index);
1091
1092 reg_val = set_field(reg_val,
1093 fb_mmu_fault_buffer_get_getptr_corrupted_m(),
1094 fb_mmu_fault_buffer_get_getptr_corrupted_clear_f());
1095 }
1096
1097 if (fault_status &
1098 fb_mmu_fault_status_replayable_overflow_m()) {
1099 bool buffer_full = gv11b_fb_is_fault_buffer_full(g, index);
1100
1101 nvgpu_err(g, "replayable overflow: buffer full:%s",
1102 buffer_full ? "true" : "false");
1103
1104 reg_val = set_field(reg_val,
1105 fb_mmu_fault_buffer_get_overflow_m(),
1106 fb_mmu_fault_buffer_get_overflow_clear_f());
1107 }
1108
1109 g->ops.fb.write_mmu_fault_buffer_get(g, index, reg_val);
1110}
1111
1112void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
1113 u32 fault_status)
1114{
1115 u32 reg_val;
1116 u32 index = NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX;
1117
1118 reg_val = g->ops.fb.read_mmu_fault_buffer_get(g, index);
1119
1120 if (fault_status &
1121 fb_mmu_fault_status_non_replayable_getptr_corrupted_m()) {
1122
1123 nvgpu_err(g, "non replayable getptr corrupted set");
1124
1125 gv11b_fb_fault_buf_configure_hw(g, index);
1126
1127 reg_val = set_field(reg_val,
1128 fb_mmu_fault_buffer_get_getptr_corrupted_m(),
1129 fb_mmu_fault_buffer_get_getptr_corrupted_clear_f());
1130 }
1131
1132 if (fault_status &
1133 fb_mmu_fault_status_non_replayable_overflow_m()) {
1134
1135 bool buffer_full = gv11b_fb_is_fault_buffer_full(g, index);
1136
1137 nvgpu_err(g, "non replayable overflow: buffer full:%s",
1138 buffer_full ? "true" : "false");
1139
1140 reg_val = set_field(reg_val,
1141 fb_mmu_fault_buffer_get_overflow_m(),
1142 fb_mmu_fault_buffer_get_overflow_clear_f());
1143 }
1144
1145 g->ops.fb.write_mmu_fault_buffer_get(g, index, reg_val);
1146}
1147
1148static void gv11b_fb_handle_bar2_fault(struct gk20a *g,
1149 struct mmu_fault_info *mmfault, u32 fault_status)
1150{
1151 if (fault_status & fb_mmu_fault_status_non_replayable_error_m()) {
1152 if (gv11b_fb_is_fault_buf_enabled(g,
1153 NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX))
1154 gv11b_fb_fault_buf_configure_hw(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
1155 }
1156
1157 if (fault_status & fb_mmu_fault_status_replayable_error_m()) {
1158 if (gv11b_fb_is_fault_buf_enabled(g,
1159 NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX))
1160 gv11b_fb_fault_buf_configure_hw(g,
1161 NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
1162 }
1163 gv11b_ce_mthd_buffer_fault_in_bar2_fault(g);
1164
1165 g->ops.bus.bar2_bind(g, &g->mm.bar2.inst_block);
1166
1167 if (mmfault->refch) {
1168 gk20a_channel_put(mmfault->refch);
1169 mmfault->refch = NULL;
1170 }
1171}
1172
1173void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
1174 u32 fault_status)
1175{
1176 struct mmu_fault_info *mmfault;
1177 u32 invalidate_replay_val = 0;
1178
1179 mmfault = &g->mm.fault_info[NVGPU_MM_MMU_FAULT_TYPE_OTHER_AND_NONREPLAY];
1180
1181 gv11b_mm_copy_from_fault_snap_reg(g, fault_status, mmfault);
1182
1183 /* BAR2/Physical faults will not be snapped in hw fault buf */
1184 if (mmfault->mmu_engine_id == gmmu_fault_mmu_eng_id_bar2_v()) {
1185 nvgpu_err(g, "BAR2 MMU FAULT");
1186 gv11b_fb_handle_bar2_fault(g, mmfault, fault_status);
1187
1188 } else if (mmfault->mmu_engine_id ==
1189 gmmu_fault_mmu_eng_id_physical_v()) {
1190 /* usually means VPR or out of bounds physical accesses */
1191 nvgpu_err(g, "PHYSICAL MMU FAULT");
1192
1193 } else {
1194 gv11b_fb_handle_mmu_fault_common(g, mmfault,
1195 &invalidate_replay_val);
1196
1197 if (invalidate_replay_val)
1198 gv11b_fb_replay_or_cancel_faults(g,
1199 invalidate_replay_val);
1200 }
1201}
1202
1203void gv11b_fb_handle_dropped_mmu_fault(struct gk20a *g, u32 fault_status)
1204{
1205 u32 dropped_faults = 0;
1206
1207 dropped_faults = fb_mmu_fault_status_dropped_bar1_phys_set_f() |
1208 fb_mmu_fault_status_dropped_bar1_virt_set_f() |
1209 fb_mmu_fault_status_dropped_bar2_phys_set_f() |
1210 fb_mmu_fault_status_dropped_bar2_virt_set_f() |
1211 fb_mmu_fault_status_dropped_ifb_phys_set_f() |
1212 fb_mmu_fault_status_dropped_ifb_virt_set_f() |
1213 fb_mmu_fault_status_dropped_other_phys_set_f()|
1214 fb_mmu_fault_status_dropped_other_virt_set_f();
1215
1216 if (fault_status & dropped_faults) {
1217 nvgpu_err(g, "dropped mmu fault (0x%08x)",
1218 fault_status & dropped_faults);
1219 g->ops.fb.write_mmu_fault_status(g, dropped_faults);
1220 }
1221}
1222
1223void gv11b_fb_handle_replayable_mmu_fault(struct gk20a *g)
1224{
1225 u32 fault_status = gk20a_readl(g, fb_mmu_fault_status_r());
1226
1227 if (!(fault_status & fb_mmu_fault_status_replayable_m()))
1228 return;
1229
1230 if (gv11b_fb_is_fault_buf_enabled(g,
1231 NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) {
1232 gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
1233 fault_status,
1234 NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
1235 }
1236}
1237
1238static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr)
1239{
1240 u32 fault_status = g->ops.fb.read_mmu_fault_status(g);
1241
1242 nvgpu_log(g, gpu_dbg_intr, "mmu_fault_status = 0x%08x", fault_status);
1243
1244 if (niso_intr &
1245 fb_niso_intr_mmu_other_fault_notify_m()) {
1246
1247 gv11b_fb_handle_dropped_mmu_fault(g, fault_status);
1248
1249 gv11b_fb_handle_other_fault_notify(g, fault_status);
1250 }
1251
1252 if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX)) {
1253
1254 if (niso_intr &
1255 fb_niso_intr_mmu_nonreplayable_fault_notify_m()) {
1256
1257 gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
1258 fault_status,
1259 NVGPU_FB_MMU_FAULT_NONREPLAY_REG_INDEX);
1260
1261 /*
1262 * When all the faults are processed,
1263 * GET and PUT will have the same value and the mmu fault
1264 * status bit will be reset by HW
1265 */
1266 }
1267 if (niso_intr &
1268 fb_niso_intr_mmu_nonreplayable_fault_overflow_m()) {
1269
1270 gv11b_fb_handle_nonreplay_fault_overflow(g,
1271 fault_status);
1272 }
1273
1274 }
1275
1276 if (gv11b_fb_is_fault_buf_enabled(g, NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX)) {
1277
1278 if (niso_intr &
1279 fb_niso_intr_mmu_replayable_fault_notify_m()) {
1280
1281 gv11b_fb_handle_mmu_nonreplay_replay_fault(g,
1282 fault_status,
1283 NVGPU_FB_MMU_FAULT_REPLAY_REG_INDEX);
1284 }
1285 if (niso_intr &
1286 fb_niso_intr_mmu_replayable_fault_overflow_m()) {
1287
1288 gv11b_fb_handle_replay_fault_overflow(g,
1289 fault_status);
1290 }
1291
1292 }
1293
1294 nvgpu_log(g, gpu_dbg_intr, "clear mmu fault status");
1295 g->ops.fb.write_mmu_fault_status(g,
1296 fb_mmu_fault_status_valid_clear_f());
1297}
1298
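/*
 * Top-level HUB interrupt handler: access counter notifications are only
 * logged, uncorrected MMU ECC errors are dispatched to the per-unit ECC
 * handlers, and any MMU fault bits are passed on to
 * gv11b_fb_handle_mmu_fault().
 */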
1299void gv11b_fb_hub_isr(struct gk20a *g)
1300{
1301 u32 status, niso_intr;
1302
1303 nvgpu_mutex_acquire(&g->mm.hub_isr_mutex);
1304
1305 niso_intr = gk20a_readl(g, fb_niso_intr_r());
1306
1307 nvgpu_log(g, gpu_dbg_intr, "enter hub isr, niso_intr = 0x%08x",
1308 niso_intr);
1309
1310 if (niso_intr &
1311 (fb_niso_intr_hub_access_counter_notify_m() |
1312 fb_niso_intr_hub_access_counter_error_m())) {
1313
1314 nvgpu_info(g, "hub access counter notify/error");
1315 }
1316 if (niso_intr &
1317 fb_niso_intr_mmu_ecc_uncorrected_error_notify_pending_f()) {
1318
1319 nvgpu_info(g, "ecc uncorrected error notify");
1320
1321 status = gk20a_readl(g, fb_mmu_l2tlb_ecc_status_r());
1322 if (status)
1323 gv11b_handle_l2tlb_ecc_isr(g, status);
1324
1325 status = gk20a_readl(g, fb_mmu_hubtlb_ecc_status_r());
1326 if (status)
1327 gv11b_handle_hubtlb_ecc_isr(g, status);
1328
1329 status = gk20a_readl(g, fb_mmu_fillunit_ecc_status_r());
1330 if (status)
1331 gv11b_handle_fillunit_ecc_isr(g, status);
1332 }
1333 if (niso_intr &
1334 (fb_niso_intr_mmu_other_fault_notify_m() |
1335 fb_niso_intr_mmu_replayable_fault_notify_m() |
1336 fb_niso_intr_mmu_replayable_fault_overflow_m() |
1337 fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
1338 fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) {
1339
1340 nvgpu_log(g, gpu_dbg_intr, "MMU Fault");
1341 gv11b_fb_handle_mmu_fault(g, niso_intr);
1342 }
1343
1344 nvgpu_mutex_release(&g->mm.hub_isr_mutex);
1345}
1346
1347bool gv11b_fb_mmu_fault_pending(struct gk20a *g)
1348{
1349 if (gk20a_readl(g, fb_niso_intr_r()) &
1350 (fb_niso_intr_mmu_other_fault_notify_m() |
1351 fb_niso_intr_mmu_ecc_uncorrected_error_notify_m() |
1352 fb_niso_intr_mmu_replayable_fault_notify_m() |
1353 fb_niso_intr_mmu_replayable_fault_overflow_m() |
1354 fb_niso_intr_mmu_nonreplayable_fault_notify_m() |
1355 fb_niso_intr_mmu_nonreplayable_fault_overflow_m()))
1356 return true;
1357
1358 return false;
1359}
1360
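/*
 * Trigger a replay or cancel of pending replayable faults and wait for the
 * MMU PRI FIFO to report empty; polled in 5 us steps for up to 200 retries
 * under tlb_lock.
 */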
1361int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
1362 u32 invalidate_replay_val)
1363{
1364 int err = -ETIMEDOUT;
1365 u32 reg_val;
1366 struct nvgpu_timeout timeout;
1367
1368 nvgpu_log_fn(g, " ");
1369
1370 nvgpu_mutex_acquire(&g->mm.tlb_lock);
1371
1372 reg_val = gk20a_readl(g, fb_mmu_invalidate_r());
1373
1374 reg_val |= fb_mmu_invalidate_all_va_true_f() |
1375 fb_mmu_invalidate_all_pdb_true_f() |
1376 invalidate_replay_val |
1377 fb_mmu_invalidate_trigger_true_f();
1378
1379 gk20a_writel(g, fb_mmu_invalidate_r(), reg_val);
1380
1381 /* retry 200 times */
1382 nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
1383 do {
1384 reg_val = gk20a_readl(g, fb_mmu_ctrl_r());
1385 if (fb_mmu_ctrl_pri_fifo_empty_v(reg_val) !=
1386 fb_mmu_ctrl_pri_fifo_empty_false_f()) {
1387 err = 0;
1388 break;
1389 }
1390 nvgpu_udelay(5);
1391 } while (!nvgpu_timeout_expired_msg(&timeout,
1392 "invalidate replay failed"));
1393 if (err)
1394 nvgpu_err(g, "invalidate replay timed out");
1395
1396 nvgpu_mutex_release(&g->mm.tlb_lock);
1397
1398 return err;
1399}
1400
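/*
 * Try to make the faulting address usable again: read back the two 32-bit
 * halves of the PTE, leave an all-zero or already-valid PTE alone, otherwise
 * set the valid bit (clearing read-only), write it back and invalidate the
 * TLB so the GMMU drops any cached translation.
 */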
1401static int gv11b_fb_fix_page_fault(struct gk20a *g,
1402 struct mmu_fault_info *mmfault)
1403{
1404 int err = 0;
1405 u32 pte[2];
1406
1407 if (mmfault->refch == NULL) {
1408 nvgpu_log(g, gpu_dbg_intr, "refch from mmu_fault_info is NULL");
1409 return -EINVAL;
1410 }
1411
1412 err = __nvgpu_get_pte(g,
1413 mmfault->refch->vm, mmfault->fault_addr, &pte[0]);
1414 if (err) {
1415 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, "pte not found");
1416 return err;
1417 }
1418 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
1419 "pte: %#08x %#08x", pte[1], pte[0]);
1420
1421 if (pte[0] == 0x0 && pte[1] == 0x0) {
1422 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
1423 "pte all zeros, do not set valid");
1424 return -1;
1425 }
1426 if (pte[0] & gmmu_new_pte_valid_true_f()) {
1427 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
1428 "pte valid already set");
1429 return -1;
1430 }
1431
1432 pte[0] |= gmmu_new_pte_valid_true_f();
1433 if (pte[0] & gmmu_new_pte_read_only_true_f())
1434 pte[0] &= ~(gmmu_new_pte_read_only_true_f());
1435 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
1436 "new pte: %#08x %#08x", pte[1], pte[0]);
1437
1438 err = __nvgpu_set_pte(g,
1439 mmfault->refch->vm, mmfault->fault_addr, &pte[0]);
1440 if (err) {
1441 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, "pte not fixed");
1442 return err;
1443 }
1444 /* invalidate tlb so that GMMU does not use old cached translation */
1445 g->ops.fb.tlb_invalidate(g, mmfault->refch->vm->pdb.mem);
1446
1447 err = __nvgpu_get_pte(g,
1448 mmfault->refch->vm, mmfault->fault_addr, &pte[0]);
1449 nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte,
1450 "pte after tlb invalidate: %#08x %#08x",
1451 pte[1], pte[0]);
1452 return err;
1453}
1454
1455void fb_gv11b_write_mmu_fault_buffer_lo_hi(struct gk20a *g, u32 index,
1456 u32 addr_lo, u32 addr_hi)
1457{
1458 nvgpu_writel(g, fb_mmu_fault_buffer_lo_r(index), addr_lo);
1459 nvgpu_writel(g, fb_mmu_fault_buffer_hi_r(index), addr_hi);
1460}
1461
1462u32 fb_gv11b_read_mmu_fault_buffer_get(struct gk20a *g, u32 index)
1463{
1464 return nvgpu_readl(g, fb_mmu_fault_buffer_get_r(index));
1465}
1466
1467void fb_gv11b_write_mmu_fault_buffer_get(struct gk20a *g, u32 index,
1468 u32 reg_val)
1469{
1470 nvgpu_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val);
1471}
1472
1473u32 fb_gv11b_read_mmu_fault_buffer_put(struct gk20a *g, u32 index)
1474{
1475 return nvgpu_readl(g, fb_mmu_fault_buffer_put_r(index));
1476}
1477
1478u32 fb_gv11b_read_mmu_fault_buffer_size(struct gk20a *g, u32 index)
1479{
1480 return nvgpu_readl(g, fb_mmu_fault_buffer_size_r(index));
1481}
1482
1483void fb_gv11b_write_mmu_fault_buffer_size(struct gk20a *g, u32 index,
1484 u32 reg_val)
1485{
1486 nvgpu_writel(g, fb_mmu_fault_buffer_size_r(index), reg_val);
1487}
1488
1489void fb_gv11b_read_mmu_fault_addr_lo_hi(struct gk20a *g,
1490 u32 *addr_lo, u32 *addr_hi)
1491{
1492 *addr_lo = nvgpu_readl(g, fb_mmu_fault_addr_lo_r());
1493 *addr_hi = nvgpu_readl(g, fb_mmu_fault_addr_hi_r());
1494}
1495
1496void fb_gv11b_read_mmu_fault_inst_lo_hi(struct gk20a *g,
1497 u32 *inst_lo, u32 *inst_hi)
1498{
1499 *inst_lo = nvgpu_readl(g, fb_mmu_fault_inst_lo_r());
1500 *inst_hi = nvgpu_readl(g, fb_mmu_fault_inst_hi_r());
1501}
1502
1503u32 fb_gv11b_read_mmu_fault_info(struct gk20a *g)
1504{
1505 return nvgpu_readl(g, fb_mmu_fault_info_r());
1506}
1507
1508u32 fb_gv11b_read_mmu_fault_status(struct gk20a *g)
1509{
1510 return nvgpu_readl(g, fb_mmu_fault_status_r());
1511}
1512
1513void fb_gv11b_write_mmu_fault_status(struct gk20a *g, u32 reg_val)
1514{
1515 nvgpu_writel(g, fb_mmu_fault_status_r(), reg_val);
1516}
diff --git a/drivers/gpu/nvgpu/common/fb/fb_gv11b.h b/drivers/gpu/nvgpu/common/fb/fb_gv11b.h
new file mode 100644
index 00000000..71fb3a41
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/fb/fb_gv11b.h
@@ -0,0 +1,82 @@
1/*
2 * GV11B FB
3 *
4 * Copyright (c) 2016-2018, NVIDIA CORPORATION. All rights reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef _NVGPU_GV11B_FB
26#define _NVGPU_GV11B_FB
27
28#define NONREPLAY_REG_INDEX 0
29#define REPLAY_REG_INDEX 1
30
31struct gk20a;
32struct gr_gk20a;
33
34void gv11b_fb_init_hw(struct gk20a *g);
35
36void gv11b_fb_init_fs_state(struct gk20a *g);
37void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr);
38void gv11b_fb_reset(struct gk20a *g);
39void gv11b_fb_hub_isr(struct gk20a *g);
40
41bool gv11b_fb_is_fault_buf_enabled(struct gk20a *g, u32 index);
42void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
43 u32 index, u32 state);
44void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, u32 index);
45void gv11b_fb_enable_hub_intr(struct gk20a *g);
46void gv11b_fb_disable_hub_intr(struct gk20a *g);
47bool gv11b_fb_mmu_fault_pending(struct gk20a *g);
48void gv11b_fb_handle_dropped_mmu_fault(struct gk20a *g, u32 fault_status);
49void gv11b_fb_handle_other_fault_notify(struct gk20a *g,
50 u32 fault_status);
51void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
52 u32 fault_status, u32 index);
53void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g,
54 u32 fault_status);
55void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g,
56 u32 fault_status);
57void gv11b_fb_handle_replayable_mmu_fault(struct gk20a *g);
58void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status);
59void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status);
60void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status);
61
62void fb_gv11b_write_mmu_fault_buffer_lo_hi(struct gk20a *g, u32 index,
63 u32 addr_lo, u32 addr_hi);
64u32 fb_gv11b_read_mmu_fault_buffer_get(struct gk20a *g, u32 index);
65void fb_gv11b_write_mmu_fault_buffer_get(struct gk20a *g, u32 index,
66 u32 reg_val);
67u32 fb_gv11b_read_mmu_fault_buffer_put(struct gk20a *g, u32 index);
68u32 fb_gv11b_read_mmu_fault_buffer_size(struct gk20a *g, u32 index);
69void fb_gv11b_write_mmu_fault_buffer_size(struct gk20a *g, u32 index,
70 u32 reg_val);
71void fb_gv11b_read_mmu_fault_addr_lo_hi(struct gk20a *g,
72 u32 *addr_lo, u32 *addr_hi);
73void fb_gv11b_read_mmu_fault_inst_lo_hi(struct gk20a *g,
74 u32 *inst_lo, u32 *inst_hi);
75u32 fb_gv11b_read_mmu_fault_info(struct gk20a *g);
76u32 fb_gv11b_read_mmu_fault_status(struct gk20a *g);
77void fb_gv11b_write_mmu_fault_status(struct gk20a *g, u32 reg_val);
78
79int gv11b_fb_mmu_invalidate_replay(struct gk20a *g,
80 u32 invalidate_replay_val);
81
82#endif